repo_name | path | copies | size | content | license
---|---|---|---|---|---|
treycausey/scikit-learn | sklearn/svm/setup.py | 2 | 3246 |
import os
from os.path import join
import numpy
import warnings
from sklearn._build_utils import get_blas_info
def configuration(parent_package='', top_path=None):
from numpy.distutils.misc_util import Configuration
from numpy.distutils.system_info import get_info, BlasNotFoundError
config = Configuration('svm', parent_package, top_path)
config.add_subpackage('tests')
# Section LibSVM
# we compile both libsvm and libsvm_sparse
config.add_library('libsvm-skl',
sources=[join('src', 'libsvm', 'libsvm_template.cpp')],
depends=[join('src', 'libsvm', 'svm.cpp'),
join('src', 'libsvm', 'svm.h')],
# Force C++ linking in case gcc is picked up instead
# of g++ under windows with some versions of MinGW
extra_link_args=['-lstdc++'],
)
libsvm_sources = ['libsvm.c']
libsvm_depends = [join('src', 'libsvm', 'libsvm_helper.c'),
join('src', 'libsvm', 'libsvm_template.cpp'),
join('src', 'libsvm', 'svm.cpp'),
join('src', 'libsvm', 'svm.h')]
config.add_extension('libsvm',
sources=libsvm_sources,
include_dirs=[numpy.get_include(),
join('src', 'libsvm')],
libraries=['libsvm-skl'],
depends=libsvm_depends,
)
### liblinear module
cblas_libs, blas_info = get_blas_info()
if os.name == 'posix':
cblas_libs.append('m')
liblinear_sources = ['liblinear.c',
join('src', 'liblinear', '*.cpp')]
liblinear_depends = [join('src', 'liblinear', '*.h'),
join('src', 'liblinear', 'liblinear_helper.c')]
config.add_extension('liblinear',
sources=liblinear_sources,
libraries=cblas_libs,
include_dirs=[join('..', 'src', 'cblas'),
numpy.get_include(),
blas_info.pop('include_dirs', [])],
extra_compile_args=blas_info.pop('extra_compile_args',
[]),
depends=liblinear_depends,
# extra_compile_args=['-O0 -fno-inline'],
** blas_info)
## end liblinear module
# this should go *after* libsvm-skl
libsvm_sparse_sources = ['libsvm_sparse.c']
config.add_extension('libsvm_sparse', libraries=['libsvm-skl'],
sources=libsvm_sparse_sources,
include_dirs=[numpy.get_include(),
join("src", "libsvm")],
depends=[join("src", "libsvm", "svm.h"),
join("src", "libsvm",
"libsvm_sparse_helper.c")])
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
| bsd-3-clause |
necromuralist/boston_housing | boston_housing/common.py | 1 | 8011 |
# python standard library
from collections import namedtuple
import os
# third party
from sklearn import datasets
from tabulate import tabulate
HousingData = namedtuple("HousingData", 'features prices names'.split())
def load_housing_data():
"""
Convenience function to get the Boston housing data
:return: HousingData namedtuple of (features, prices, names)
"""
city_data = datasets.load_boston()
return HousingData(features=city_data.data, prices=city_data.target,
names=city_data.feature_names)
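# A small illustrative sketch: one way load_housing_data might be used.
# The helper name below is hypothetical and exists only for demonstration.
def _example_load_housing_data():
    housing = load_housing_data()
    print(housing.features.shape, housing.prices.shape)
    print(list(housing.names))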
CLIENT_FEATURES = [[11.95, 0.00, 18.100, 0, 0.6590, 5.6090, 90.00, 1.385, 24,
680.0, 20.20, 332.09, 12.13]]
class PrinterConstants(object):
__slots__ = ()
as_is = '{0}'
two_digits = '{0:.2f}'
count = 'Count'
proportion = 'Proportion'
# end PrinterConstants
class ValueCountsPrinter(object):
"""
A class to print a value-counts table
"""
def __init__(self, value_counts,
label,
format_string=PrinterConstants.as_is,
count_or_proportion=PrinterConstants.count):
"""
:param:
- `value_counts`: pandas value_counts Series
- `label`: header-label for the data
- `format_string`: format string for the count/proportion column
- `count_or_proportion`: Header for the count/proportion column
"""
self.value_counts = value_counts
self.label = label
self.format_string = format_string
self.count_or_proportion = count_or_proportion
self._first_width = None
self._second_width = None
self._row_format_string = None
self._header_string = None
self._top_separator = None
self._bottom_separator = None
self._sum_row = None
return
@property
def first_width(self):
"""
Width of first column's longest label
"""
if self._first_width is None:
self._first_width = len(self.label)
self._first_width = max(self._first_width,
max(len(str(i))
for i in self.value_counts.index))
return self._first_width
@property
def second_width(self):
"""
Width of the second column header
"""
if self._second_width is None:
self._second_width = len(self.count_or_proportion)
return self._second_width
@property
def row_format_string(self):
"""
Format-string for the rows
"""
if self._row_format_string is None:
self._row_format_string = "{{0:<{0}}} {{1:>{1}}}".format(self.first_width,
self.second_width)
return self._row_format_string
@property
def header_string(self):
"""
First line of the output
"""
if self._header_string is None:
self._header_string = self.row_format_string.format(self.label,
self.count_or_proportion)
return self._header_string
@property
def top_separator(self):
"""
Separator between header and counts
"""
if self._top_separator is None:
self._top_separator = '=' * (self.first_width + self.second_width + 1)
return self._top_separator
@property
def bottom_separator(self):
"""
Separator between counts and total
"""
if self._bottom_separator is None:
self._bottom_separator = '-' * len(self.top_separator)
return self._bottom_separator
@property
def sum_row(self):
"""
Final row with sum of count column
"""
if self._sum_row is None:
format_string = '{{0}} {{1:>{0}}}'.format(self.second_width)
sum_value = self.format_string.format(self.value_counts.values.sum())
self._sum_row = format_string.format(' ' * self.first_width,
sum_value)
return self._sum_row
def __str__(self):
content = '\n'.join((self.row_format_string.format(value,
self.format_string.format(self.value_counts.values[index]))
for index,value in enumerate(self.value_counts.index)))
return "{0}\n{1}\n{2}\n{3}\n{4}".format(self.header_string,
self.top_separator,
content,
self.bottom_separator,
self.sum_row)
def __call__(self):
"""
Convenience method to print the string
"""
print(str(self))
# end ValueCountsPrinter
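# A small illustrative sketch: one way ValueCountsPrinter might be used with a
# pandas value_counts Series. The helper name and sample data are hypothetical.
def _example_value_counts_printer():
    import pandas
    letters = pandas.Series(['a', 'b', 'a', 'c', 'a'])
    printer = ValueCountsPrinter(letters.value_counts(), label='letter')
    printer()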
class ValueProportionsPrinter(ValueCountsPrinter):
"""
Printer for proportion tables
"""
def __init__(self, value_counts, label,
format_string=PrinterConstants.two_digits,
count_or_proportion=PrinterConstants.proportion):
super(ValueProportionsPrinter, self).__init__(value_counts=value_counts,
label=label,
format_string=format_string,
count_or_proportion=count_or_proportion)
return
# end ValueProportionsPrinter
def print_value_counts(value_counts, header, format_string='{0}'):
"""
prints the value counts
:param:
- `value_counts`: pandas value_counts returned object
- `header`: list of header names (exactly two)
- `format_string`: format string for values
"""
first_width = len(header[0])
if value_counts.index.dtype == 'object':
first_width = max(first_width, max(len(i) for i in value_counts.index))
second_width = len(header[1])
# row layout format; the format_string parameter is kept for formatting values
row_format = "{{0:<{0}}} {{1:>{1}}}".format(first_width, second_width)
header_string = row_format.format(*header)
top_separator = '=' * (first_width + len(header[1]) + 1)
separator = '-' * len(top_separator)
print(header_string)
print(top_separator)
for index, value in enumerate(value_counts.index):
print(row_format.format(value,
format_string.format(value_counts.values[index])))
print(separator)
print('{0} {1:>{2}}'.format(' ' * first_width,
format_string
.format(value_counts.values.sum()),
second_width))
return
def print_properties(data_type, values, construction, missing='None', table_format='orgtbl'):
"""
Prints out the table of properties
"""
print(tabulate([['Data Type', data_type],
['Values', values],
['Missing Values', missing],
['Construction', "Created from '{0}'".format(construction)]],
headers='Property Description'.split(),
tablefmt=table_format))
def print_image_directive(filename, figure, scale='95%', print_only=False):
"""
saves and prints the rst image directive
:param:
- `filename`: filename to save the image (without 'figures/' or file extension)
- `figure`: matplotlib figure to save the image
- `scale: percent scale for the image
- `print_only`: assume the figure exists, print directive only
:postcondition: figure saved, rst image directive output
"""
path = os.path.join('figures/', filename)
if not print_only:
figure.savefig(path + '.svg')
figure.savefig(path + '.pdf')
print(".. image:: {0}.*".format(path))
print(" :align: center")
print(" :scale: {0}".format(scale))
| mit |
fornaxco/Mars-Express-Challenge | preprocessing/merge_data.py | 1 | 1100 |
# -*- coding: utf-8 -*-
"""
@author: fornax
"""
import os
import pandas as pd
os.chdir(os.path.dirname(os.path.abspath(__file__)))
os.sys.path.append(os.path.dirname(os.getcwd()))
import prepare_data1 as prep
DATA_PATH = os.path.join('..', prep.DATA_PATH)
dates = ['2008-08-22_2010-07-10', '2010-07-10_2012-05-27', '2012-05-27_2014-04-14', '2014-04-14_2016-03-01']
contexts = ['dmop', 'evtf', 'ftl', 'saaf', 'ltdata']
for context in contexts:
aggr = []
for m_year, date in enumerate(dates):
if m_year < 3:
folder = '../train_set'
else:
folder = '../test_set'
df = pd.read_csv(os.path.join(folder, 'context--%s--%s.csv' % (date, context)))
df['m_year'] = m_year
aggr.append(df)
pd.concat(aggr).to_csv(os.path.join(DATA_PATH, context + '.csv'), index=False)
# power files
aggr = []
for m_year, date in enumerate(dates[:-1]):
df = pd.read_csv(os.path.join('../train_set', 'power--%s.csv' % (date)))
df['m_year'] = m_year
aggr.append(df)
pd.concat(aggr).to_csv(os.path.join(DATA_PATH, 'power.csv'), index=False)
| bsd-3-clause |
Mazecreator/tensorflow | tensorflow/contrib/learn/python/learn/estimators/__init__.py | 34 | 12484 |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""An estimator is a rule for calculating an estimate of a given quantity.
# Estimators
* **Estimators** are used to train and evaluate TensorFlow models.
They support regression and classification problems.
* **Classifiers** are functions that have discrete outcomes.
* **Regressors** are functions that predict continuous values.
## Choosing the correct estimator
* For **Regression** problems use one of the following:
* `LinearRegressor`: Uses linear model.
* `DNNRegressor`: Uses DNN.
* `DNNLinearCombinedRegressor`: Uses Wide & Deep.
* `TensorForestEstimator`: Uses RandomForest.
See tf.contrib.tensor_forest.client.random_forest.TensorForestEstimator.
* `Estimator`: Use when you need a custom model.
* For **Classification** problems use one of the following:
* `LinearClassifier`: Multiclass classifier using Linear model.
* `DNNClassifier`: Multiclass classifier using DNN.
* `DNNLinearCombinedClassifier`: Multiclass classifier using Wide & Deep.
* `TensorForestEstimator`: Uses RandomForest.
See tf.contrib.tensor_forest.client.random_forest.TensorForestEstimator.
* `SVM`: Binary classifier using linear SVMs.
* `LogisticRegressor`: Use when you need custom model for binary
classification.
* `Estimator`: Use when you need custom model for N class classification.
## Pre-canned Estimators
Pre-canned estimators are machine learning estimators premade for general
purpose problems. If you need more customization, you can always write your
own custom estimator as described in the section below.
Pre-canned estimators are tested and optimized for speed and quality.
### Define the feature columns
Here are some possible types of feature columns used as inputs to a pre-canned
estimator.
Feature columns may vary depending on the estimator used, so the sections
below show which feature columns are fed to each estimator.
```python
sparse_feature_a = sparse_column_with_keys(
column_name="sparse_feature_a", keys=["AB", "CD", ...])
embedding_feature_a = embedding_column(
sparse_id_column=sparse_feature_a, dimension=3, combiner="sum")
sparse_feature_b = sparse_column_with_hash_bucket(
column_name="sparse_feature_b", hash_bucket_size=1000)
embedding_feature_b = embedding_column(
sparse_id_column=sparse_feature_b, dimension=16, combiner="sum")
crossed_feature_a_x_b = crossed_column(
columns=[sparse_feature_a, sparse_feature_b], hash_bucket_size=10000)
real_feature = real_valued_column("real_feature")
real_feature_buckets = bucketized_column(
source_column=real_feature,
boundaries=[18, 25, 30, 35, 40, 45, 50, 55, 60, 65])
```
### Create the pre-canned estimator
DNNClassifier, DNNRegressor, and DNNLinearCombinedClassifier are all pretty
similar to each other in how you use them. You can easily plug in an
optimizer and/or regularization to those estimators.
#### DNNClassifier
A classifier for TensorFlow DNN models.
```python
my_features = [embedding_feature_a, embedding_feature_b]
estimator = DNNClassifier(
feature_columns=my_features,
hidden_units=[1024, 512, 256],
optimizer=tf.train.ProximalAdagradOptimizer(
learning_rate=0.1,
l1_regularization_strength=0.001
))
```
#### DNNRegressor
A regressor for TensorFlow DNN models.
```python
my_features = [embedding_feature_a, embedding_feature_b]
estimator = DNNRegressor(
feature_columns=my_features,
hidden_units=[1024, 512, 256])
# Or estimator using the ProximalAdagradOptimizer optimizer with
# regularization.
estimator = DNNRegressor(
feature_columns=my_features,
hidden_units=[1024, 512, 256],
optimizer=tf.train.ProximalAdagradOptimizer(
learning_rate=0.1,
l1_regularization_strength=0.001
))
```
#### DNNLinearCombinedClassifier
A classifier for TensorFlow Linear and DNN joined training models.
* Wide and deep model
* Multi class (2 by default)
```python
my_linear_features = [crossed_feature_a_x_b]
my_deep_features = [embedding_feature_a, embedding_feature_b]
estimator = DNNLinearCombinedClassifier(
# Common settings
n_classes=n_classes,
weight_column_name=weight_column_name,
# Wide settings
linear_feature_columns=my_linear_features,
linear_optimizer=tf.train.FtrlOptimizer(...),
# Deep settings
dnn_feature_columns=my_deep_features,
dnn_hidden_units=[1000, 500, 100],
dnn_optimizer=tf.train.AdagradOptimizer(...))
```
#### LinearClassifier
Train a linear model to classify instances into one of multiple possible
classes. When the number of possible classes is 2, this is binary classification.
```python
my_features = [sparse_feature_b, crossed_feature_a_x_b]
estimator = LinearClassifier(
feature_columns=my_features,
optimizer=tf.train.FtrlOptimizer(
learning_rate=0.1,
l1_regularization_strength=0.001
))
```
#### LinearRegressor
Train a linear regression model to predict a label value given observation of
feature values.
```python
my_features = [sparse_feature_b, crossed_feature_a_x_b]
estimator = LinearRegressor(
feature_columns=my_features)
```
### LogisticRegressor
Logistic regression estimator for binary classification.
```python
# See tf.contrib.learn.Estimator(...) for details on model_fn structure
def my_model_fn(...):
pass
estimator = LogisticRegressor(model_fn=my_model_fn)
# Input builders
def input_fn_train():
pass
estimator.fit(input_fn=input_fn_train)
estimator.predict(x=x)
```
#### SVM - Support Vector Machine
Support Vector Machine (SVM) model for binary classification.
Currently only linear SVMs are supported.
```python
my_features = [real_feature, sparse_feature_a]
estimator = SVM(
example_id_column='example_id',
feature_columns=my_features,
l2_regularization=10.0)
```
#### DynamicRnnEstimator
An `Estimator` that uses a recurrent neural network with dynamic unrolling.
```python
problem_type = ProblemType.CLASSIFICATION # or REGRESSION
prediction_type = PredictionType.SINGLE_VALUE # or MULTIPLE_VALUE
estimator = DynamicRnnEstimator(problem_type,
prediction_type,
my_feature_columns)
```
### Use the estimator
There are two main functions for using estimators: one for training and one
for evaluation.
You can specify different data sources for each one in order to use different
datasets for train and eval.
```python
# Input builders
def input_fn_train(): # returns x, y
...
estimator.fit(input_fn=input_fn_train)
def input_fn_eval(): # returns x, y
...
estimator.evaluate(input_fn=input_fn_eval)
estimator.predict(x=x)
```
## Creating Custom Estimator
To create a custom `Estimator`, provide a function to `Estimator`'s
constructor that builds your model (`model_fn`, below):
```python
estimator = tf.contrib.learn.Estimator(
model_fn=model_fn,
model_dir=model_dir) # Where the model's data (e.g., checkpoints)
# are saved.
```
Here is a skeleton of this function, with descriptions of its arguments and
return values in the accompanying tables:
```python
def model_fn(features, targets, mode, params):
# Logic to do the following:
# 1. Configure the model via TensorFlow operations
# 2. Define the loss function for training/evaluation
# 3. Define the training operation/optimizer
# 4. Generate predictions
return predictions, loss, train_op
```
You may use `mode` and check against
`tf.contrib.learn.ModeKeys.{TRAIN, EVAL, INFER}` to parameterize `model_fn`.
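For example, `model_fn` might branch on `mode` as in this small sketch
(illustrative only):
```python
def my_model_fn(features, targets, mode, params):
  if mode == tf.contrib.learn.ModeKeys.TRAIN:
    pass  # e.g. build the optimizer and train_op only for training
  elif mode == tf.contrib.learn.ModeKeys.EVAL:
    pass  # e.g. add evaluation-only metrics here
```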
In the Further Reading section below, there is an end-to-end TensorFlow
tutorial for building a custom estimator.
## Additional Estimators
There are additional estimators under
`tensorflow.contrib.factorization.python.ops`:
* Gaussian mixture model (GMM) clustering
## Further reading
For further reading, there are several tutorials with relevant topics,
including:
* [Overview of linear models](../../../tutorials/linear/overview.md)
* [Linear model tutorial](../../../tutorials/wide/index.md)
* [Wide and deep learning tutorial](../../../tutorials/wide_and_deep/index.md)
* [Custom estimator tutorial](../../../tutorials/estimators/index.md)
* [Building input functions](../../../tutorials/input_fn/index.md)
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.learn.python.learn.estimators._sklearn import NotFittedError
from tensorflow.contrib.learn.python.learn.estimators.constants import ProblemType
from tensorflow.contrib.learn.python.learn.estimators.dnn import DNNClassifier
from tensorflow.contrib.learn.python.learn.estimators.dnn import DNNEstimator
from tensorflow.contrib.learn.python.learn.estimators.dnn import DNNRegressor
from tensorflow.contrib.learn.python.learn.estimators.dnn_linear_combined import DNNLinearCombinedClassifier
from tensorflow.contrib.learn.python.learn.estimators.dnn_linear_combined import DNNLinearCombinedEstimator
from tensorflow.contrib.learn.python.learn.estimators.dnn_linear_combined import DNNLinearCombinedRegressor
from tensorflow.contrib.learn.python.learn.estimators.dynamic_rnn_estimator import DynamicRnnEstimator
from tensorflow.contrib.learn.python.learn.estimators.estimator import BaseEstimator
from tensorflow.contrib.learn.python.learn.estimators.estimator import Estimator
from tensorflow.contrib.learn.python.learn.estimators.estimator import GraphRewriteSpec
from tensorflow.contrib.learn.python.learn.estimators.estimator import infer_real_valued_columns_from_input
from tensorflow.contrib.learn.python.learn.estimators.estimator import infer_real_valued_columns_from_input_fn
from tensorflow.contrib.learn.python.learn.estimators.estimator import SKCompat
from tensorflow.contrib.learn.python.learn.estimators.head import binary_svm_head
from tensorflow.contrib.learn.python.learn.estimators.head import Head
from tensorflow.contrib.learn.python.learn.estimators.head import loss_only_head
from tensorflow.contrib.learn.python.learn.estimators.head import multi_class_head
from tensorflow.contrib.learn.python.learn.estimators.head import multi_head
from tensorflow.contrib.learn.python.learn.estimators.head import multi_label_head
from tensorflow.contrib.learn.python.learn.estimators.head import no_op_train_fn
from tensorflow.contrib.learn.python.learn.estimators.head import poisson_regression_head
from tensorflow.contrib.learn.python.learn.estimators.head import regression_head
from tensorflow.contrib.learn.python.learn.estimators.kmeans import KMeansClustering
from tensorflow.contrib.learn.python.learn.estimators.linear import LinearClassifier
from tensorflow.contrib.learn.python.learn.estimators.linear import LinearEstimator
from tensorflow.contrib.learn.python.learn.estimators.linear import LinearRegressor
from tensorflow.contrib.learn.python.learn.estimators.logistic_regressor import LogisticRegressor
from tensorflow.contrib.learn.python.learn.estimators.metric_key import MetricKey
from tensorflow.contrib.learn.python.learn.estimators.model_fn import ModeKeys
from tensorflow.contrib.learn.python.learn.estimators.model_fn import ModelFnOps
from tensorflow.contrib.learn.python.learn.estimators.prediction_key import PredictionKey
from tensorflow.contrib.learn.python.learn.estimators.rnn_common import PredictionType
from tensorflow.contrib.learn.python.learn.estimators.run_config import ClusterConfig
from tensorflow.contrib.learn.python.learn.estimators.run_config import Environment
from tensorflow.contrib.learn.python.learn.estimators.run_config import RunConfig
from tensorflow.contrib.learn.python.learn.estimators.run_config import TaskType
from tensorflow.contrib.learn.python.learn.estimators.svm import SVM
| apache-2.0 |
BonexGu/Blik2D-SDK | Blik2D/addon/tensorflow-1.2.1_for_blik/tensorflow/contrib/learn/python/learn/dataframe/tensorflow_dataframe.py | 75 | 29377 |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""TensorFlowDataFrame implements convenience functions using TensorFlow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import csv
import numpy as np
from tensorflow.contrib.learn.python.learn.dataframe import dataframe as df
from tensorflow.contrib.learn.python.learn.dataframe.transforms import batch
from tensorflow.contrib.learn.python.learn.dataframe.transforms import csv_parser
from tensorflow.contrib.learn.python.learn.dataframe.transforms import example_parser
from tensorflow.contrib.learn.python.learn.dataframe.transforms import in_memory_source
from tensorflow.contrib.learn.python.learn.dataframe.transforms import reader_source
from tensorflow.contrib.learn.python.learn.dataframe.transforms import sparsify
from tensorflow.contrib.learn.python.learn.dataframe.transforms import split_mask
from tensorflow.python.client import session as sess
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.ops import io_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import gfile
from tensorflow.python.training import coordinator
from tensorflow.python.training import queue_runner as qr
def _expand_file_names(filepatterns):
"""Takes a list of file patterns and returns a list of resolved file names."""
if not isinstance(filepatterns, (list, tuple, set)):
filepatterns = [filepatterns]
filenames = set()
for filepattern in filepatterns:
names = set(gfile.Glob(filepattern))
filenames |= names
return list(filenames)
def _dtype_to_nan(dtype):
if dtype is dtypes.string:
return b""
elif dtype.is_integer:
return np.nan
elif dtype.is_floating:
return np.nan
elif dtype is dtypes.bool:
return np.nan
else:
raise ValueError("Can't parse type without NaN into sparse tensor: %s" %
dtype)
def _get_default_value(feature_spec):
if isinstance(feature_spec, parsing_ops.FixedLenFeature):
return feature_spec.default_value
else:
return _dtype_to_nan(feature_spec.dtype)
class TensorFlowDataFrame(df.DataFrame):
"""TensorFlowDataFrame implements convenience functions using TensorFlow."""
def run(self,
num_batches=None,
graph=None,
session=None,
start_queues=True,
initialize_variables=True,
**kwargs):
"""Builds and runs the columns of the `DataFrame` and yields batches.
This is a generator that yields a dictionary mapping column names to
evaluated columns.
Args:
num_batches: the maximum number of batches to produce. If not specified,
the returned generator will yield batches indefinitely.
graph: the `Graph` in which the `DataFrame` should be built.
session: the `Session` in which to run the columns of the `DataFrame`.
start_queues: if true, queues will be started before running and halted
after producing `n` batches.
initialize_variables: if true, variables will be initialized.
**kwargs: Additional keyword arguments e.g. `num_epochs`.
Yields:
A dictionary, mapping column names to the values resulting from running
each column for a single batch.
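For example (an illustrative sketch; `my_df` stands for any built
`TensorFlowDataFrame`):
'''
for batch in my_df.run(num_batches=2):
  print(sorted(batch.keys()))
'''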
"""
if graph is None:
graph = ops.get_default_graph()
with graph.as_default():
if session is None:
session = sess.Session()
self_built = self.build(**kwargs)
keys = list(self_built.keys())
cols = list(self_built.values())
if initialize_variables:
if variables.local_variables():
session.run(variables.local_variables_initializer())
if variables.global_variables():
session.run(variables.global_variables_initializer())
if start_queues:
coord = coordinator.Coordinator()
threads = qr.start_queue_runners(sess=session, coord=coord)
i = 0
while num_batches is None or i < num_batches:
i += 1
try:
values = session.run(cols)
yield collections.OrderedDict(zip(keys, values))
except errors.OutOfRangeError:
break
if start_queues:
coord.request_stop()
coord.join(threads)
def select_rows(self, boolean_series):
"""Returns a `DataFrame` with only the rows indicated by `boolean_series`.
Note that batches may no longer have consistent size after calling
`select_rows`, so the new `DataFrame` may need to be rebatched.
For example:
'''
filtered_df = df.select_rows(df["country"] == "jp").batch(64)
'''
Args:
boolean_series: a `Series` that evaluates to a boolean `Tensor`.
Returns:
A new `DataFrame` with the same columns as `self`, but selecting only the
rows where `boolean_series` evaluated to `True`.
"""
result = type(self)()
for key, col in self._columns.items():
try:
result[key] = col.select_rows(boolean_series)
except AttributeError as e:
raise NotImplementedError((
"The select_rows method is not implemented for Series type {}. "
"Original error: {}").format(type(col), e))
return result
def split(self, index_series, proportion, batch_size=None):
"""Deterministically split a `DataFrame` into two `DataFrame`s.
Note this split is only as deterministic as the underlying hash function;
see `tf.string_to_hash_bucket_fast`. The hash function is deterministic
for a given binary, but may change occasionally. The only way to achieve
an absolute guarantee that the split `DataFrame`s do not change across runs
is to materialize them.
Note too that the allocation of a row to one partition or the
other is evaluated independently for each row, so the exact number of rows
in each partition is binomially distributed.
Args:
index_series: a `Series` of unique strings, whose hash will determine the
partitioning; or the name in this `DataFrame` of such a `Series`.
(This `Series` must contain strings because TensorFlow provides hash
ops only for strings, and there are no number-to-string converter ops.)
proportion: The proportion of the rows to select for the 'left'
partition; the remaining (1 - proportion) rows form the 'right'
partition.
batch_size: the batch size to use when rebatching the left and right
`DataFrame`s. If None (default), the `DataFrame`s are not rebatched;
thus their batches will have variable sizes, according to which rows
are selected from each batch of the original `DataFrame`.
Returns:
Two `DataFrame`s containing the partitioned rows.
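For example (an illustrative sketch; the "user_id" column name is made up):
'''
train_df, eval_df = my_df.split("user_id", 0.75, batch_size=64)
'''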
"""
if isinstance(index_series, str):
index_series = self[index_series]
left_mask, = split_mask.SplitMask(proportion)(index_series)
right_mask = ~left_mask
left_rows = self.select_rows(left_mask)
right_rows = self.select_rows(right_mask)
if batch_size:
left_rows = left_rows.batch(batch_size=batch_size, shuffle=False)
right_rows = right_rows.batch(batch_size=batch_size, shuffle=False)
return left_rows, right_rows
def split_fast(self, index_series, proportion, batch_size,
base_batch_size=1000):
"""Deterministically split a `DataFrame` into two `DataFrame`s.
Note this split is only as deterministic as the underlying hash function;
see `tf.string_to_hash_bucket_fast`. The hash function is deterministic
for a given binary, but may change occasionally. The only way to achieve
an absolute guarantee that the split `DataFrame`s do not change across runs
is to materialize them.
Note too that the allocation of a row to one partition or the
other is evaluated independently for each row, so the exact number of rows
in each partition is binomially distributed.
Args:
index_series: a `Series` of unique strings, whose hash will determine the
partitioning; or the name in this `DataFrame` of such a `Series`.
(This `Series` must contain strings because TensorFlow provides hash
ops only for strings, and there are no number-to-string converter ops.)
proportion: The proportion of the rows to select for the 'left'
partition; the remaining (1 - proportion) rows form the 'right'
partition.
batch_size: the batch size to use when rebatching the left and right
`DataFrame`s. If None (default), the `DataFrame`s are not rebatched;
thus their batches will have variable sizes, according to which rows
are selected from each batch of the original `DataFrame`.
base_batch_size: the batch size to use for materialized data, prior to the
split.
Returns:
Two `DataFrame`s containing the partitioned rows.
"""
if isinstance(index_series, str):
index_series = self[index_series]
left_mask, = split_mask.SplitMask(proportion)(index_series)
right_mask = ~left_mask
self["left_mask__"] = left_mask
self["right_mask__"] = right_mask
# TODO(soergel): instead of base_batch_size can we just do one big batch?
# avoid computing the hashes twice
m = self.materialize_to_memory(batch_size=base_batch_size)
left_rows_df = m.select_rows(m["left_mask__"])
right_rows_df = m.select_rows(m["right_mask__"])
del left_rows_df[["left_mask__", "right_mask__"]]
del right_rows_df[["left_mask__", "right_mask__"]]
# avoid recomputing the split repeatedly
left_rows_df = left_rows_df.materialize_to_memory(batch_size=batch_size)
right_rows_df = right_rows_df.materialize_to_memory(batch_size=batch_size)
return left_rows_df, right_rows_df
def run_one_batch(self):
"""Creates a new `Graph` and `Session` and runs a single batch.
Returns:
A dictionary mapping column names to numpy arrays that contain a single
batch of the `DataFrame`.
"""
return list(self.run(num_batches=1))[0]
def run_one_epoch(self):
"""Creates a new `Graph` and `Session` and runs a single epoch.
Naturally this makes sense only for DataFrames that fit in memory.
Returns:
A dictionary mapping column names to numpy arrays that contain a single
epoch of the `DataFrame`.
"""
# batches is a list of dicts of numpy arrays
batches = [b for b in self.run(num_epochs=1)]
# first invert that to make a dict of lists of numpy arrays
pivoted_batches = {}
for k in batches[0].keys():
pivoted_batches[k] = []
for b in batches:
for k, v in b.items():
pivoted_batches[k].append(v)
# then concat the arrays in each column
result = {k: np.concatenate(column_batches)
for k, column_batches in pivoted_batches.items()}
return result
def materialize_to_memory(self, batch_size):
unordered_dict_of_arrays = self.run_one_epoch()
# there may already be an 'index' column, in which case from_ordereddict()
# below will complain because it wants to generate a new one.
# for now, just remove it.
# TODO(soergel): preserve index history, potentially many levels deep
del unordered_dict_of_arrays["index"]
# the order of the columns in this dict is arbitrary; we just need it to
# remain consistent.
ordered_dict_of_arrays = collections.OrderedDict(unordered_dict_of_arrays)
return TensorFlowDataFrame.from_ordereddict(ordered_dict_of_arrays,
batch_size=batch_size)
def batch(self,
batch_size,
shuffle=False,
num_threads=1,
queue_capacity=None,
min_after_dequeue=None,
seed=None):
"""Resize the batches in the `DataFrame` to the given `batch_size`.
Args:
batch_size: desired batch size.
shuffle: whether records should be shuffled. Defaults to false.
num_threads: the number of enqueueing threads.
queue_capacity: capacity of the queue that will hold new batches.
min_after_dequeue: minimum number of elements that can be left by a
dequeue operation. Only used if `shuffle` is true.
seed: passed to random shuffle operations. Only used if `shuffle` is true.
Returns:
A `DataFrame` with `batch_size` rows.
"""
column_names = list(self._columns.keys())
if shuffle:
batcher = batch.ShuffleBatch(batch_size,
output_names=column_names,
num_threads=num_threads,
queue_capacity=queue_capacity,
min_after_dequeue=min_after_dequeue,
seed=seed)
else:
batcher = batch.Batch(batch_size,
output_names=column_names,
num_threads=num_threads,
queue_capacity=queue_capacity)
batched_series = batcher(list(self._columns.values()))
dataframe = type(self)()
dataframe.assign(**(dict(zip(column_names, batched_series))))
return dataframe
@classmethod
def _from_csv_base(cls, filepatterns, get_default_values, has_header,
column_names, num_threads, enqueue_size,
batch_size, queue_capacity, min_after_dequeue, shuffle,
seed):
"""Create a `DataFrame` from CSV files.
If `has_header` is false, then `column_names` must be specified. If
`has_header` is true and `column_names` are specified, then `column_names`
overrides the names in the header.
Args:
filepatterns: a list of file patterns that resolve to CSV files.
get_default_values: a function that produces a list of default values for
each column, given the column names.
has_header: whether or not the CSV files have headers.
column_names: a list of names for the columns in the CSV files.
num_threads: the number of readers that will work in parallel.
enqueue_size: block size for each read operation.
batch_size: desired batch size.
queue_capacity: capacity of the queue that will store parsed lines.
min_after_dequeue: minimum number of elements that can be left by a
dequeue operation. Only used if `shuffle` is true.
shuffle: whether records should be shuffled. Defaults to true.
seed: passed to random shuffle operations. Only used if `shuffle` is true.
Returns:
A `DataFrame` that has columns corresponding to `features` and is filled
with examples from `filepatterns`.
Raises:
ValueError: no files match `filepatterns`.
ValueError: `features` contains the reserved name 'index'.
"""
filenames = _expand_file_names(filepatterns)
if not filenames:
raise ValueError("No matching file names.")
if column_names is None:
if not has_header:
raise ValueError("If column_names is None, has_header must be true.")
with gfile.GFile(filenames[0]) as f:
column_names = csv.DictReader(f).fieldnames
if "index" in column_names:
raise ValueError(
"'index' is reserved and can not be used for a column name.")
default_values = get_default_values(column_names)
reader_kwargs = {"skip_header_lines": (1 if has_header else 0)}
index, value = reader_source.TextFileSource(
filenames,
reader_kwargs=reader_kwargs,
enqueue_size=enqueue_size,
batch_size=batch_size,
queue_capacity=queue_capacity,
shuffle=shuffle,
min_after_dequeue=min_after_dequeue,
num_threads=num_threads,
seed=seed)()
parser = csv_parser.CSVParser(column_names, default_values)
parsed = parser(value)
column_dict = parsed._asdict()
column_dict["index"] = index
dataframe = cls()
dataframe.assign(**column_dict)
return dataframe
@classmethod
def from_csv(cls,
filepatterns,
default_values,
has_header=True,
column_names=None,
num_threads=1,
enqueue_size=None,
batch_size=32,
queue_capacity=None,
min_after_dequeue=None,
shuffle=True,
seed=None):
"""Create a `DataFrame` from CSV files.
If `has_header` is false, then `column_names` must be specified. If
`has_header` is true and `column_names` are specified, then `column_names`
overrides the names in the header.
Args:
filepatterns: a list of file patterns that resolve to CSV files.
default_values: a list of default values for each column.
has_header: whether or not the CSV files have headers.
column_names: a list of names for the columns in the CSV files.
num_threads: the number of readers that will work in parallel.
enqueue_size: block size for each read operation.
batch_size: desired batch size.
queue_capacity: capacity of the queue that will store parsed lines.
min_after_dequeue: minimum number of elements that can be left by a
dequeue operation. Only used if `shuffle` is true.
shuffle: whether records should be shuffled. Defaults to true.
seed: passed to random shuffle operations. Only used if `shuffle` is true.
Returns:
A `DataFrame` that has columns corresponding to `features` and is filled
with examples from `filepatterns`.
Raises:
ValueError: no files match `filepatterns`.
ValueError: `features` contains the reserved name 'index'.
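For example (an illustrative sketch; the file pattern and per-column default
values are made up and must match the actual CSV layout):
'''
csv_df = TensorFlowDataFrame.from_csv("data/train-*.csv",
                                      default_values=[0.0, 0.0, ""],
                                      batch_size=64)
'''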
"""
def get_default_values(column_names):
# pylint: disable=unused-argument
return default_values
return cls._from_csv_base(filepatterns, get_default_values, has_header,
column_names, num_threads,
enqueue_size, batch_size, queue_capacity,
min_after_dequeue, shuffle, seed)
@classmethod
def from_csv_with_feature_spec(cls,
filepatterns,
feature_spec,
has_header=True,
column_names=None,
num_threads=1,
enqueue_size=None,
batch_size=32,
queue_capacity=None,
min_after_dequeue=None,
shuffle=True,
seed=None):
"""Create a `DataFrame` from CSV files, given a feature_spec.
If `has_header` is false, then `column_names` must be specified. If
`has_header` is true and `column_names` are specified, then `column_names`
overrides the names in the header.
Args:
filepatterns: a list of file patterns that resolve to CSV files.
feature_spec: a dict mapping column names to `FixedLenFeature` or
`VarLenFeature`.
has_header: whether or not the CSV files have headers.
column_names: a list of names for the columns in the CSV files.
num_threads: the number of readers that will work in parallel.
enqueue_size: block size for each read operation.
batch_size: desired batch size.
queue_capacity: capacity of the queue that will store parsed lines.
min_after_dequeue: minimum number of elements that can be left by a
dequeue operation. Only used if `shuffle` is true.
shuffle: whether records should be shuffled. Defaults to true.
seed: passed to random shuffle operations. Only used if `shuffle` is true.
Returns:
A `DataFrame` that has columns corresponding to `features` and is filled
with examples from `filepatterns`.
Raises:
ValueError: no files match `filepatterns`.
ValueError: `features` contains the reserved name 'index'.
"""
def get_default_values(column_names):
return [_get_default_value(feature_spec[name]) for name in column_names]
dataframe = cls._from_csv_base(filepatterns, get_default_values, has_header,
column_names, num_threads,
enqueue_size, batch_size, queue_capacity,
min_after_dequeue, shuffle, seed)
# replace the dense columns with sparse ones in place in the dataframe
for name in dataframe.columns():
if name != "index" and isinstance(feature_spec[name],
parsing_ops.VarLenFeature):
strip_value = _get_default_value(feature_spec[name])
(dataframe[name],) = sparsify.Sparsify(strip_value)(dataframe[name])
return dataframe
@classmethod
def from_examples(cls,
filepatterns,
features,
reader_cls=io_ops.TFRecordReader,
num_threads=1,
enqueue_size=None,
batch_size=32,
queue_capacity=None,
min_after_dequeue=None,
shuffle=True,
seed=None):
"""Create a `DataFrame` from `tensorflow.Example`s.
Args:
filepatterns: a list of file patterns containing `tensorflow.Example`s.
features: a dict mapping feature names to `VarLenFeature` or
`FixedLenFeature`.
reader_cls: a subclass of `tensorflow.ReaderBase` that will be used to
read the `Example`s.
num_threads: the number of readers that will work in parallel.
enqueue_size: block size for each read operation.
batch_size: desired batch size.
queue_capacity: capacity of the queue that will store parsed `Example`s
min_after_dequeue: minimum number of elements that can be left by a
dequeue operation. Only used if `shuffle` is true.
shuffle: whether records should be shuffled. Defaults to true.
seed: passed to random shuffle operations. Only used if `shuffle` is true.
Returns:
A `DataFrame` that has columns corresponding to `features` and is filled
with `Example`s from `filepatterns`.
Raises:
ValueError: no files match `filepatterns`.
ValueError: `features` contains the reserved name 'index'.
"""
filenames = _expand_file_names(filepatterns)
if not filenames:
raise ValueError("No matching file names.")
if "index" in features:
raise ValueError(
"'index' is reserved and can not be used for a feature name.")
index, record = reader_source.ReaderSource(
reader_cls,
filenames,
enqueue_size=enqueue_size,
batch_size=batch_size,
queue_capacity=queue_capacity,
shuffle=shuffle,
min_after_dequeue=min_after_dequeue,
num_threads=num_threads,
seed=seed)()
parser = example_parser.ExampleParser(features)
parsed = parser(record)
column_dict = parsed._asdict()
column_dict["index"] = index
dataframe = cls()
dataframe.assign(**column_dict)
return dataframe
@classmethod
def from_pandas(cls,
pandas_dataframe,
num_threads=None,
enqueue_size=None,
batch_size=None,
queue_capacity=None,
min_after_dequeue=None,
shuffle=True,
seed=None,
data_name="pandas_data"):
"""Create a `tf.learn.DataFrame` from a `pandas.DataFrame`.
Args:
pandas_dataframe: `pandas.DataFrame` that serves as a data source.
num_threads: the number of threads to use for enqueueing.
enqueue_size: the number of rows to enqueue per step.
batch_size: desired batch size.
queue_capacity: capacity of the queue that will store parsed `Example`s
min_after_dequeue: minimum number of elements that can be left by a
dequeue operation. Only used if `shuffle` is true.
shuffle: whether records should be shuffled. Defaults to true.
seed: passed to random shuffle operations. Only used if `shuffle` is true.
data_name: a scope name identifying the data.
Returns:
A `tf.learn.DataFrame` that contains batches drawn from the given
`pandas_dataframe`.
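For example (an illustrative sketch):
'''
import pandas as pd
pandas_df = pd.DataFrame({"a": [1, 2, 3], "b": [4.0, 5.0, 6.0]})
tf_df = TensorFlowDataFrame.from_pandas(pandas_df, batch_size=2)
'''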
"""
pandas_source = in_memory_source.PandasSource(
pandas_dataframe,
num_threads=num_threads,
enqueue_size=enqueue_size,
batch_size=batch_size,
queue_capacity=queue_capacity,
shuffle=shuffle,
min_after_dequeue=min_after_dequeue,
seed=seed,
data_name=data_name)
dataframe = cls()
dataframe.assign(**(pandas_source()._asdict()))
return dataframe
@classmethod
def from_numpy(cls,
numpy_array,
num_threads=None,
enqueue_size=None,
batch_size=None,
queue_capacity=None,
min_after_dequeue=None,
shuffle=True,
seed=None,
data_name="numpy_data"):
"""Creates a `tf.learn.DataFrame` from a `numpy.ndarray`.
The returned `DataFrame` contains two columns: 'index' and 'value'. The
'value' column contains a row from the array. The 'index' column contains
the corresponding row number.
Args:
numpy_array: `numpy.ndarray` that serves as a data source.
num_threads: the number of threads to use for enqueueing.
enqueue_size: the number of rows to enqueue per step.
batch_size: desired batch size.
queue_capacity: capacity of the queue that will store parsed `Example`s
min_after_dequeue: minimum number of elements that can be left by a
dequeue operation. Only used if `shuffle` is true.
shuffle: whether records should be shuffled. Defaults to true.
seed: passed to random shuffle operations. Only used if `shuffle` is true.
data_name: a scope name identifying the data.
Returns:
A `tf.learn.DataFrame` that contains batches drawn from the given
array.
"""
numpy_source = in_memory_source.NumpySource(
numpy_array,
num_threads=num_threads,
enqueue_size=enqueue_size,
batch_size=batch_size,
queue_capacity=queue_capacity,
shuffle=shuffle,
min_after_dequeue=min_after_dequeue,
seed=seed,
data_name=data_name)
dataframe = cls()
dataframe.assign(**(numpy_source()._asdict()))
return dataframe
@classmethod
def from_ordereddict(cls,
ordered_dict_of_arrays,
num_threads=None,
enqueue_size=None,
batch_size=None,
queue_capacity=None,
min_after_dequeue=None,
shuffle=True,
seed=None,
data_name="numpy_data"):
"""Creates a `tf.learn.DataFrame` from an `OrderedDict` of `numpy.ndarray`.
The returned `DataFrame` contains a column for each key of the dict plus an
extra 'index' column. The 'index' column contains the row number. Each of
the other columns contains a row from the corresponding array.
Args:
ordered_dict_of_arrays: `OrderedDict` of `numpy.ndarray` that serves as a
data source.
num_threads: the number of threads to use for enqueueing.
enqueue_size: the number of rows to enqueue per step.
batch_size: desired batch size.
queue_capacity: capacity of the queue that will store parsed `Example`s
min_after_dequeue: minimum number of elements that can be left by a
dequeue operation. Only used if `shuffle` is true.
shuffle: whether records should be shuffled. Defaults to true.
seed: passed to random shuffle operations. Only used if `shuffle` is true.
data_name: a scope name identifying the data.
Returns:
A `tf.learn.DataFrame` that contains batches drawn from the given arrays.
Raises:
ValueError: `ordered_dict_of_arrays` contains the reserved name 'index'.
"""
numpy_source = in_memory_source.OrderedDictNumpySource(
ordered_dict_of_arrays,
num_threads=num_threads,
enqueue_size=enqueue_size,
batch_size=batch_size,
queue_capacity=queue_capacity,
shuffle=shuffle,
min_after_dequeue=min_after_dequeue,
seed=seed,
data_name=data_name)
dataframe = cls()
dataframe.assign(**(numpy_source()._asdict()))
return dataframe
| mit |
larsmans/scikit-learn | doc/tutorial/text_analytics/skeletons/exercise_01_language_train_model.py | 254 | 2005 |
"""Build a language detector model
The goal of this exercise is to train a linear classifier on text features
that represent sequences of up to 3 consecutive characters so as to
recognize natural languages by using the frequencies of short character
sequences as 'fingerprints'.
"""
# Author: Olivier Grisel <[email protected]>
# License: Simplified BSD
import sys
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import Perceptron
from sklearn.pipeline import Pipeline
from sklearn.datasets import load_files
from sklearn.cross_validation import train_test_split
from sklearn import metrics
# The training data folder must be passed as first argument
languages_data_folder = sys.argv[1]
dataset = load_files(languages_data_folder)
# Split the dataset in training and test set:
docs_train, docs_test, y_train, y_test = train_test_split(
dataset.data, dataset.target, test_size=0.5)
# TASK: Build a vectorizer that splits strings into sequences of 1 to 3
# characters instead of word tokens
# TASK: Build a vectorizer / classifier pipeline using the previous analyzer
# the pipeline instance should be stored in a variable named clf
# TASK: Fit the pipeline on the training set
# TASK: Predict the outcome on the testing set in a variable named y_predicted
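# One possible completion of the TASKs above (an illustrative sketch, not
# necessarily the reference solution): a character n-gram TF-IDF vectorizer
# feeding a Perceptron, stored in `clf` as required.
vectorizer = TfidfVectorizer(ngram_range=(1, 3), analyzer='char',
                             use_idf=False)
clf = Pipeline([
    ('vec', vectorizer),
    ('clf', Perceptron()),
])
clf.fit(docs_train, y_train)
y_predicted = clf.predict(docs_test)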
# Print the classification report
print(metrics.classification_report(y_test, y_predicted,
target_names=dataset.target_names))
# Plot the confusion matrix
cm = metrics.confusion_matrix(y_test, y_predicted)
print(cm)
#import pylab as pl
#pl.matshow(cm, cmap=pl.cm.jet)
#pl.show()
# Predict the result on some short new sentences:
sentences = [
u'This is a language detection test.',
u'Ceci est un test de d\xe9tection de la langue.',
u'Dies ist ein Test, um die Sprache zu erkennen.',
]
predicted = clf.predict(sentences)
for s, p in zip(sentences, predicted):
print(u'The language of "%s" is "%s"' % (s, dataset.target_names[p]))
| bsd-3-clause |
rubikloud/scikit-learn | sklearn/datasets/samples_generator.py | 20 | 56502 |
"""
Generate samples of synthetic data sets.
"""
# Authors: B. Thirion, G. Varoquaux, A. Gramfort, V. Michel, O. Grisel,
# G. Louppe, J. Nothman
# License: BSD 3 clause
import numbers
import array
import numpy as np
from scipy import linalg
import scipy.sparse as sp
from ..preprocessing import MultiLabelBinarizer
from ..utils import check_array, check_random_state
from ..utils import shuffle as util_shuffle
from ..utils.fixes import astype
from ..utils.random import sample_without_replacement
from ..externals import six
map = six.moves.map
zip = six.moves.zip
def _generate_hypercube(samples, dimensions, rng):
"""Returns distinct binary samples of length dimensions
"""
if dimensions > 30:
return np.hstack([_generate_hypercube(samples, dimensions - 30, rng),
_generate_hypercube(samples, 30, rng)])
out = astype(sample_without_replacement(2 ** dimensions, samples,
random_state=rng),
dtype='>u4', copy=False)
out = np.unpackbits(out.view('>u1')).reshape((-1, 32))[:, -dimensions:]
return out
def make_classification(n_samples=100, n_features=20, n_informative=2,
n_redundant=2, n_repeated=0, n_classes=2,
n_clusters_per_class=2, weights=None, flip_y=0.01,
class_sep=1.0, hypercube=True, shift=0.0, scale=1.0,
shuffle=True, random_state=None):
"""Generate a random n-class classification problem.
This initially creates clusters of points normally distributed (std=1)
about vertices of a `2 * class_sep`-sided hypercube, and assigns an equal
number of clusters to each class. It introduces interdependence between
these features and adds various types of further noise to the data.
Prior to shuffling, `X` stacks a number of these primary "informative"
features, "redundant" linear combinations of these, "repeated" duplicates
of sampled features, and arbitrary noise for any remaining features.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=20)
The total number of features. These comprise `n_informative`
informative features, `n_redundant` redundant features, `n_repeated`
duplicated features and `n_features-n_informative-n_redundant-
n_repeated` useless features drawn at random.
n_informative : int, optional (default=2)
The number of informative features. Each class is composed of a number
of gaussian clusters each located around the vertices of a hypercube
in a subspace of dimension `n_informative`. For each cluster,
informative features are drawn independently from N(0, 1) and then
randomly linearly combined within each cluster in order to add
covariance. The clusters are then placed on the vertices of the
hypercube.
n_redundant : int, optional (default=2)
The number of redundant features. These features are generated as
random linear combinations of the informative features.
n_repeated : int, optional (default=0)
The number of duplicated features, drawn randomly from the informative
and the redundant features.
n_classes : int, optional (default=2)
The number of classes (or labels) of the classification problem.
n_clusters_per_class : int, optional (default=2)
The number of clusters per class.
weights : list of floats or None (default=None)
The proportions of samples assigned to each class. If None, then
classes are balanced. Note that if `len(weights) == n_classes - 1`,
then the last class weight is automatically inferred.
More than `n_samples` samples may be returned if the sum of `weights`
exceeds 1.
flip_y : float, optional (default=0.01)
The fraction of samples whose class are randomly exchanged.
class_sep : float, optional (default=1.0)
The factor multiplying the hypercube dimension.
hypercube : boolean, optional (default=True)
If True, the clusters are put on the vertices of a hypercube. If
False, the clusters are put on the vertices of a random polytope.
shift : float, array of shape [n_features] or None, optional (default=0.0)
Shift features by the specified value. If None, then features
are shifted by a random value drawn in [-class_sep, class_sep].
scale : float, array of shape [n_features] or None, optional (default=1.0)
Multiply features by the specified value. If None, then features
are scaled by a random value drawn in [1, 100]. Note that scaling
happens after shifting.
shuffle : boolean, optional (default=True)
Shuffle the samples and the features.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The generated samples.
y : array of shape [n_samples]
The integer labels for class membership of each sample.
Notes
-----
The algorithm is adapted from Guyon [1] and was designed to generate
the "Madelon" dataset.
References
----------
.. [1] I. Guyon, "Design of experiments for the NIPS 2003 variable
selection benchmark", 2003.
See also
--------
make_blobs: simplified variant
make_multilabel_classification: unrelated generator for multilabel tasks
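Examples
--------
A minimal usage sketch (the shapes follow the defaults described above):
>>> from sklearn.datasets import make_classification
>>> X, y = make_classification(n_samples=100, n_features=20, random_state=0)
>>> X.shape
(100, 20)
>>> y.shape
(100,)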
"""
generator = check_random_state(random_state)
# Count features, clusters and samples
if n_informative + n_redundant + n_repeated > n_features:
raise ValueError("Number of informative, redundant and repeated "
"features must sum to less than the number of total"
" features")
if 2 ** n_informative < n_classes * n_clusters_per_class:
raise ValueError("n_classes * n_clusters_per_class must"
" be smaller than or equal to 2 ** n_informative")
if weights and len(weights) not in [n_classes, n_classes - 1]:
raise ValueError("Weights specified but incompatible with number "
"of classes.")
n_useless = n_features - n_informative - n_redundant - n_repeated
n_clusters = n_classes * n_clusters_per_class
if weights and len(weights) == (n_classes - 1):
weights.append(1.0 - sum(weights))
if weights is None:
weights = [1.0 / n_classes] * n_classes
weights[-1] = 1.0 - sum(weights[:-1])
# Distribute samples among clusters by weight
n_samples_per_cluster = []
for k in range(n_clusters):
n_samples_per_cluster.append(int(n_samples * weights[k % n_classes]
/ n_clusters_per_class))
for i in range(n_samples - sum(n_samples_per_cluster)):
n_samples_per_cluster[i % n_clusters] += 1
# Initialize X and y
X = np.zeros((n_samples, n_features))
y = np.zeros(n_samples, dtype=np.int)
# Build the polytope whose vertices become cluster centroids
centroids = _generate_hypercube(n_clusters, n_informative,
generator).astype(float)
centroids *= 2 * class_sep
centroids -= class_sep
if not hypercube:
centroids *= generator.rand(n_clusters, 1)
centroids *= generator.rand(1, n_informative)
# Initially draw informative features from the standard normal
X[:, :n_informative] = generator.randn(n_samples, n_informative)
# Create each cluster; a variant of make_blobs
stop = 0
for k, centroid in enumerate(centroids):
start, stop = stop, stop + n_samples_per_cluster[k]
y[start:stop] = k % n_classes # assign labels
X_k = X[start:stop, :n_informative] # slice a view of the cluster
A = 2 * generator.rand(n_informative, n_informative) - 1
X_k[...] = np.dot(X_k, A) # introduce random covariance
X_k += centroid # shift the cluster to a vertex
# Create redundant features
if n_redundant > 0:
B = 2 * generator.rand(n_informative, n_redundant) - 1
X[:, n_informative:n_informative + n_redundant] = \
np.dot(X[:, :n_informative], B)
# Repeat some features
if n_repeated > 0:
n = n_informative + n_redundant
indices = ((n - 1) * generator.rand(n_repeated) + 0.5).astype(np.intp)
X[:, n:n + n_repeated] = X[:, indices]
# Fill useless features
if n_useless > 0:
X[:, -n_useless:] = generator.randn(n_samples, n_useless)
# Randomly replace labels
if flip_y >= 0.0:
flip_mask = generator.rand(n_samples) < flip_y
y[flip_mask] = generator.randint(n_classes, size=flip_mask.sum())
# Randomly shift and scale
if shift is None:
shift = (2 * generator.rand(n_features) - 1) * class_sep
X += shift
if scale is None:
scale = 1 + 100 * generator.rand(n_features)
X *= scale
if shuffle:
# Randomly permute samples
X, y = util_shuffle(X, y, random_state=generator)
# Randomly permute features
indices = np.arange(n_features)
generator.shuffle(indices)
X[:, :] = X[:, indices]
return X, y
def make_multilabel_classification(n_samples=100, n_features=20, n_classes=5,
n_labels=2, length=50, allow_unlabeled=True,
sparse=False, return_indicator='dense',
return_distributions=False,
random_state=None):
"""Generate a random multilabel classification problem.
For each sample, the generative process is:
- pick the number of labels: n ~ Poisson(n_labels)
- n times, choose a class c: c ~ Multinomial(theta)
- pick the document length: k ~ Poisson(length)
- k times, choose a word: w ~ Multinomial(theta_c)
In the above process, rejection sampling is used to make sure that
n is never zero or more than `n_classes`, and that the document length
is never zero. Likewise, we reject classes which have already been chosen.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=20)
The total number of features.
n_classes : int, optional (default=5)
The number of classes of the classification problem.
n_labels : int, optional (default=2)
The average number of labels per instance. More precisely, the number
of labels per sample is drawn from a Poisson distribution with
``n_labels`` as its expected value, but samples are bounded (using
rejection sampling) by ``n_classes``, and must be nonzero if
``allow_unlabeled`` is False.
length : int, optional (default=50)
The sum of the features (number of words if documents) is drawn from
a Poisson distribution with this expected value.
allow_unlabeled : bool, optional (default=True)
If ``True``, some instances might not belong to any class.
sparse : bool, optional (default=False)
If ``True``, return a sparse feature matrix
.. versionadded:: 0.17
parameter to allow *sparse* output.
return_indicator : 'dense' (default) | 'sparse' | False
        If ``'dense'``, return ``Y`` in the dense binary indicator format. If
        ``'sparse'``, return ``Y`` in the sparse binary indicator format.
        ``False`` returns a list of lists of labels.
return_distributions : bool, optional (default=False)
If ``True``, return the prior class probability and conditional
probabilities of features given classes, from which the data was
drawn.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The generated samples.
Y : array or sparse CSR matrix of shape [n_samples, n_classes]
The label sets.
p_c : array, shape [n_classes]
The probability of each class being drawn. Only returned if
``return_distributions=True``.
p_w_c : array, shape [n_features, n_classes]
The probability of each feature being drawn given each class.
Only returned if ``return_distributions=True``.
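    Examples
    --------
    A minimal usage sketch; with the default dense indicator output, ``Y``
    has shape (n_samples, n_classes):
    >>> from sklearn.datasets import make_multilabel_classification
    >>> X, Y = make_multilabel_classification(n_samples=10, n_features=20,
    ...                                       n_classes=5, random_state=0)
    >>> print(X.shape)
    (10, 20)
    >>> print(Y.shape)
    (10, 5)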
"""
generator = check_random_state(random_state)
p_c = generator.rand(n_classes)
p_c /= p_c.sum()
cumulative_p_c = np.cumsum(p_c)
p_w_c = generator.rand(n_features, n_classes)
p_w_c /= np.sum(p_w_c, axis=0)
def sample_example():
_, n_classes = p_w_c.shape
        # pick the number of labels by rejection sampling: never more than
        # n_classes, and nonzero unless allow_unlabeled is True
y_size = n_classes + 1
while (not allow_unlabeled and y_size == 0) or y_size > n_classes:
y_size = generator.poisson(n_labels)
# pick n classes
y = set()
while len(y) != y_size:
# pick a class with probability P(c)
c = np.searchsorted(cumulative_p_c,
generator.rand(y_size - len(y)))
y.update(c)
y = list(y)
# pick a non-zero document length by rejection sampling
n_words = 0
while n_words == 0:
n_words = generator.poisson(length)
# generate a document of length n_words
if len(y) == 0:
# if sample does not belong to any class, generate noise word
words = generator.randint(n_features, size=n_words)
return words, y
# sample words with replacement from selected classes
cumulative_p_w_sample = p_w_c.take(y, axis=1).sum(axis=1).cumsum()
cumulative_p_w_sample /= cumulative_p_w_sample[-1]
words = np.searchsorted(cumulative_p_w_sample, generator.rand(n_words))
return words, y
X_indices = array.array('i')
X_indptr = array.array('i', [0])
Y = []
for i in range(n_samples):
words, y = sample_example()
X_indices.extend(words)
X_indptr.append(len(X_indices))
Y.append(y)
X_data = np.ones(len(X_indices), dtype=np.float64)
X = sp.csr_matrix((X_data, X_indices, X_indptr),
shape=(n_samples, n_features))
X.sum_duplicates()
if not sparse:
X = X.toarray()
# return_indicator can be True due to backward compatibility
if return_indicator in (True, 'sparse', 'dense'):
lb = MultiLabelBinarizer(sparse_output=(return_indicator == 'sparse'))
Y = lb.fit([range(n_classes)]).transform(Y)
elif return_indicator is not False:
raise ValueError("return_indicator must be either 'sparse', 'dense' "
'or False.')
if return_distributions:
return X, Y, p_c, p_w_c
return X, Y
def make_hastie_10_2(n_samples=12000, random_state=None):
"""Generates data for binary classification used in
Hastie et al. 2009, Example 10.2.
The ten features are standard independent Gaussian and
the target ``y`` is defined by::
y[i] = 1 if np.sum(X[i] ** 2) > 9.34 else -1
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=12000)
The number of samples.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 10]
The input samples.
y : array of shape [n_samples]
The output values.
References
----------
.. [1] T. Hastie, R. Tibshirani and J. Friedman, "Elements of Statistical
Learning Ed. 2", Springer, 2009.
See also
--------
make_gaussian_quantiles: a generalization of this dataset approach
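    Examples
    --------
    A minimal usage sketch; the data always has ten features and the target
    takes values -1 or +1:
    >>> from sklearn.datasets.samples_generator import make_hastie_10_2
    >>> X, y = make_hastie_10_2(n_samples=100, random_state=0)
    >>> print(X.shape)
    (100, 10)
    >>> print(y.shape)
    (100,)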
"""
rs = check_random_state(random_state)
shape = (n_samples, 10)
X = rs.normal(size=shape).reshape(shape)
y = ((X ** 2.0).sum(axis=1) > 9.34).astype(np.float64)
y[y == 0.0] = -1.0
return X, y
def make_regression(n_samples=100, n_features=100, n_informative=10,
n_targets=1, bias=0.0, effective_rank=None,
tail_strength=0.5, noise=0.0, shuffle=True, coef=False,
random_state=None):
"""Generate a random regression problem.
The input set can either be well conditioned (by default) or have a low
rank-fat tail singular profile. See :func:`make_low_rank_matrix` for
more details.
The output is generated by applying a (potentially biased) random linear
regression model with `n_informative` nonzero regressors to the previously
generated input and some gaussian centered noise with some adjustable
scale.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=100)
The number of features.
n_informative : int, optional (default=10)
The number of informative features, i.e., the number of features used
to build the linear model used to generate the output.
n_targets : int, optional (default=1)
The number of regression targets, i.e., the dimension of the y output
vector associated with a sample. By default, the output is a scalar.
bias : float, optional (default=0.0)
The bias term in the underlying linear model.
effective_rank : int or None, optional (default=None)
if not None:
The approximate number of singular vectors required to explain most
of the input data by linear combinations. Using this kind of
singular spectrum in the input allows the generator to reproduce
the correlations often observed in practice.
if None:
The input set is well conditioned, centered and gaussian with
unit variance.
tail_strength : float between 0.0 and 1.0, optional (default=0.5)
The relative importance of the fat noisy tail of the singular values
profile if `effective_rank` is not None.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise applied to the output.
shuffle : boolean, optional (default=True)
Shuffle the samples and the features.
coef : boolean, optional (default=False)
If True, the coefficients of the underlying linear model are returned.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The input samples.
y : array of shape [n_samples] or [n_samples, n_targets]
The output values.
coef : array of shape [n_features] or [n_features, n_targets], optional
The coefficient of the underlying linear model. It is returned only if
coef is True.
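    Examples
    --------
    A minimal usage sketch; with ``coef=True`` the underlying coefficients
    are returned as well:
    >>> from sklearn.datasets.samples_generator import make_regression
    >>> X, y, coef = make_regression(n_samples=100, n_features=10,
    ...                              n_informative=3, coef=True,
    ...                              random_state=0)
    >>> print(X.shape)
    (100, 10)
    >>> print(y.shape)
    (100,)
    >>> print(coef.shape)
    (10,)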
"""
n_informative = min(n_features, n_informative)
generator = check_random_state(random_state)
if effective_rank is None:
# Randomly generate a well conditioned input set
X = generator.randn(n_samples, n_features)
else:
# Randomly generate a low rank, fat tail input set
X = make_low_rank_matrix(n_samples=n_samples,
n_features=n_features,
effective_rank=effective_rank,
tail_strength=tail_strength,
random_state=generator)
# Generate a ground truth model with only n_informative features being non
# zeros (the other features are not correlated to y and should be ignored
# by a sparsifying regularizers such as L1 or elastic net)
ground_truth = np.zeros((n_features, n_targets))
ground_truth[:n_informative, :] = 100 * generator.rand(n_informative,
n_targets)
y = np.dot(X, ground_truth) + bias
# Add noise
if noise > 0.0:
y += generator.normal(scale=noise, size=y.shape)
# Randomly permute samples and features
if shuffle:
X, y = util_shuffle(X, y, random_state=generator)
indices = np.arange(n_features)
generator.shuffle(indices)
X[:, :] = X[:, indices]
ground_truth = ground_truth[indices]
y = np.squeeze(y)
if coef:
return X, y, np.squeeze(ground_truth)
else:
return X, y
def make_circles(n_samples=100, shuffle=True, noise=None, random_state=None,
factor=.8):
"""Make a large circle containing a smaller circle in 2d.
A simple toy dataset to visualize clustering and classification
algorithms.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The total number of points generated.
shuffle: bool, optional (default=True)
Whether to shuffle the samples.
noise : double or None (default=None)
Standard deviation of Gaussian noise added to the data.
factor : double < 1 (default=.8)
Scale factor between inner and outer circle.
Returns
-------
X : array of shape [n_samples, 2]
The generated samples.
y : array of shape [n_samples]
The integer labels (0 or 1) for class membership of each sample.
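    Examples
    --------
    A minimal usage sketch; half of the points lie on the outer circle and
    half on the inner circle:
    >>> from sklearn.datasets.samples_generator import make_circles
    >>> X, y = make_circles(n_samples=100, noise=0.05, random_state=0)
    >>> print(X.shape)
    (100, 2)
    >>> print(y.shape)
    (100,)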
"""
if factor > 1 or factor < 0:
raise ValueError("'factor' has to be between 0 and 1.")
generator = check_random_state(random_state)
# so as not to have the first point = last point, we add one and then
# remove it.
linspace = np.linspace(0, 2 * np.pi, n_samples // 2 + 1)[:-1]
outer_circ_x = np.cos(linspace)
outer_circ_y = np.sin(linspace)
inner_circ_x = outer_circ_x * factor
inner_circ_y = outer_circ_y * factor
X = np.vstack((np.append(outer_circ_x, inner_circ_x),
np.append(outer_circ_y, inner_circ_y))).T
y = np.hstack([np.zeros(n_samples // 2, dtype=np.intp),
np.ones(n_samples // 2, dtype=np.intp)])
if shuffle:
X, y = util_shuffle(X, y, random_state=generator)
if noise is not None:
X += generator.normal(scale=noise, size=X.shape)
return X, y
def make_moons(n_samples=100, shuffle=True, noise=None, random_state=None):
"""Make two interleaving half circles
A simple toy dataset to visualize clustering and classification
algorithms.
Parameters
----------
n_samples : int, optional (default=100)
The total number of points generated.
shuffle : bool, optional (default=True)
Whether to shuffle the samples.
noise : double or None (default=None)
Standard deviation of Gaussian noise added to the data.
Read more in the :ref:`User Guide <sample_generators>`.
Returns
-------
X : array of shape [n_samples, 2]
The generated samples.
y : array of shape [n_samples]
The integer labels (0 or 1) for class membership of each sample.
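    Examples
    --------
    A minimal usage sketch; each half circle receives roughly half of the
    samples:
    >>> from sklearn.datasets.samples_generator import make_moons
    >>> X, y = make_moons(n_samples=100, noise=0.1, random_state=0)
    >>> print(X.shape)
    (100, 2)
    >>> print(y.shape)
    (100,)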
"""
n_samples_out = n_samples // 2
n_samples_in = n_samples - n_samples_out
generator = check_random_state(random_state)
outer_circ_x = np.cos(np.linspace(0, np.pi, n_samples_out))
outer_circ_y = np.sin(np.linspace(0, np.pi, n_samples_out))
inner_circ_x = 1 - np.cos(np.linspace(0, np.pi, n_samples_in))
inner_circ_y = 1 - np.sin(np.linspace(0, np.pi, n_samples_in)) - .5
X = np.vstack((np.append(outer_circ_x, inner_circ_x),
np.append(outer_circ_y, inner_circ_y))).T
    y = np.hstack([np.zeros(n_samples_out, dtype=np.intp),
                   np.ones(n_samples_in, dtype=np.intp)])
if shuffle:
X, y = util_shuffle(X, y, random_state=generator)
if noise is not None:
X += generator.normal(scale=noise, size=X.shape)
return X, y
def make_blobs(n_samples=100, n_features=2, centers=3, cluster_std=1.0,
center_box=(-10.0, 10.0), shuffle=True, random_state=None):
"""Generate isotropic Gaussian blobs for clustering.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The total number of points equally divided among clusters.
n_features : int, optional (default=2)
The number of features for each sample.
centers : int or array of shape [n_centers, n_features], optional
(default=3)
The number of centers to generate, or the fixed center locations.
cluster_std: float or sequence of floats, optional (default=1.0)
The standard deviation of the clusters.
center_box: pair of floats (min, max), optional (default=(-10.0, 10.0))
The bounding box for each cluster center when centers are
generated at random.
shuffle : boolean, optional (default=True)
Shuffle the samples.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The generated samples.
y : array of shape [n_samples]
The integer labels for cluster membership of each sample.
Examples
--------
>>> from sklearn.datasets.samples_generator import make_blobs
>>> X, y = make_blobs(n_samples=10, centers=3, n_features=2,
... random_state=0)
>>> print(X.shape)
(10, 2)
>>> y
array([0, 0, 1, 0, 2, 2, 2, 1, 1, 0])
See also
--------
make_classification: a more intricate variant
"""
generator = check_random_state(random_state)
if isinstance(centers, numbers.Integral):
centers = generator.uniform(center_box[0], center_box[1],
size=(centers, n_features))
else:
centers = check_array(centers)
n_features = centers.shape[1]
if isinstance(cluster_std, numbers.Real):
cluster_std = np.ones(len(centers)) * cluster_std
X = []
y = []
n_centers = centers.shape[0]
n_samples_per_center = [int(n_samples // n_centers)] * n_centers
for i in range(n_samples % n_centers):
n_samples_per_center[i] += 1
for i, (n, std) in enumerate(zip(n_samples_per_center, cluster_std)):
X.append(centers[i] + generator.normal(scale=std,
size=(n, n_features)))
y += [i] * n
X = np.concatenate(X)
y = np.array(y)
if shuffle:
indices = np.arange(n_samples)
generator.shuffle(indices)
X = X[indices]
y = y[indices]
return X, y
def make_friedman1(n_samples=100, n_features=10, noise=0.0, random_state=None):
"""Generate the "Friedman \#1" regression problem
This dataset is described in Friedman [1] and Breiman [2].
Inputs `X` are independent features uniformly distributed on the interval
[0, 1]. The output `y` is created according to the formula::
y(X) = 10 * sin(pi * X[:, 0] * X[:, 1]) + 20 * (X[:, 2] - 0.5) ** 2 \
+ 10 * X[:, 3] + 5 * X[:, 4] + noise * N(0, 1).
Out of the `n_features` features, only 5 are actually used to compute
`y`. The remaining features are independent of `y`.
The number of features has to be >= 5.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=10)
The number of features. Should be at least 5.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise applied to the output.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The input samples.
y : array of shape [n_samples]
The output values.
References
----------
.. [1] J. Friedman, "Multivariate adaptive regression splines", The Annals
of Statistics 19 (1), pages 1-67, 1991.
.. [2] L. Breiman, "Bagging predictors", Machine Learning 24,
pages 123-140, 1996.
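    Examples
    --------
    A minimal usage sketch; only the first five of the ten features
    influence the output:
    >>> from sklearn.datasets.samples_generator import make_friedman1
    >>> X, y = make_friedman1(n_samples=100, n_features=10, noise=0.1,
    ...                       random_state=0)
    >>> print(X.shape)
    (100, 10)
    >>> print(y.shape)
    (100,)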
"""
if n_features < 5:
raise ValueError("n_features must be at least five.")
generator = check_random_state(random_state)
X = generator.rand(n_samples, n_features)
y = 10 * np.sin(np.pi * X[:, 0] * X[:, 1]) + 20 * (X[:, 2] - 0.5) ** 2 \
+ 10 * X[:, 3] + 5 * X[:, 4] + noise * generator.randn(n_samples)
return X, y
def make_friedman2(n_samples=100, noise=0.0, random_state=None):
"""Generate the "Friedman \#2" regression problem
This dataset is described in Friedman [1] and Breiman [2].
Inputs `X` are 4 independent features uniformly distributed on the
intervals::
0 <= X[:, 0] <= 100,
40 * pi <= X[:, 1] <= 560 * pi,
0 <= X[:, 2] <= 1,
1 <= X[:, 3] <= 11.
The output `y` is created according to the formula::
y(X) = (X[:, 0] ** 2 + (X[:, 1] * X[:, 2] \
- 1 / (X[:, 1] * X[:, 3])) ** 2) ** 0.5 + noise * N(0, 1).
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise applied to the output.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 4]
The input samples.
y : array of shape [n_samples]
The output values.
References
----------
.. [1] J. Friedman, "Multivariate adaptive regression splines", The Annals
of Statistics 19 (1), pages 1-67, 1991.
.. [2] L. Breiman, "Bagging predictors", Machine Learning 24,
pages 123-140, 1996.
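    Examples
    --------
    A minimal usage sketch; the input always has four features drawn on the
    intervals listed above:
    >>> from sklearn.datasets.samples_generator import make_friedman2
    >>> X, y = make_friedman2(n_samples=100, noise=0.1, random_state=0)
    >>> print(X.shape)
    (100, 4)
    >>> print(y.shape)
    (100,)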
"""
generator = check_random_state(random_state)
X = generator.rand(n_samples, 4)
X[:, 0] *= 100
X[:, 1] *= 520 * np.pi
X[:, 1] += 40 * np.pi
X[:, 3] *= 10
X[:, 3] += 1
y = (X[:, 0] ** 2
+ (X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3])) ** 2) ** 0.5 \
+ noise * generator.randn(n_samples)
return X, y
def make_friedman3(n_samples=100, noise=0.0, random_state=None):
"""Generate the "Friedman \#3" regression problem
This dataset is described in Friedman [1] and Breiman [2].
Inputs `X` are 4 independent features uniformly distributed on the
intervals::
0 <= X[:, 0] <= 100,
40 * pi <= X[:, 1] <= 560 * pi,
0 <= X[:, 2] <= 1,
1 <= X[:, 3] <= 11.
The output `y` is created according to the formula::
y(X) = arctan((X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3])) \
/ X[:, 0]) + noise * N(0, 1).
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise applied to the output.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 4]
The input samples.
y : array of shape [n_samples]
The output values.
References
----------
.. [1] J. Friedman, "Multivariate adaptive regression splines", The Annals
of Statistics 19 (1), pages 1-67, 1991.
.. [2] L. Breiman, "Bagging predictors", Machine Learning 24,
pages 123-140, 1996.
"""
generator = check_random_state(random_state)
X = generator.rand(n_samples, 4)
X[:, 0] *= 100
X[:, 1] *= 520 * np.pi
X[:, 1] += 40 * np.pi
X[:, 3] *= 10
X[:, 3] += 1
y = np.arctan((X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3])) / X[:, 0]) \
+ noise * generator.randn(n_samples)
return X, y
def make_low_rank_matrix(n_samples=100, n_features=100, effective_rank=10,
tail_strength=0.5, random_state=None):
"""Generate a mostly low rank matrix with bell-shaped singular values
Most of the variance can be explained by a bell-shaped curve of width
effective_rank: the low rank part of the singular values profile is::
(1 - tail_strength) * exp(-1.0 * (i / effective_rank) ** 2)
The remaining singular values' tail is fat, decreasing as::
tail_strength * exp(-0.1 * i / effective_rank).
The low rank part of the profile can be considered the structured
signal part of the data while the tail can be considered the noisy
part of the data that cannot be summarized by a low number of linear
components (singular vectors).
    This kind of singular profile is often seen in practice, for instance:
- gray level pictures of faces
- TF-IDF vectors of text documents crawled from the web
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=100)
The number of features.
effective_rank : int, optional (default=10)
The approximate number of singular vectors required to explain most of
the data by linear combinations.
tail_strength : float between 0.0 and 1.0, optional (default=0.5)
The relative importance of the fat noisy tail of the singular values
profile.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The matrix.
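    Examples
    --------
    A minimal usage sketch; the output has shape (n_samples, n_features):
    >>> from sklearn.datasets.samples_generator import make_low_rank_matrix
    >>> X = make_low_rank_matrix(n_samples=50, n_features=25,
    ...                          effective_rank=5, random_state=0)
    >>> print(X.shape)
    (50, 25)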
"""
generator = check_random_state(random_state)
n = min(n_samples, n_features)
    # Random (orthonormal) vectors
u, _ = linalg.qr(generator.randn(n_samples, n), mode='economic')
v, _ = linalg.qr(generator.randn(n_features, n), mode='economic')
# Index of the singular values
singular_ind = np.arange(n, dtype=np.float64)
# Build the singular profile by assembling signal and noise components
low_rank = ((1 - tail_strength) *
np.exp(-1.0 * (singular_ind / effective_rank) ** 2))
tail = tail_strength * np.exp(-0.1 * singular_ind / effective_rank)
s = np.identity(n) * (low_rank + tail)
return np.dot(np.dot(u, s), v.T)
def make_sparse_coded_signal(n_samples, n_components, n_features,
n_nonzero_coefs, random_state=None):
"""Generate a signal as a sparse combination of dictionary elements.
    Returns a matrix Y = DX, such that D is (n_features, n_components),
X is (n_components, n_samples) and each column of X has exactly
n_nonzero_coefs non-zero elements.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int
number of samples to generate
n_components: int,
number of components in the dictionary
n_features : int
number of features of the dataset to generate
n_nonzero_coefs : int
number of active (non-zero) coefficients in each sample
random_state: int or RandomState instance, optional (default=None)
seed used by the pseudo random number generator
Returns
-------
data: array of shape [n_features, n_samples]
The encoded signal (Y).
dictionary: array of shape [n_features, n_components]
The dictionary with normalized components (D).
code: array of shape [n_components, n_samples]
The sparse code such that each column of this matrix has exactly
n_nonzero_coefs non-zero items (X).
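    Examples
    --------
    A minimal usage sketch; note that the signal matrix is returned with
    shape (n_features, n_samples):
    >>> from sklearn.datasets import make_sparse_coded_signal
    >>> Y, D, X = make_sparse_coded_signal(n_samples=10, n_components=30,
    ...                                    n_features=20, n_nonzero_coefs=5,
    ...                                    random_state=0)
    >>> print(Y.shape)
    (20, 10)
    >>> print(D.shape)
    (20, 30)
    >>> print(X.shape)
    (30, 10)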
"""
generator = check_random_state(random_state)
# generate dictionary
D = generator.randn(n_features, n_components)
D /= np.sqrt(np.sum((D ** 2), axis=0))
# generate code
X = np.zeros((n_components, n_samples))
for i in range(n_samples):
idx = np.arange(n_components)
generator.shuffle(idx)
idx = idx[:n_nonzero_coefs]
X[idx, i] = generator.randn(n_nonzero_coefs)
# encode signal
Y = np.dot(D, X)
return map(np.squeeze, (Y, D, X))
def make_sparse_uncorrelated(n_samples=100, n_features=10, random_state=None):
"""Generate a random regression problem with sparse uncorrelated design
    This dataset is described in Celeux et al. [1] as::
X ~ N(0, 1)
y(X) = X[:, 0] + 2 * X[:, 1] - 2 * X[:, 2] - 1.5 * X[:, 3]
Only the first 4 features are informative. The remaining features are
useless.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=10)
The number of features.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The input samples.
y : array of shape [n_samples]
The output values.
References
----------
.. [1] G. Celeux, M. El Anbari, J.-M. Marin, C. P. Robert,
"Regularization in regression: comparing Bayesian and frequentist
methods in a poorly informative situation", 2009.
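    Examples
    --------
    A minimal usage sketch; only the first four features are informative:
    >>> from sklearn.datasets import make_sparse_uncorrelated
    >>> X, y = make_sparse_uncorrelated(n_samples=100, n_features=10,
    ...                                 random_state=0)
    >>> print(X.shape)
    (100, 10)
    >>> print(y.shape)
    (100,)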
"""
generator = check_random_state(random_state)
X = generator.normal(loc=0, scale=1, size=(n_samples, n_features))
y = generator.normal(loc=(X[:, 0] +
2 * X[:, 1] -
2 * X[:, 2] -
1.5 * X[:, 3]), scale=np.ones(n_samples))
return X, y
def make_spd_matrix(n_dim, random_state=None):
"""Generate a random symmetric, positive-definite matrix.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_dim : int
The matrix dimension.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_dim, n_dim]
The random symmetric, positive-definite matrix.
See also
--------
make_sparse_spd_matrix
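    Examples
    --------
    A minimal usage sketch; the result is a square (n_dim, n_dim) matrix:
    >>> from sklearn.datasets.samples_generator import make_spd_matrix
    >>> A = make_spd_matrix(n_dim=3, random_state=0)
    >>> print(A.shape)
    (3, 3)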
"""
generator = check_random_state(random_state)
A = generator.rand(n_dim, n_dim)
U, s, V = linalg.svd(np.dot(A.T, A))
X = np.dot(np.dot(U, 1.0 + np.diag(generator.rand(n_dim))), V)
return X
def make_sparse_spd_matrix(dim=1, alpha=0.95, norm_diag=False,
smallest_coef=.1, largest_coef=.9,
random_state=None):
"""Generate a sparse symmetric definite positive matrix.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
dim: integer, optional (default=1)
The size of the random matrix to generate.
alpha: float between 0 and 1, optional (default=0.95)
        The probability that a coefficient is zero (see notes). Larger
        ``alpha`` values give a sparser matrix.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
largest_coef : float between 0 and 1, optional (default=0.9)
The value of the largest coefficient.
smallest_coef : float between 0 and 1, optional (default=0.1)
The value of the smallest coefficient.
norm_diag : boolean, optional (default=False)
Whether to normalize the output matrix to make the leading diagonal
elements all 1
Returns
-------
prec : sparse matrix of shape (dim, dim)
The generated matrix.
Notes
-----
The sparsity is actually imposed on the cholesky factor of the matrix.
Thus alpha does not translate directly into the filling fraction of
the matrix itself.
See also
--------
make_spd_matrix
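    Examples
    --------
    A minimal usage sketch; larger ``alpha`` values zero out more entries of
    the Cholesky factor and hence give a sparser matrix:
    >>> from sklearn.datasets import make_sparse_spd_matrix
    >>> prec = make_sparse_spd_matrix(dim=4, alpha=0.9, random_state=0)
    >>> print(prec.shape)
    (4, 4)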
"""
random_state = check_random_state(random_state)
chol = -np.eye(dim)
aux = random_state.rand(dim, dim)
aux[aux < alpha] = 0
aux[aux > alpha] = (smallest_coef
+ (largest_coef - smallest_coef)
* random_state.rand(np.sum(aux > alpha)))
aux = np.tril(aux, k=-1)
    # Permute the rows and columns: we don't want to have asymmetries in the
    # final SPD matrix
permutation = random_state.permutation(dim)
aux = aux[permutation].T[permutation]
chol += aux
prec = np.dot(chol.T, chol)
if norm_diag:
# Form the diagonal vector into a row matrix
d = np.diag(prec).reshape(1, prec.shape[0])
d = 1. / np.sqrt(d)
prec *= d
prec *= d.T
return prec
def make_swiss_roll(n_samples=100, noise=0.0, random_state=None):
"""Generate a swiss roll dataset.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of sample points on the S curve.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 3]
The points.
t : array of shape [n_samples]
The univariate position of the sample according to the main dimension
of the points in the manifold.
Notes
-----
The algorithm is from Marsland [1].
References
----------
.. [1] S. Marsland, "Machine Learning: An Algorithmic Perspective",
Chapter 10, 2009.
http://www-ist.massey.ac.nz/smarsland/Code/10/lle.py
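    Examples
    --------
    A minimal usage sketch; the points are three dimensional and ``t`` gives
    the position of each point along the roll:
    >>> from sklearn.datasets.samples_generator import make_swiss_roll
    >>> X, t = make_swiss_roll(n_samples=100, noise=0.05, random_state=0)
    >>> print(X.shape)
    (100, 3)
    >>> print(t.shape)
    (100,)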
"""
generator = check_random_state(random_state)
t = 1.5 * np.pi * (1 + 2 * generator.rand(1, n_samples))
x = t * np.cos(t)
y = 21 * generator.rand(1, n_samples)
z = t * np.sin(t)
X = np.concatenate((x, y, z))
X += noise * generator.randn(3, n_samples)
X = X.T
t = np.squeeze(t)
return X, t
def make_s_curve(n_samples=100, noise=0.0, random_state=None):
"""Generate an S curve dataset.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of sample points on the S curve.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 3]
The points.
t : array of shape [n_samples]
The univariate position of the sample according to the main dimension
of the points in the manifold.
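    Examples
    --------
    A minimal usage sketch:
    >>> from sklearn.datasets.samples_generator import make_s_curve
    >>> X, t = make_s_curve(n_samples=100, noise=0.05, random_state=0)
    >>> print(X.shape)
    (100, 3)
    >>> print(t.shape)
    (100,)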
"""
generator = check_random_state(random_state)
t = 3 * np.pi * (generator.rand(1, n_samples) - 0.5)
x = np.sin(t)
y = 2.0 * generator.rand(1, n_samples)
z = np.sign(t) * (np.cos(t) - 1)
X = np.concatenate((x, y, z))
X += noise * generator.randn(3, n_samples)
X = X.T
t = np.squeeze(t)
return X, t
def make_gaussian_quantiles(mean=None, cov=1., n_samples=100,
n_features=2, n_classes=3,
shuffle=True, random_state=None):
"""Generate isotropic Gaussian and label samples by quantile
This classification dataset is constructed by taking a multi-dimensional
standard normal distribution and defining classes separated by nested
concentric multi-dimensional spheres such that roughly equal numbers of
samples are in each class (quantiles of the :math:`\chi^2` distribution).
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
mean : array of shape [n_features], optional (default=None)
The mean of the multi-dimensional normal distribution.
If None then use the origin (0, 0, ...).
cov : float, optional (default=1.)
The covariance matrix will be this value times the unit matrix. This
dataset only produces symmetric normal distributions.
n_samples : int, optional (default=100)
The total number of points equally divided among classes.
n_features : int, optional (default=2)
The number of features for each sample.
n_classes : int, optional (default=3)
The number of classes
shuffle : boolean, optional (default=True)
Shuffle the samples.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The generated samples.
y : array of shape [n_samples]
The integer labels for quantile membership of each sample.
Notes
-----
The dataset is from Zhu et al [1].
References
----------
.. [1] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost", 2009.
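    Examples
    --------
    A minimal usage sketch; the classes correspond to concentric shells
    around the mean:
    >>> from sklearn.datasets import make_gaussian_quantiles
    >>> X, y = make_gaussian_quantiles(n_samples=100, n_features=2,
    ...                                n_classes=3, random_state=0)
    >>> print(X.shape)
    (100, 2)
    >>> print(y.shape)
    (100,)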
"""
if n_samples < n_classes:
raise ValueError("n_samples must be at least n_classes")
generator = check_random_state(random_state)
if mean is None:
mean = np.zeros(n_features)
else:
mean = np.array(mean)
# Build multivariate normal distribution
X = generator.multivariate_normal(mean, cov * np.identity(n_features),
(n_samples,))
# Sort by distance from origin
idx = np.argsort(np.sum((X - mean[np.newaxis, :]) ** 2, axis=1))
X = X[idx, :]
# Label by quantile
step = n_samples // n_classes
y = np.hstack([np.repeat(np.arange(n_classes), step),
np.repeat(n_classes - 1, n_samples - step * n_classes)])
if shuffle:
X, y = util_shuffle(X, y, random_state=generator)
return X, y
def _shuffle(data, random_state=None):
generator = check_random_state(random_state)
n_rows, n_cols = data.shape
row_idx = generator.permutation(n_rows)
col_idx = generator.permutation(n_cols)
result = data[row_idx][:, col_idx]
return result, row_idx, col_idx
def make_biclusters(shape, n_clusters, noise=0.0, minval=10,
maxval=100, shuffle=True, random_state=None):
"""Generate an array with constant block diagonal structure for
biclustering.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
shape : iterable (n_rows, n_cols)
The shape of the result.
n_clusters : integer
The number of biclusters.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise.
minval : int, optional (default=10)
Minimum value of a bicluster.
maxval : int, optional (default=100)
Maximum value of a bicluster.
shuffle : boolean, optional (default=True)
Shuffle the samples.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape `shape`
The generated array.
rows : array of shape (n_clusters, X.shape[0],)
The indicators for cluster membership of each row.
cols : array of shape (n_clusters, X.shape[1],)
The indicators for cluster membership of each column.
References
----------
.. [1] Dhillon, I. S. (2001, August). Co-clustering documents and
words using bipartite spectral graph partitioning. In Proceedings
of the seventh ACM SIGKDD international conference on Knowledge
discovery and data mining (pp. 269-274). ACM.
See also
--------
make_checkerboard
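    Examples
    --------
    A minimal usage sketch; ``rows`` and ``cols`` are boolean membership
    indicators, one row per bicluster:
    >>> from sklearn.datasets import make_biclusters
    >>> data, rows, cols = make_biclusters(shape=(50, 40), n_clusters=3,
    ...                                    random_state=0)
    >>> print(data.shape)
    (50, 40)
    >>> print(rows.shape)
    (3, 50)
    >>> print(cols.shape)
    (3, 40)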
"""
generator = check_random_state(random_state)
n_rows, n_cols = shape
consts = generator.uniform(minval, maxval, n_clusters)
# row and column clusters of approximately equal sizes
row_sizes = generator.multinomial(n_rows,
np.repeat(1.0 / n_clusters,
n_clusters))
col_sizes = generator.multinomial(n_cols,
np.repeat(1.0 / n_clusters,
n_clusters))
row_labels = np.hstack(list(np.repeat(val, rep) for val, rep in
zip(range(n_clusters), row_sizes)))
col_labels = np.hstack(list(np.repeat(val, rep) for val, rep in
zip(range(n_clusters), col_sizes)))
result = np.zeros(shape, dtype=np.float64)
for i in range(n_clusters):
selector = np.outer(row_labels == i, col_labels == i)
result[selector] += consts[i]
if noise > 0:
result += generator.normal(scale=noise, size=result.shape)
if shuffle:
result, row_idx, col_idx = _shuffle(result, random_state)
row_labels = row_labels[row_idx]
col_labels = col_labels[col_idx]
rows = np.vstack(row_labels == c for c in range(n_clusters))
cols = np.vstack(col_labels == c for c in range(n_clusters))
return result, rows, cols
def make_checkerboard(shape, n_clusters, noise=0.0, minval=10,
maxval=100, shuffle=True, random_state=None):
"""Generate an array with block checkerboard structure for
biclustering.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
shape : iterable (n_rows, n_cols)
The shape of the result.
n_clusters : integer or iterable (n_row_clusters, n_column_clusters)
The number of row and column clusters.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise.
minval : int, optional (default=10)
Minimum value of a bicluster.
maxval : int, optional (default=100)
Maximum value of a bicluster.
shuffle : boolean, optional (default=True)
Shuffle the samples.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape `shape`
The generated array.
rows : array of shape (n_clusters, X.shape[0],)
The indicators for cluster membership of each row.
cols : array of shape (n_clusters, X.shape[1],)
The indicators for cluster membership of each column.
References
----------
.. [1] Kluger, Y., Basri, R., Chang, J. T., & Gerstein, M. (2003).
Spectral biclustering of microarray data: coclustering genes
and conditions. Genome research, 13(4), 703-716.
See also
--------
make_biclusters
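    Examples
    --------
    A minimal usage sketch; with (3, 4) row and column clusters there are
    3 * 4 = 12 bicluster indicators:
    >>> from sklearn.datasets import make_checkerboard
    >>> data, rows, cols = make_checkerboard(shape=(50, 40),
    ...                                      n_clusters=(3, 4),
    ...                                      random_state=0)
    >>> print(data.shape)
    (50, 40)
    >>> print(rows.shape)
    (12, 50)
    >>> print(cols.shape)
    (12, 40)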
"""
generator = check_random_state(random_state)
if hasattr(n_clusters, "__len__"):
n_row_clusters, n_col_clusters = n_clusters
else:
n_row_clusters = n_col_clusters = n_clusters
# row and column clusters of approximately equal sizes
n_rows, n_cols = shape
row_sizes = generator.multinomial(n_rows,
np.repeat(1.0 / n_row_clusters,
n_row_clusters))
col_sizes = generator.multinomial(n_cols,
np.repeat(1.0 / n_col_clusters,
n_col_clusters))
row_labels = np.hstack(list(np.repeat(val, rep) for val, rep in
zip(range(n_row_clusters), row_sizes)))
col_labels = np.hstack(list(np.repeat(val, rep) for val, rep in
zip(range(n_col_clusters), col_sizes)))
result = np.zeros(shape, dtype=np.float64)
for i in range(n_row_clusters):
for j in range(n_col_clusters):
selector = np.outer(row_labels == i, col_labels == j)
result[selector] += generator.uniform(minval, maxval)
if noise > 0:
result += generator.normal(scale=noise, size=result.shape)
if shuffle:
result, row_idx, col_idx = _shuffle(result, random_state)
row_labels = row_labels[row_idx]
col_labels = col_labels[col_idx]
rows = np.vstack(row_labels == label
for label in range(n_row_clusters)
for _ in range(n_col_clusters))
cols = np.vstack(col_labels == label
for _ in range(n_row_clusters)
for label in range(n_col_clusters))
return result, rows, cols
| bsd-3-clause |
msmbuilder/osprey | osprey/tests/test_dataset_loader.py | 2 | 6459 | from __future__ import print_function, absolute_import, division
import os
import shutil
import tempfile
from nose.plugins.skip import SkipTest
import numpy as np
import sklearn.datasets
from sklearn.externals.joblib import dump
from numpy.testing.decorators import skipif
from osprey.dataset_loaders import (DSVDatasetLoader, FilenameDatasetLoader,
JoblibDatasetLoader, HDF5DatasetLoader,
MDTrajDatasetLoader,
MSMBuilderDatasetLoader,
NumpyDatasetLoader, SklearnDatasetLoader)
try:
__import__('msmbuilder.example_datasets')
HAVE_MSMBUILDER = True
except:
HAVE_MSMBUILDER = False
def test_FilenameDatasetLoader_1():
cwd = os.path.abspath(os.curdir)
dirname = tempfile.mkdtemp()
try:
os.chdir(dirname)
open('filename-1', 'w').close()
open('filename-2', 'w').close()
assert FilenameDatasetLoader.short_name == 'filename'
loader = FilenameDatasetLoader('filename-*')
X, y = loader.load()
X_ref = list(map(os.path.abspath, ['filename-1', 'filename-2']))
assert sorted(X) == X_ref, X
assert y is None, y
finally:
os.chdir(cwd)
shutil.rmtree(dirname)
def test_JoblibDatasetLoader_1():
assert JoblibDatasetLoader.short_name == 'joblib'
cwd = os.path.abspath(os.curdir)
dirname = tempfile.mkdtemp()
try:
os.chdir(dirname)
# one file
dump(np.zeros((10, 2)), 'f1.pkl')
loader = JoblibDatasetLoader('f1.pkl')
X, y = loader.load()
assert np.all(X == np.zeros((10, 2)))
assert y is None
# two files
dump(np.ones((10, 2)), 'f2.pkl')
loader = JoblibDatasetLoader('f*.pkl')
X, y = loader.load()
assert isinstance(X, list)
assert np.all(X[0] == np.zeros((10, 2)))
assert np.all(X[1] == np.ones((10, 2)))
assert y is None
# one file, with x and y
dump({'foo': 'baz', 'bar': 'qux'}, 'foobar.pkl')
loader = JoblibDatasetLoader('foobar.pkl', x_name='foo', y_name='bar')
X, y = loader.load()
assert X == 'baz', X
assert y == 'qux', y
finally:
os.chdir(cwd)
shutil.rmtree(dirname)
def test_HDF5DatasetLoader_1():
from mdtraj import io
assert HDF5DatasetLoader.short_name == 'hdf5'
cwd = os.path.abspath(os.curdir)
dirname = tempfile.mkdtemp()
try:
os.chdir(dirname)
# one file
io.saveh('f1.h5', **{'test': np.zeros((10, 3))})
loader = HDF5DatasetLoader('f1.h5', concat=False)
X, y = loader.load()
assert np.all(X == np.zeros((10, 3)))
assert y is None
# two files
io.saveh('f2.h5', **{'test': np.ones((10, 3))})
loader = HDF5DatasetLoader('f*.h5', concat=False)
X, y = loader.load()
assert isinstance(X, list)
assert np.all(X[0] == np.zeros((10, 3)))
assert np.all(X[1] == np.ones((10, 3)))
assert y is None
# concat and stride and y_col
loader = HDF5DatasetLoader('f*.h5', y_col=2, stride=2, concat=True)
X, y = loader.load()
assert X.shape[0] == 10 and X.shape[1] == 2
assert y.shape[0] == 10
finally:
os.chdir(cwd)
shutil.rmtree(dirname)
def test_DSVDatasetLoader_1():
assert DSVDatasetLoader.short_name == 'dsv'
cwd = os.path.abspath(os.curdir)
dirname = tempfile.mkdtemp()
try:
os.chdir(dirname)
# one file
np.savetxt('f1.csv', np.zeros((10, 4)), fmt='%f,%f,%f,%f')
loader = DSVDatasetLoader('f1.csv', concat=False)
X, y = loader.load()
assert np.all(X == np.zeros((10, 4)))
assert y is None
# two files
np.savetxt('f2.csv', np.ones((10, 4)), fmt='%f,%f,%f,%f')
loader = DSVDatasetLoader('f*.csv', concat=False)
X, y = loader.load()
assert isinstance(X, list)
assert np.all(X[0] == np.zeros((10, 4)))
assert np.all(X[1] == np.ones((10, 4)))
assert y is None
# y_col and usecols and concat and stride
loader = DSVDatasetLoader('f*.csv',
y_col=3,
usecols=(0, 2),
stride=2,
concat=True)
X, y = loader.load()
assert X.shape[0] == 10 and X.shape[1] == 2
assert y.shape[0] == 10
finally:
os.chdir(cwd)
shutil.rmtree(dirname)
@skipif(not HAVE_MSMBUILDER, 'this test requires MSMBuilder')
def test_MDTrajDatasetLoader_1():
try:
from msmbuilder.example_datasets import FsPeptide
except ImportError as e:
raise SkipTest(e)
cwd = os.path.abspath(os.curdir)
dirname = tempfile.mkdtemp()
fs_pept = FsPeptide(dirname)
fs_pept.get()
try:
loader = MDTrajDatasetLoader(os.path.join(fs_pept.data_dir, '*.xtc'),
topology=os.path.join(fs_pept.data_dir, 'fs-peptide.pdb'))
X, y = loader.load()
assert len(X) == 28
assert y is None
finally:
shutil.rmtree(dirname)
def test_MSMBuilderDatasetLoader_1():
# TODO Why does this work when other msmbuilder imports don't?
from msmbuilder.dataset import dataset
path = tempfile.mkdtemp()
shutil.rmtree(path)
try:
x = np.random.randn(10, 2)
ds = dataset(path, 'w', 'dir-npy')
ds[0] = x
loader = MSMBuilderDatasetLoader(path, fmt='dir-npy')
X, y = loader.load()
assert np.all(X[0] == x)
assert y is None
finally:
shutil.rmtree(path)
def test_NumpyDatasetLoader_1():
cwd = os.path.abspath(os.curdir)
dirname = tempfile.mkdtemp()
try:
os.chdir(dirname)
x = np.random.randn(10, 2)
np.save('f1.npy', x)
loader = NumpyDatasetLoader('f1.npy')
X, y = loader.load()
assert np.all(X[0] == x)
assert y is None
finally:
os.chdir(cwd)
shutil.rmtree(dirname)
def test_SklearnDatasetLoader_1():
assert SklearnDatasetLoader.short_name == 'sklearn_dataset'
X, y = SklearnDatasetLoader('load_iris').load()
iris = sklearn.datasets.load_iris()
assert np.all(X == iris['data'])
assert np.all(y == iris['target'])
| apache-2.0 |
sstoma/CellProfiler | cellprofiler/modules/trackobjects.py | 1 | 137657 | from cellprofiler.gui.help import USING_METADATA_HELP_REF, USING_METADATA_GROUPING_HELP_REF, LOADING_IMAGE_SEQ_HELP_REF
TM_OVERLAP = 'Overlap'
TM_DISTANCE = 'Distance'
TM_MEASUREMENTS = 'Measurements'
TM_LAP = "LAP"
TM_ALL = [TM_OVERLAP, TM_DISTANCE, TM_MEASUREMENTS, TM_LAP]
LT_NONE = 0
LT_PHASE_1 = 1
LT_SPLIT = 2
LT_MITOSIS = 3
LT_GAP = 4
KM_VEL = 1
KM_NO_VEL = 0
KM_NONE = -1
'''Random motion model, for instance Brownian motion'''
M_RANDOM = "Random"
'''Velocity motion model, object position depends on prior velocity'''
M_VELOCITY = "Velocity"
'''Random and velocity models'''
M_BOTH = "Both"
RADIUS_STD_SETTING_TEXT = 'Number of standard deviations for search radius'
RADIUS_LIMIT_SETTING_TEXT = 'Search radius limit, in pixel units (Min,Max)'
ONLY_IF_2ND_PHASE_LAP_TEXT = '''<i>(Used only if the %(TM_LAP)s tracking method is applied and the second phase is run)</i>'''%globals()
import cellprofiler.icons
from cellprofiler.gui.help import PROTIP_RECOMEND_ICON, PROTIP_AVOID_ICON, TECH_NOTE_ICON
__doc__ = """
<b>Track Objects</b> allows tracking objects throughout sequential
frames of a series of images, so that from frame to frame
each object maintains a unique identity in the output measurements.
<hr>
This module must be placed downstream of a module that identifies objects
(e.g., <b>IdentifyPrimaryObjects</b>). <b>TrackObjects</b> will associate each
object with the same object in the frames before and after. This allows the study
of objects' lineages and the timing and characteristics of dynamic events in
movies.
<p>Images in CellProfiler are processed sequentially by frame (whether loaded as a
series of images or a movie file). To process a collection of images/movies,
you will need to do the following:
<ul>
<li>Define each individual movie using metadata
either contained within the image file itself or as part of the images nomenclature
or folder structure. %(USING_METADATA_HELP_REF)s.</li>
<li>Group the movies to make sure
that each image sequence is handled individually. %(USING_METADATA_GROUPING_HELP_REF)s.
</li>
</ul>
For complete details, see <i>%(LOADING_IMAGE_SEQ_HELP_REF)s</i>.</p>
<p>For an example pipeline using TrackObjects, see the CellProfiler
<a href="http://www.cellprofiler.org/examples.shtml#Tracking">Examples</a> webpage.</p>
<h4>Available measurements</h4>
<b>Object measurements</b>
<ul>
<li><i>Label:</i> Each tracked object is assigned a unique identifier (label).
Child objects resulting from a split or merge are assigned the label of the ancestor.</li>
<li><i>ParentImageNumber, ParentObjectNumber:</i> The <i>ImageNumber</i> and
<i>ObjectNumber</i> of the parent object in the prior frame. For a split, each
child object will have the label of the object it split from. For a merge,
the child will have the label of the closest parent.</li>
<li><i>TrajectoryX, TrajectoryY:</i> The direction of motion (in x and y coordinates) of the
object from the previous frame to the current frame.</li>
<li><i>DistanceTraveled:</i> The distance traveled by the object from the
previous frame to the current frame (calculated as the magnitude of
the trajectory vectors).</li>
<li><i>Displacement:</i> The shortest distance traveled by the object from its
initial starting position to the position in the current frame. That is, it is
the straight-line path between the two points.</li>
<li><i>IntegratedDistance:</i> The total distance traveled by the object during
the lifetime of the object.</li>
<li><i>Linearity:</i> A measure of how linear the object trajectory is during the
object lifetime. Calculated as (displacement from initial to final
location)/(integrated object distance). The value is in the range [0,1].</li>
<li><i>Lifetime:</i> The number of frames an object has existed. The lifetime starts
at 1 at the frame when an object appears, and is incremented with each frame that the
object persists. At the final frame of the image set/movie, the
lifetimes of all remaining objects are output.</li>
<li><i>FinalAge:</i> Similar to <i>LifeTime</i> but is only output at the final
frame of the object's life (or the movie ends, whichever comes first). At this point,
the final age of the object is output; no values are stored for earlier frames.
<dl>
<dd><img src="memory:%(PROTIP_RECOMEND_ICON)s"> This value
is useful if you want to plot a histogram of the object lifetimes; all but the final age
can be ignored or filtered out.</dd>
</dl></li>
</ul>
The following object measurements are specific to the %(TM_LAP)s tracking method:
<ul>
<li><i>LinkType:</i> The linking method used to link the object to its parent.
Possible values are
<ul>
<li><b>%(LT_NONE)d</b>: The object was not linked to a parent.</li>
<li><b>%(LT_PHASE_1)d</b>: The object was linked to a parent in the previous frame.</li>
<li><b>%(LT_SPLIT)d</b>: The object is linked as the start of a split path.</li>
<li><b>%(LT_MITOSIS)s</b>: The object was linked to its parent as a daughter of
a mitotic pair.</li>
<li><b>%(LT_GAP)d</b>: The object was linked to a parent in a frame prior to the
previous frame (a gap).</li>
</ul>
Under some circumstances, multiple linking methods may apply to a given object, e.g., an
object may be both the beginning of a split path and not have a parent. However, only
one linking method is assigned.</li>
<li><i>MovementModel:</i> The movement model used to track the object.
<ul>
<li><b>%(KM_NO_VEL)d</b>: The <i>%(M_RANDOM)s</i> model was used.</li>
<li><b>%(KM_VEL)d</b>: The <i>%(M_VELOCITY)s</i> model was used.</li>
<li><b>-1</b>: Neither model was used. This can occur under two circumstances:
<ul>
<li>At the beginning of a trajectory, when there is no data to determine the model as
yet.</li>
<li>At the beginning of a closed gap, since a model was not actually applied to make
the link in the first phase.</li>
</ul></li>
</ul>
</li>
<li><i>LinkingDistance:</i> The difference between the propagated position of an
object and the object to which it is matched.
<dl>
<dd><img src="memory:%(PROTIP_RECOMEND_ICON)s"> A slowly decaying histogram of
these distances indicates that the search radius is large enough. A cut-off histogram
is a sign that the search radius is too small.</dd>
</dl></li>
<li><i>StandardDeviation:</i> The Kalman filter maintains a running estimate
of the variance of the error in estimated position for each model.
This measurement records the linking distance divided by the standard deviation
of the error when linking the object with its parent.
<dl>
<dd><img src="memory:%(PROTIP_RECOMEND_ICON)s"> This value is multiplied by
the <i>"%(RADIUS_STD_SETTING_TEXT)s"</i> setting to constrain the search distance.
A histogram of this value can help determine if the <i>"%(RADIUS_LIMIT_SETTING_TEXT)s"</i>
setting is appropriate.</dd>
</dl>
</li>
<li><i>GapLength:</i> The number of frames between an object and its parent.
For instance, an object in frame 3 with a parent in frame 1 has a gap length of
2.</li>
<li><i>GapScore:</i> If an object is linked to its parent by bridging a gap,
this value is the score for the gap.</li>
<li><i>SplitScore:</i> If an object is linked to its parent via a split, this
value is the score for the split.</li>
<li><i>MergeScore:</i> If an object is linked to a child via a merge, this value is
the score for the merge.</li>
<li><i>MitosisScore:</i> If an object is linked to two children via a mitosis,
this value is the score for the mitosis.</li>
</ul>
<b>Image measurements</b>
<ul>
<li><i>LostObjectCount:</i> Number of objects that appear in the previous frame
but have no identifiable child in the current frame.</li>
<li><i>NewObjectCount:</i> Number of objects that appear in the current frame but
have no identifiable parent in the previous frame. </li>
<li><i>SplitObjectCount:</i> Number of objects in the current frame that
resulted from a split from a parent object in the previous frame.</li>
<li><i>MergedObjectCount:</i> Number of objects in the current frame that
resulted from the merging of child objects in the previous frame.</li>
</ul>
See also: Any of the <b>Measure</b> modules, <b>IdentifyPrimaryObjects</b>, <b>Groups</b>.
"""%globals()
# CellProfiler is distributed under the GNU General Public License.
# See the accompanying file LICENSE for details.
#
# Copyright (c) 2003-2009 Massachusetts Institute of Technology
# Copyright (c) 2009-2015 Broad Institute
#
# Please see the AUTHORS file for credits.
#
# Website: http://www.cellprofiler.org
import logging
logger = logging.getLogger(__name__)
import numpy as np
import numpy.ma
from scipy.ndimage import distance_transform_edt
import scipy.ndimage
import scipy.sparse
import cellprofiler.cpmodule as cpm
import cellprofiler.cpimage as cpi
import cellprofiler.pipeline as cpp
import cellprofiler.settings as cps
from cellprofiler.settings import YES, NO
import cellprofiler.measurements as cpmeas
import cellprofiler.preferences as cpprefs
from cellprofiler.cpmath.lapjv import lapjv
import cellprofiler.cpmath.filter as cpfilter
from cellprofiler.cpmath.cpmorphology import fixup_scipy_ndimage_result as fix
from cellprofiler.cpmath.cpmorphology import centers_of_labels
from cellprofiler.cpmath.cpmorphology import associate_by_distance
from cellprofiler.cpmath.cpmorphology import all_connected_components
from cellprofiler.cpmath.index import Indexes
from identify import M_LOCATION_CENTER_X, M_LOCATION_CENTER_Y
from cellprofiler.gui.help import HELP_ON_MEASURING_DISTANCES
DT_COLOR_AND_NUMBER = 'Color and Number'
DT_COLOR_ONLY = 'Color Only'
DT_ALL = [DT_COLOR_AND_NUMBER, DT_COLOR_ONLY]
R_PARENT = "Parent"
F_PREFIX = "TrackObjects"
F_LABEL = "Label"
F_PARENT_OBJECT_NUMBER = "ParentObjectNumber"
F_PARENT_IMAGE_NUMBER = "ParentImageNumber"
F_TRAJECTORY_X = "TrajectoryX"
F_TRAJECTORY_Y = "TrajectoryY"
F_DISTANCE_TRAVELED = "DistanceTraveled"
F_DISPLACEMENT = "Displacement"
F_INTEGRATED_DISTANCE = "IntegratedDistance"
F_LINEARITY = "Linearity"
F_LIFETIME = "Lifetime"
F_FINAL_AGE = "FinalAge"
F_MOVEMENT_MODEL = "MovementModel"
F_LINK_TYPE = "LinkType"
F_LINKING_DISTANCE = "LinkingDistance"
F_STANDARD_DEVIATION = "StandardDeviation"
F_GAP_LENGTH = "GapLength"
F_GAP_SCORE = "GapScore"
F_MERGE_SCORE = "MergeScore"
F_SPLIT_SCORE = "SplitScore"
F_MITOSIS_SCORE = "MitosisScore"
F_KALMAN = "Kalman"
F_STATE = "State"
F_COV = "COV"
F_NOISE = "Noise"
F_VELOCITY_MODEL = "Vel"
F_STATIC_MODEL = "NoVel"
F_X = "X"
F_Y = "Y"
F_VX = "VX"
F_VY = "VY"
F_EXPT_ORIG_NUMTRACKS = "%s_OriginalNumberOfTracks"%F_PREFIX
F_EXPT_FILT_NUMTRACKS = "%s_FilteredNumberOfTracks"%F_PREFIX
def kalman_feature(model, matrix_or_vector, i, j=None):
'''Return the feature name for a Kalman feature
model - model used for Kalman feature: velocity or static
matrix_or_vector - the part of the Kalman state to save, vec, COV or noise
i - the name for the first (or only for vec and noise) index into the vector
j - the name of the second index into the matrix
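    For example, kalman_feature(F_VELOCITY_MODEL, F_STATE, F_X) returns
    "Kalman_Vel_State_X" and kalman_feature(F_STATIC_MODEL, F_COV, F_X, F_Y)
    returns "Kalman_NoVel_COV_X_Y".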
'''
pieces = [F_KALMAN, model, matrix_or_vector, i]
if j is not None:
pieces.append(j)
return "_".join(pieces)
'''# of objects in the current frame without parents in the previous frame'''
F_NEW_OBJECT_COUNT = "NewObjectCount"
'''# of objects in the previous frame without parents in the new frame'''
F_LOST_OBJECT_COUNT = "LostObjectCount"
'''# of parents that split into more than one child'''
F_SPLIT_COUNT = "SplitObjectCount"
'''# of children that are merged from more than one parent'''
F_MERGE_COUNT = "MergedObjectCount"
'''Object area measurement for LAP method
The final part of the LAP method needs the object area measurement
which is stored using this name.'''
F_AREA = "Area"
F_ALL_COLTYPE_ALL = [(F_LABEL, cpmeas.COLTYPE_INTEGER),
(F_PARENT_OBJECT_NUMBER, cpmeas.COLTYPE_INTEGER),
(F_PARENT_IMAGE_NUMBER, cpmeas.COLTYPE_INTEGER),
(F_TRAJECTORY_X, cpmeas.COLTYPE_INTEGER),
(F_TRAJECTORY_Y, cpmeas.COLTYPE_INTEGER),
(F_DISTANCE_TRAVELED, cpmeas.COLTYPE_FLOAT),
(F_DISPLACEMENT, cpmeas.COLTYPE_FLOAT),
(F_INTEGRATED_DISTANCE, cpmeas.COLTYPE_FLOAT),
(F_LINEARITY, cpmeas.COLTYPE_FLOAT),
(F_LIFETIME, cpmeas.COLTYPE_INTEGER),
(F_FINAL_AGE, cpmeas.COLTYPE_INTEGER)]
F_IMAGE_COLTYPE_ALL = [(F_NEW_OBJECT_COUNT, cpmeas.COLTYPE_INTEGER),
(F_LOST_OBJECT_COUNT, cpmeas.COLTYPE_INTEGER),
(F_SPLIT_COUNT, cpmeas.COLTYPE_INTEGER),
(F_MERGE_COUNT, cpmeas.COLTYPE_INTEGER)]
F_ALL = [feature for feature, coltype in F_ALL_COLTYPE_ALL]
F_IMAGE_ALL = [feature for feature, coltype in F_IMAGE_COLTYPE_ALL]
class TrackObjects(cpm.CPModule):
module_name = 'TrackObjects'
category = "Object Processing"
variable_revision_number = 6
def create_settings(self):
self.tracking_method = cps.Choice(
'Choose a tracking method',
TM_ALL, doc="""
When trying to track an object in an image,
<b>TrackObjects</b> will search within a maximum
specified distance (see the <i>distance within which to search</i> setting)
of the object's location in the previous image, looking for a "match".
Objects that match are assigned the same number, or label, throughout the
entire movie.
There are several options for the method used to find a match. Choose
among these options based on which is most consistent from frame
to frame of your movie.
<ul>
<li><i>%(TM_OVERLAP)s:</i> Compares the amount of spatial overlap between identified objects in
the previous frame with those in the current frame. The object with the
greatest amount of spatial overlap will be assigned the same number (label).
<dl>
<dd><img src="memory:%(PROTIP_RECOMEND_ICON)s">
Recommended when there is a high degree of overlap of an object from one frame to the next,
which is the case for movies with high frame rates relative to object motion.</dd>
</dl></li>
<li><i>%(TM_DISTANCE)s:</i> Compares the distance between each identified
object in the previous frame with that of the current frame. The
closest objects to each other will be assigned the same number (label).
Distances are measured from the perimeter of each object.
<dl>
<dd><img src="memory:%(PROTIP_RECOMEND_ICON)s">
Recommended for cases where the objects are not very crowded but where
<i>%(TM_OVERLAP)s</i> does not work sufficiently well, which is the case
for movies with low frame rates relative to object motion.</dd>
</dl></li>
<li><i>%(TM_MEASUREMENTS)s:</i> Compares each object in the
current frame with objects in the previous frame based on a particular
feature you have measured for the objects (for example, a particular intensity
or shape measurement that can distinguish nearby objects). The object
with the closest-matching measurement will be selected as a match and will be
assigned the same number (label). This selection requires that you run the
specified <b>Measure</b> module previous to this module in the pipeline so
that the measurement values can be used to track the objects.</li>
<li><i>%(TM_LAP)s:</i> Uses the linear assignment problem (LAP) framework. The
linear assignment problem (LAP) algorithm (<i>Jaqaman et al., 2008</i>)
addresses the challenges of high object density, motion heterogeneity,
temporary disappearances, and object merging and splitting.
The algorithm first links objects between consecutive frames and then links
the resulting partial trajectories into complete trajectories. Both steps are formulated
as global combinatorial optimization problems whose solution identifies the overall
most likely set of object trajectories throughout a movie.
<p>Tracks are constructed from an image sequence by detecting objects in each
frame and linking objects between consecutive frames as a first step. This step alone
may result in incompletely tracked objects due to the appearance and disappearance
of objects, either in reality or apparently because of noise and imaging limitations.
To correct this, you may apply an optional second step which closes temporal gaps
between tracked objects and captures merging and splitting events. This step takes
place at the end of the analysis run.</p>
<p><img src="memory:%(PROTIP_RECOMEND_ICON)s"> Some recommendations on optimizing
the LAP settings<br>
<ul>
<li><i>Work with a minimal subset of your data:</i> Attempting to optimize these settings
by examining a dataset containing many objects may be complicated and frustrating.
Therefore, it is a good idea to work with a smaller portion of the data containing the
behavior of interest.
<ul>
            <li>For example, if splits characterize your data, try narrowing down to following just
one cell that undergoes a split and examine a few frames before and after the event.</li>
<li>You can insert the <b>Crop</b> module to zoom in a region of interest, optimize the
settings and then either remove or disable the module when done.</li>
<li>You can also use the <b>Input</b> modules to limit yourself to a few frames under
consideration. For example, use the filtering settings in the <b>Images</b> module to
use only certain files from the movie in the pipeline.</li>
</ul></li>
            <li><i>Begin by optimizing the settings for the first phase of the LAP:</i> The 2nd phase of
the LAP method depends on the results of the first phase. Therefore, it is a good idea to
optimize the first phase settings as the initial step.
<ul>
<li>You can disable 2nd phase calculation by selecting <i>%(NO)s</i> for "Run the second
phase of the LAP algorithm?"</li>
            <li>By maximizing the number of correct frame-to-frame links in the first phase, the
2nd phase will have less candidates to consider for linking and have a better chance of
closing gaps correctly. </li>
<li>If tracks are not being linked in the first phase, you may need to adjust the number
of standard deviations for the search radius and/or the radius limits (most likely
the maximum limit). See the help for these settings for details.</li>
</ul></li>
            <li><i>Use any visualization tools at your disposal:</i> Visualizing the data often allows for
easier decision making as opposed to sorting through tabular data alone.
<ul>
<li>The <a href="http://cran.r-project.org/">R</a> open-source software package has
analysis and visualization tools that can query a database. See <a href=
"http://www.broadinstitute.org/~leek/rtracking.html">here</a> for a use case by our
lead software engineer.</li>
<li><a href="http://cellprofiler.org/tracer/">CellProfiler Tracer</a> is a version of
CellProfiler Analyst that contains tools for visualizing time-lapse data that has been exported
using the <b>ExportToDatabase</b> module.</li>
</ul></li>
</ul>
</p>
<p><b>References</b>
<ul>
<li>Jaqaman K, Loerke D, Mettlen M, Kuwata H, Grinstein S, Schmid SL, Danuser G. (2008)
"Robust single-particle tracking in live-cell time-lapse sequences."
<i>Nature Methods</i> 5(8),695-702.
<a href="http://dx.doi.org/10.1038/nmeth.1237">(link)</a></li>
<li>Jaqaman K, Danuser G. (2009) "Computational image analysis of cellular dynamics:
a case study based on particle tracking." Cold Spring Harb Protoc. 2009(12):pdb.top65.
<a href="http://dx.doi.org/10.1101/pdb.top65">(link)</a></li>
</ul></p>
</li>
</ul>"""%globals())
self.object_name = cps.ObjectNameSubscriber(
'Select the objects to track',cps.NONE, doc="""
Select the objects to be tracked by this module.""")
self.measurement = cps.Measurement(
'Select object measurement to use for tracking',
lambda : self.object_name.value, doc="""
<i>(Used only if Measurements is the tracking method)</i><br>
Select which type of measurement (category) and which specific feature from the
<b>Measure</b> module will be used for tracking. Select the feature name from
the popup box or see each <b>Measure</b> module's help for the list of
the features measured by that module. If necessary, you will also be asked
to specify additional details such as the
image from which the measurements originated or the measurement scale.""")
self.pixel_radius = cps.Integer(
'Maximum pixel distance to consider matches',50,minval=1,doc="""
Objects in the subsequent frame will be considered potential matches if
they are within this distance. To determine a suitable pixel distance, you can look
at the axis increments on each image (shown in pixel units) or
use the distance measurement tool. %(HELP_ON_MEASURING_DISTANCES)s"""%globals())
self.model = cps.Choice(
"Select the movement model",[M_RANDOM, M_VELOCITY, M_BOTH], value=M_BOTH,doc = """
<i>(Used only if the %(TM_LAP)s tracking method is applied)</i><br>
This setting controls how to predict an object's position in
the next frame, assuming that each object moves randomly with
a frame-to-frame variance in position that follows a Gaussian
distribution.<br>
<ul>
<li><i>%(M_RANDOM)s:</i> A model in which objects move due to
Brownian Motion or a similar process where the variance in position
differs between objects.
<dl>
<dd><img src="memory:%(PROTIP_RECOMEND_ICON)s">
Use this model if the objects move with some
random jitter around a stationary location.</dd>
</dl></li>
<li><i>%(M_VELOCITY)s:</i> A model in which the object moves with
a velocity. Both velocity and position (after correcting for
velocity) vary following a Gaussian distribution.
<dl>
<dd><img src="memory:%(PROTIP_RECOMEND_ICON)s"> Use this model if
the objects move along a spatial trajectory in some direction over time.</dd>
</dl></li>
<li><i>%(M_BOTH)s:</i> <b>TrackObjects</b> will predict each
object's position using both models and use the model with the
lowest penalty to join an object in one frame with one in another.
<dl>
<dd><img src="memory:%(PROTIP_RECOMEND_ICON)s"> Use this
option if both models above are applicable over time.</dd>
</dl></li>
</ul>""" % globals())
self.radius_std = cps.Float(
RADIUS_STD_SETTING_TEXT, 3, minval=1,doc = """
<i>(Used only if the %(TM_LAP)s tracking method is applied)</i>
<br>
<b>TrackObjects</b> will estimate the standard deviation of the error
between the observed and predicted positions of an object for
each movement model. It will constrain the search for matching
objects from one frame to the next to the standard deviation
of the error times the number of standard
deviations that you enter here.
<dl>
<dd><img src="memory:%(PROTIP_RECOMEND_ICON)s"> Recommendations:
<ul>
<li>If the standard deviation is quite small, but the object makes a
large spatial jump, this value may need to be set higher in order
to increase the search area and thereby make the frame-to-frame
linkage.</li>
</ul></dd>
</dl>"""%globals())
self.radius_limit = cps.FloatRange(
RADIUS_LIMIT_SETTING_TEXT, (2, 10), minval = 0,doc = """
<i>(Used only if the %(TM_LAP)s tracking method is applied)</i><br>
<b>TrackObjects</b> derives a search radius based on the error
estimation. Potentially, the module can make an erroneous assignment
with a large error, leading to a large estimated error for
the object in the next frame. Conversely, the module can arrive
at a small estimated error by chance, leading to a maximum radius
that does not track the object in a subsequent frame. The radius
limit constrains the maximum radius to reasonable values.
<dl>
<dd><img src="memory:%(PROTIP_RECOMEND_ICON)s"> Recommendations:
<ul>
            <li>Special care must be taken to adjust the upper limit appropriately
to the data.</li>
<li>The lower limit should be set to a radius (in pixels) that is a
reasonable displacement for any object from one frame to the next. Hence,
if you notice that a frame-to-frame linkage is not being made for a
steadily-moving object, it may be that this value needs to be decreased
such that the displacement falls above the lower limit.</li>
<li>The upper limit should be set to the maximum reasonable
displacement (in pixels) under any circumstances. Hence, if you notice that
a frame-to-frame linkage is not being made in the case of a unusually
large displacement, this value may need to be increased.</li>
</ul></dd>
</dl>"""%globals())
self.wants_second_phase = cps.Binary(
"Run the second phase of the LAP algorithm?", True, doc="""
<i>(Used only if the %(TM_LAP)s tracking method is applied)</i><br>
Select <i>%(YES)s</i> to run the second phase of the LAP algorithm
after processing all images. Select <i>%(NO)s</i> to omit the
second phase or to perform the second phase when running the module
as a data tool.
<p>Since object tracks may start and end not only because of the true appearance
and disappearance of objects, but also because of apparent disappearances due
to noise and limitations in imaging, you may want to run the second phase
which attempts to close temporal gaps between tracked objects and tries to
capture merging and splitting events.</p>
<p>For additional details on optimizing the LAP settings, see the help for each
the settings.</p>"""%globals())
self.gap_cost = cps.Integer(
'Gap closing cost', 40, minval=1, doc = '''
%(ONLY_IF_2ND_PHASE_LAP_TEXT)s<br>
This setting assigns a cost to keeping a gap caused
when an object is missing from one of the frames of a track (the
alternative to keeping the gap is to bridge it by connecting
the tracks on either side of the missing frames).
The cost of bridging a gap is the distance, in pixels, of the
displacement of the object between frames.
<dl>
<dd><img src="memory:%(PROTIP_RECOMEND_ICON)s"> Recommendations:
<ul>
<li>Set the gap closing cost higher if tracks from objects in previous
frames are being erroneously joined, across a gap, to tracks from
objects in subsequent frames. </li>
<li>Set the gap closing cost lower if tracks
are not properly joined due to gaps caused by mis-segmentation.</li>
</ul></dd>
</dl></p>'''%globals())
self.split_cost = cps.Integer(
'Split alternative cost', 40, minval=1, doc = '''
%(ONLY_IF_2ND_PHASE_LAP_TEXT)s<br>
This setting is the cost of keeping two tracks distinct
when the alternative is to make them into one track that
splits. A split occurs when an object in one frame is assigned
to the same track as two objects in a subsequent frame.
The split cost takes two components into account:
<ul>
<li>The area of the split object relative to the area of
the resulting objects.</li>
<li>The displacement of the resulting
objects relative to the position of the original object.</li>
</ul>
The split cost is roughly measured in pixels. The split alternative cost is
(conceptually) subtracted from the cost of making the split.
<dl>
<dd><img src="memory:%(PROTIP_RECOMEND_ICON)s"> Recommendations:
<ul>
<li>The split cost should be set lower if objects are being split
that should not be split. </li>
<li>The split cost should be set higher if objects
that should be split are not.</li>
<li>If you are confident that there should be no splits present in the data,
the cost can be set to 1 (the minimum value possible)</li>
</ul></dd>
</dl>'''%globals())
self.merge_cost = cps.Integer(
'Merge alternative cost', 40, minval=1,doc = '''
%(ONLY_IF_2ND_PHASE_LAP_TEXT)s<br>
This setting is the cost of keeping two tracks
distinct when the alternative is to merge them into one.
A merge occurs when two objects in one frame are assigned to
the same track as a single object in a subsequent frame.
The merge score takes two components into account:
<ul>
<li>The area of the two objects
to be merged relative to the area of the resulting objects.</li>
<li>The displacement of the original objects relative to the final
object. </li>
</ul>
The merge cost is measured in pixels. The merge
alternative cost is (conceptually) subtracted from the
cost of making the merge.
<dl>
<dd><img src="memory:%(PROTIP_RECOMEND_ICON)s"> Recommendations:
<ul>
<li>Set the merge alternative cost lower if objects are being
merged when they should otherwise be kept separate. </li>
<li>Set the merge alternative cost
higher if objects that are not merged should be merged.</li>
<li>If you are confident that there should be no merges present in the data,
the cost can be set to 1 (the minimum value possible)</li>
</ul></dd>
</dl>'''%globals())
self.mitosis_cost = cps.Integer(
'Mitosis alternative cost', 80, minval=1, doc = '''
%(ONLY_IF_2ND_PHASE_LAP_TEXT)s<br>
This setting is the cost of not linking a parent and two daughters
            via the mitosis model. The %(TM_LAP)s tracking method weighs this
cost against the score of a potential mitosis. The model expects
the daughters to be equidistant from the parent after mitosis,
so the parent location is expected to be midway between the daughters.
In addition, the model expects the daughters' areas to be equal
to the parent's area. The mitosis score is the distance error
of the parent times the area inequality ratio of the parent and
daughters (the larger of Area(daughters) / Area(parent) and
Area(parent) / Area(daughters)).<br>
<dl>
<dd><img src="memory:%(PROTIP_RECOMEND_ICON)s"> Recommendations:
<ul>
<li>An accepted mitosis closes two gaps, so all things being equal,
the mitosis alternative cost should be approximately double the
gap closing cost.</li>
<li>Increase the mitosis alternative cost to favor more mitoses
and decrease it to prevent more mitoses candidates from being
accepted.</li>
</ul></dd>
</dl>'''%globals())
self.mitosis_max_distance = cps.Integer(
'Maximum mitosis distance, in pixel units', 40, minval=1, doc= '''
%(ONLY_IF_2ND_PHASE_LAP_TEXT)s<br>
This setting is the maximum allowed distance in pixels of either
of the daughter candidate centroids after mitosis from the parent candidate.
'''%globals())
self.max_gap_score = cps.Integer(
'Maximum gap displacement, in pixel units', 5, minval=1, doc = '''
%(ONLY_IF_2ND_PHASE_LAP_TEXT)s<br>
This setting acts as a filter for unreasonably large
displacements during the second phase.
<dl>
<dd><img src="memory:%(PROTIP_RECOMEND_ICON)s"> Recommendations:
<ul>
<li>The maximum gap displacement should be set to roughly
the maximum displacement of an object's center from frame to frame. An object that makes large
frame-to-frame jumps should have a higher value for this setting than one that only moves slightly.</li>
<li>Be aware that the LAP algorithm will run more slowly with a higher maximum gap displacement
value, since the higher this value, the more objects that must be compared at each step.</li>
<li>Objects that would have been tracked between successive frames for a lower maximum displacement
may not be tracked if the value is set higher.</li>
            <li>This setting may be the culprit if an object is not tracked frame-to-frame despite optimizing
the LAP first-pass settings.</li>
</ul></dd>
</dl>'''%globals())
self.max_merge_score = cps.Integer(
'Maximum merge score', 50, minval=1, doc = '''
%(ONLY_IF_2ND_PHASE_LAP_TEXT)s<br>
This setting acts as a filter for unreasonably large
merge scores. The merge score has two components:
<ul>
<li>The area of the resulting merged object relative to the area of the
two objects to be merged.</li>
<li>The distances between the objects to be merged and the resulting object. </li>
</ul>
<dl>
<dd><img src="memory:%(PROTIP_RECOMEND_ICON)s"> Recommendations:
<ul>
<li>The LAP algorithm will run more slowly with a higher maximum merge score value. </li>
<li>Objects that would have been merged at a lower maximum merge score will not be considered for merging.</li>
</ul></dd>
</dl>'''%globals())
self.max_split_score = cps.Integer(
'Maximum split score', 50, minval=1, doc = '''
%(ONLY_IF_2ND_PHASE_LAP_TEXT)s<br>
This setting acts as a filter for unreasonably large split scores. The split score has two components:
<ul>
<li>The area of the initial object relative to the area of the
two objects resulting from the split.</li>
<li>The distances between the original and resulting objects. </li>
</ul>
<dl>
<dd><img src="memory:%(PROTIP_RECOMEND_ICON)s"> Recommendations:
<ul>
            <li>The LAP algorithm will run more slowly with a higher maximum split score value. </li>
<li>Objects that would have been split at a lower maximum split score will not be considered for splitting.</li>
</ul></dd>
</dl>'''%globals())
self.max_frame_distance = cps.Integer(
'Maximum temporal gap, in frames', 5, minval=1, doc = '''
%(ONLY_IF_2ND_PHASE_LAP_TEXT)s<br>
            <b>Care must be taken to adjust this setting appropriately to the data.</b><br>
This setting controls the maximum number of frames that can
be skipped when merging a temporal gap caused by an unsegmented object.
These gaps occur when an image is mis-segmented and identification
fails to find an object in one or more frames.
<dl>
<dd><img src="memory:%(PROTIP_RECOMEND_ICON)s"> Recommendations:
<ul>
<li>Set the maximum gap higher in order to have more chance of correctly recapturing an object after
erroneously losing the original for a few frames.</li>
<li>Set the maximum gap lower to reduce the chance of erroneously connecting to the wrong object after
correctly losing the original object (e.g., if the cell dies or moves off-screen).</li>
</ul></dd>
</dl>'''%globals())
self.wants_lifetime_filtering = cps.Binary(
'Filter objects by lifetime?', False, doc = '''
Select <i>%(YES)s</i> if you want objects to be filtered by their
lifetime, i.e., total duration in frames. This is useful for
marking objects which transiently appear and disappear, such
as the results of a mis-segmentation. <br>
<dl>
<dd><img src="memory:%(PROTIP_RECOMEND_ICON)s"> Recommendations:
<ul>
<li>This operation does not actually delete the filtered object,
but merely removes its label from the tracked object list;
the filtered object's per-object measurements are retained.</li>
            <li>An object can be filtered only if it is tracked as a unique object.
Splits continue the lifetime count from their parents, so the minimum
lifetime value does not apply to them.</li>
</ul></dd>
</dl>'''%globals())
self.wants_minimum_lifetime = cps.Binary(
'Filter using a minimum lifetime?', True, doc = '''
<i>(Used only if objects are filtered by lifetime)</i><br>
Select <i>%(YES)s</i> to filter the object on the basis of a minimum number of frames.'''%globals())
self.min_lifetime = cps.Integer(
'Minimum lifetime', 1, minval=1,doc="""
Enter the minimum number of frames an object is permitted to persist. Objects
            which last this number of frames or fewer are filtered out.""")
self.wants_maximum_lifetime = cps.Binary(
'Filter using a maximum lifetime?', False, doc = '''
<i>(Used only if objects are filtered by lifetime)</i><br>
Select <i>%(YES)s</i> to filter the object on the basis of a maximum number of frames.'''%globals())
self.max_lifetime = cps.Integer(
'Maximum lifetime', 100, doc="""
Enter the maximum number of frames an object is permitted to persist. Objects
which last this number of frames or more are filtered out.""")
self.display_type = cps.Choice(
'Select display option', DT_ALL, doc="""
The output image can be saved as:
<ul>
<li><i>%(DT_COLOR_ONLY)s:</i> A color-labeled image, with each tracked
object assigned a unique color</li>
<li><i>%(DT_COLOR_AND_NUMBER)s:</i> Same as above but with the tracked object
number superimposed.</li>
</ul>"""%globals())
self.wants_image = cps.Binary(
"Save color-coded image?", False, doc="""
Select <i>%(YES)s</i> to retain the image showing the tracked objects
            for later use in the pipeline. For example, a common use is to save the
            image with the <b>SaveImages</b> module for quality control purposes.
<p>Please note that if you are using the second phase of the %(TM_LAP)s method,
the final labels are not assigned until <i>after</i> the pipeline has
completed the analysis run. That means that saving the color-coded image
            will only show the penultimate result and not the final product.</p>"""%globals())
self.image_name = cps.ImageNameProvider(
"Name the output image", "TrackedCells", doc = '''
<i>(Used only if saving the color-coded image)</i><br>
Enter a name to give the color-coded image of tracked labels.''')
def settings(self):
return [self.tracking_method, self.object_name, self.measurement,
self.pixel_radius, self.display_type, self.wants_image,
self.image_name, self.model,
self.radius_std, self.radius_limit,
self.wants_second_phase,
self.gap_cost, self.split_cost, self.merge_cost,
self.max_gap_score, self.max_split_score,
self.max_merge_score, self.max_frame_distance,
self.wants_lifetime_filtering, self.wants_minimum_lifetime,
self.min_lifetime, self.wants_maximum_lifetime,
self.max_lifetime, self.mitosis_cost, self.mitosis_max_distance]
def validate_module(self, pipeline):
'''Make sure that the user has selected some limits when filtering'''
if (self.tracking_method == TM_LAP and
self.wants_lifetime_filtering.value and
            (self.wants_minimum_lifetime.value == False and self.wants_maximum_lifetime.value == False) ):
raise cps.ValidationError(
'Please enter a minimum and/or maximum lifetime limit',
self.wants_lifetime_filtering)
def visible_settings(self):
result = [self.tracking_method, self.object_name]
if self.tracking_method == TM_MEASUREMENTS:
result += [ self.measurement]
if self.tracking_method == TM_LAP:
result += [self.model, self.radius_std, self.radius_limit]
result += [self.wants_second_phase]
if self.wants_second_phase:
result += [
self.gap_cost, self.split_cost, self.merge_cost,
self.mitosis_cost,
self.max_gap_score, self.max_split_score,
self.max_merge_score, self.max_frame_distance,
self.mitosis_max_distance]
else:
result += [self.pixel_radius]
result += [ self.wants_lifetime_filtering]
if self.wants_lifetime_filtering:
result += [ self.wants_minimum_lifetime ]
if self.wants_minimum_lifetime:
result += [ self.min_lifetime ]
result += [ self.wants_maximum_lifetime ]
if self.wants_maximum_lifetime:
result += [ self.max_lifetime ]
result +=[ self.display_type, self.wants_image]
if self.wants_image.value:
result += [self.image_name]
return result
@property
def static_model(self):
return self.model in (M_RANDOM, M_BOTH)
@property
def velocity_model(self):
return self.model in (M_VELOCITY, M_BOTH)
def get_ws_dictionary(self, workspace):
return self.get_dictionary(workspace.image_set_list)
def __get(self, field, workspace, default):
if self.get_ws_dictionary(workspace).has_key(field):
return self.get_ws_dictionary(workspace)[field]
return default
def __set(self, field, workspace, value):
self.get_ws_dictionary(workspace)[field] = value
def get_group_image_numbers(self, workspace):
m = workspace.measurements
assert isinstance(m, cpmeas.Measurements)
d = self.get_ws_dictionary(workspace)
group_number = m.get_group_number()
if not d.has_key("group_number") or d["group_number"] != group_number:
d["group_number"] = group_number
group_indexes = np.array([
(m.get_measurement(cpmeas.IMAGE, cpmeas.GROUP_INDEX, i), i)
for i in m.get_image_numbers()
if m.get_measurement(cpmeas.IMAGE, cpmeas.GROUP_NUMBER, i) ==
group_number], int)
order = np.lexsort([group_indexes[:, 0]])
d["group_image_numbers"] = group_indexes[order, 1]
return d["group_image_numbers"]
def get_saved_measurements(self, workspace):
return self.__get("measurements", workspace, np.array([], float))
def set_saved_measurements(self, workspace, value):
self.__set("measurements", workspace, value)
def get_saved_coordinates(self, workspace):
return self.__get("coordinates", workspace, np.zeros((2,0), int))
def set_saved_coordinates(self, workspace, value):
self.__set("coordinates", workspace, value)
def get_orig_coordinates(self, workspace):
'''The coordinates of the first occurrence of an object's ancestor'''
return self.__get("orig coordinates", workspace, np.zeros((2,0), int))
def set_orig_coordinates(self, workspace, value):
self.__set("orig coordinates", workspace, value)
def get_saved_labels(self, workspace):
return self.__get("labels", workspace, None)
def set_saved_labels(self, workspace, value):
self.__set("labels", workspace, value)
def get_saved_object_numbers(self, workspace):
return self.__get("object_numbers", workspace, np.array([], int))
def set_saved_object_numbers(self, workspace, value):
return self.__set("object_numbers", workspace, value)
def get_saved_ages(self, workspace):
return self.__get("ages", workspace, np.array([], int))
def set_saved_ages(self, workspace, values):
self.__set("ages", workspace, values)
def get_saved_distances(self, workspace):
return self.__get("distances", workspace, np.zeros((0,)))
def set_saved_distances(self, workspace, values):
self.__set("distances", workspace, values)
def get_max_object_number(self, workspace):
return self.__get("max_object_number", workspace, 0)
def set_max_object_number(self, workspace, value):
self.__set("max_object_number", workspace, value)
def get_kalman_states(self, workspace):
return self.__get("kalman_states", workspace, None)
def set_kalman_states(self, workspace, value):
self.__set("kalman_states", workspace, value)
def prepare_group(self, workspace, grouping, image_numbers):
'''Erase any tracking information at the start of a run'''
d = self.get_dictionary(workspace.image_set_list)
d.clear()
return True
def measurement_name(self, feature):
'''Return a measurement name for the given feature'''
if self.tracking_method == TM_LAP:
return "%s_%s" % (F_PREFIX, feature)
return "%s_%s_%s" % (F_PREFIX, feature, str(self.pixel_radius.value))
def image_measurement_name(self, feature):
'''Return a measurement name for an image measurement'''
if self.tracking_method == TM_LAP:
return "%s_%s_%s" % (F_PREFIX, feature, self.object_name.value)
return "%s_%s_%s_%s" % (F_PREFIX, feature, self.object_name.value,
str(self.pixel_radius.value))
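    # For example (illustrative, default pixel radius of 50 and objects named
    # "Nuclei"): with the Distance method, measurement_name(F_LABEL) gives
    # "TrackObjects_Label_50" and image_measurement_name(F_LOST_OBJECT_COUNT)
    # gives "TrackObjects_LostObjectCount_Nuclei_50"; the LAP method omits the
    # radius suffix.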
def add_measurement(self, workspace, feature, values):
'''Add a measurement to the workspace's measurements
workspace - current image set's workspace
feature - name of feature being measured
values - one value per object
'''
workspace.measurements.add_measurement(
self.object_name.value,
self.measurement_name(feature),
values)
def add_image_measurement(self, workspace, feature, value):
measurement_name = self.image_measurement_name(feature)
workspace.measurements.add_image_measurement(measurement_name, value)
def run(self, workspace):
objects = workspace.object_set.get_objects(self.object_name.value)
if self.tracking_method == TM_DISTANCE:
self.run_distance(workspace, objects)
elif self.tracking_method == TM_OVERLAP:
self.run_overlap(workspace, objects)
elif self.tracking_method == TM_MEASUREMENTS:
self.run_measurements(workspace, objects)
elif self.tracking_method == TM_LAP:
self.run_lapdistance(workspace, objects)
else:
raise NotImplementedError("Unimplemented tracking method: %s" %
self.tracking_method.value)
if self.wants_image.value:
import matplotlib.figure
import matplotlib.axes
import matplotlib.backends.backend_agg
import matplotlib.transforms
from cellprofiler.gui.cpfigure_tools import figure_to_image, only_display_image
figure = matplotlib.figure.Figure()
canvas = matplotlib.backends.backend_agg.FigureCanvasAgg(figure)
ax = figure.add_subplot(1,1,1)
self.draw(objects.segmented, ax,
self.get_saved_object_numbers(workspace))
#
# This is the recipe for just showing the axis
#
only_display_image(figure, objects.segmented.shape)
image_pixels = figure_to_image(figure, dpi=figure.dpi)
image = cpi.Image(image_pixels)
workspace.image_set.add(self.image_name.value, image)
if self.show_window:
workspace.display_data.labels = objects.segmented
workspace.display_data.object_numbers = \
self.get_saved_object_numbers(workspace)
def display(self, workspace, figure):
if hasattr(workspace.display_data, "labels"):
figure.set_subplots((1, 1))
subfigure = figure.figure
subfigure.clf()
ax = subfigure.add_subplot(1,1,1)
self.draw(workspace.display_data.labels, ax,
workspace.display_data.object_numbers)
else:
# We get here after running as a data tool
figure.figure.text(.5, .5, "Analysis complete",
ha="center", va="center")
def draw(self, labels, ax, object_numbers):
import matplotlib
indexer = np.zeros(len(object_numbers)+1,int)
indexer[1:] = object_numbers
#
# We want to keep the colors stable, but we also want the
# largest possible separation between adjacent colors. So, here
# we reverse the significance of the bits in the indices so
        # that adjacent numbers (e.g. 0 and 1) differ by 128, roughly
#
pow_of_2 = 2**np.mgrid[0:8,0:len(indexer)][0]
bits = (indexer & pow_of_2).astype(bool)
indexer = np.sum(bits.transpose() * (2 ** np.arange(7,-1,-1)), 1)
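        # For example, label 1 (00000001b) becomes 128 (10000000b) and label 2
        # becomes 64, so consecutive object numbers land on well-separated
        # colormap entries.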
recolored_labels = indexer[labels]
cm = matplotlib.cm.get_cmap(cpprefs.get_default_colormap())
cm.set_bad((0,0,0))
norm = matplotlib.colors.BoundaryNorm(range(256), 256)
        img = ax.imshow(np.ma.array(recolored_labels, mask=(labels==0)),
cmap=cm, norm=norm)
if self.display_type == DT_COLOR_AND_NUMBER:
i,j = centers_of_labels(labels)
for n, x, y in zip(object_numbers, j, i):
if np.isnan(x) or np.isnan(y):
# This happens if there are missing labels
continue
ax.annotate(str(n), xy=(x,y),color='white',
arrowprops=dict(visible=False))
def run_distance(self, workspace, objects):
'''Track objects based on distance'''
old_i, old_j = self.get_saved_coordinates(workspace)
if len(old_i):
distances, (i,j) = distance_transform_edt(objects.segmented == 0,
return_indices=True)
#
# Look up the coordinates of the nearest new object (given by
# the transform i,j), then look up the label at that coordinate
# (objects.segmented[#,#])
#
new_object_numbers = objects.segmented[i[old_i, old_j],
j[old_i, old_j]]
#
# Mask out any objects at too great of a distance
#
new_object_numbers[distances[old_i, old_j] >
self.pixel_radius.value] = 0
#
# Do the same with the new centers and old objects
#
i,j = (centers_of_labels(objects.segmented)+.5).astype(int)
old_labels = self.get_saved_labels(workspace)
distances, (old_i,old_j) = distance_transform_edt(
old_labels == 0,
return_indices=True)
old_object_numbers = old_labels[old_i[i, j],
old_j[i, j]]
old_object_numbers[distances[i, j] > self.pixel_radius.value] = 0
self.map_objects(workspace,
new_object_numbers,
old_object_numbers,
i,j)
else:
i,j = (centers_of_labels(objects.segmented)+.5).astype(int)
count = len(i)
self.map_objects(workspace, np.zeros((0,),int),
np.zeros(count,int), i,j)
self.set_saved_labels(workspace, objects.segmented)
def run_lapdistance(self, workspace, objects):
'''Track objects based on distance'''
m = workspace.measurements
old_i, old_j = self.get_saved_coordinates(workspace)
n_old = len(old_i)
#
# Automatically set the cost of birth and death above
# that of the largest allowable cost.
#
costBorn = costDie = self.radius_limit.max * 1.10
kalman_states = self.get_kalman_states(workspace)
        if kalman_states is None:
if self.static_model:
kalman_states = [ cpfilter.static_kalman_model()]
else:
kalman_states = []
if self.velocity_model:
kalman_states.append(cpfilter.velocity_kalman_model())
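        # Object areas (pixel counts per label) feed the measurement variance
        # below and the F_AREA measurement used by the second-phase scoring.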
areas = fix(scipy.ndimage.sum(
np.ones(objects.segmented.shape), objects.segmented,
np.arange(1, np.max(objects.segmented) + 1,dtype=np.int32)))
areas = areas.astype(int)
model_types = np.array(
[m for m, s in ((KM_NO_VEL, self.static_model),
(KM_VEL, self.velocity_model)) if s], int)
if n_old > 0:
new_i, new_j = centers_of_labels(objects.segmented)
n_new = len(new_i)
i,j = np.mgrid[0:n_old, 0:n_new]
##############################
#
# Kalman filter prediction
#
#
# We take the lowest cost among all possible models
#
minDist = np.ones((n_old, n_new)) * self.radius_limit.max
d = np.ones((n_old, n_new)) * np.inf
sd = np.zeros((n_old, n_new))
# The index of the Kalman filter used: -1 means not used
kalman_used = -np.ones((n_old, n_new), int)
for nkalman, kalman_state in enumerate(kalman_states):
assert isinstance(kalman_state, cpfilter.KalmanState)
obs = kalman_state.predicted_obs_vec
dk = np.sqrt((obs[i,0] - new_i[j])**2 +
(obs[i,1] - new_j[j])**2)
noise_sd = np.sqrt(np.sum(kalman_state.noise_var[:,0:2], 1))
radius = np.maximum(np.minimum(noise_sd * self.radius_std.value,
self.radius_limit.max),
self.radius_limit.min)
is_best = ((dk < d) & (dk < radius[:, np.newaxis]))
d[is_best] = dk[is_best]
minDist[is_best] = radius[i][is_best]
kalman_used[is_best] = nkalman
minDist = np.maximum(np.minimum(minDist, self.radius_limit.max),
self.radius_limit.min)
#
#############################
#
# Linear assignment setup
#
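            # The assignment problem couples old and new objects: linking a pair
            # costs their distance (only pairs within minDist are candidates),
            # while leaving an object unmatched costs costDie or costBorn.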
n = len(old_i)+len(new_i)
kk = np.zeros((n+10)*(n+10), np.int32)
first = np.zeros(n+10, np.int32)
cc = np.zeros((n+10)*(n+10), np.float)
t = np.argwhere((d < minDist))
x = np.sqrt((old_i[t[0:t.size, 0]]-new_i[t[0:t.size, 1]])**2 + (old_j[t[0:t.size, 0]]-new_j[t[0:t.size, 1]])**2)
t = t+1
t = np.column_stack((t, x))
a = np.arange(len(old_i))+2
x = np.searchsorted(t[0:(t.size/2),0], a)
a = np.arange(len(old_i))+1
b = np.arange(len(old_i))+len(new_i)+1
c = np.zeros(len(old_i))+costDie
b = np.column_stack((a, b, c))
t = np.insert(t, x, b, 0)
i,j = np.mgrid[0:len(new_i),0:len(old_i)+1]
i = i+len(old_i)+1
j = j+len(new_i)
j[0:len(new_i)+1,0] = i[0:len(new_i)+1,0]-len(old_i)
x = np.zeros((len(new_i),len(old_i)+1))
x[0:len(new_i)+1,0] = costBorn
i = i.flatten()
j = j.flatten()
x = x.flatten()
x = np.column_stack((i, j, x))
t = np.vstack((t, x))
# Tack 0 <-> 0 at the start because object #s start at 1
i = np.hstack([0,t[:,0].astype(int)])
j = np.hstack([0,t[:,1].astype(int)])
c = np.hstack([0,t[:,2]])
x, y = lapjv(i, j, c)
a = np.argwhere(x > len(new_i))
b = np.argwhere(y >len(old_i))
x[a[0:len(a)]] = 0
y[b[0:len(b)]] = 0
a = np.arange(len(old_i))+1
b = np.arange(len(new_i))+1
new_object_numbers = x[a[0:len(a)]].astype(int)
old_object_numbers = y[b[0:len(b)]].astype(int)
###############################
#
# Kalman filter update
#
model_idx = np.zeros(len(old_object_numbers), int)
linking_distance = np.ones(len(old_object_numbers)) * np.NaN
standard_deviation = np.ones(len(old_object_numbers)) * np.NaN
model_type = np.ones(len(old_object_numbers), int) * KM_NONE
link_type = np.ones(len(old_object_numbers), int) * LT_NONE
mask = old_object_numbers > 0
old_idx = old_object_numbers - 1
model_idx[mask] =\
kalman_used[old_idx[mask], mask]
linking_distance[mask] = d[old_idx[mask], mask]
standard_deviation[mask] = \
linking_distance[mask] / noise_sd[old_idx[mask]]
model_type[mask] = model_types[model_idx[mask]]
link_type[mask] = LT_PHASE_1
#
# The measurement covariance is the square of the
# standard deviation of the measurement error. Assume
# that the measurement error comes from not knowing where
# the center is within the cell, then the error is
# proportional to the radius and the square to the area.
#
measurement_variance = areas.astype(float) / np.pi
#
# Broadcast the measurement error into a diagonal matrix
#
r = (measurement_variance[:, np.newaxis, np.newaxis] *
np.eye(2)[np.newaxis,:,:])
new_kalman_states = []
for kalman_state in kalman_states:
#
# The process noise covariance is a diagonal of the
# state noise variance.
#
state_len = kalman_state.state_len
q = np.zeros((len(old_idx), state_len, state_len))
if np.any(mask):
#
# Broadcast into the diagonal
#
new_idx = np.arange(len(old_idx))[mask]
matching_idx = old_idx[new_idx]
i,j = np.mgrid[0:len(matching_idx),0:state_len]
q[new_idx[i], j, j] = \
kalman_state.noise_var[matching_idx[i],j]
new_kalman_state = cpfilter.kalman_filter(
kalman_state,
old_idx,
np.column_stack((new_i, new_j)),
q,r)
new_kalman_states.append(new_kalman_state)
self.set_kalman_states(workspace, new_kalman_states)
i,j = (centers_of_labels(objects.segmented)+.5).astype(int)
self.map_objects(workspace,
new_object_numbers,
old_object_numbers,
i,j)
else:
i,j = centers_of_labels(objects.segmented)
count = len(i)
link_type = np.ones(count, int) * LT_NONE
model_type = np.ones(count, int) * KM_NONE
linking_distance = np.ones(count) * np.NaN
standard_deviation = np.ones(count) * np.NaN
#
# Initialize the kalman_state with the new objects
#
new_kalman_states = []
r = np.zeros((count, 2, 2))
for kalman_state in kalman_states:
q = np.zeros((count, kalman_state.state_len, kalman_state.state_len))
new_kalman_state = cpfilter.kalman_filter(
kalman_state, -np.ones(count),
np.column_stack((i,j)), q, r)
new_kalman_states.append(new_kalman_state)
self.set_kalman_states(workspace, new_kalman_states)
i = (i+.5).astype(int)
j = (j+.5).astype(int)
self.map_objects(workspace, np.zeros((0,),int),
np.zeros(count,int), i,j)
m = workspace.measurements
assert isinstance(m, cpmeas.Measurements)
m.add_measurement(self.object_name.value,
self.measurement_name(F_AREA),
areas)
m[self.object_name.value,
self.measurement_name(F_LINKING_DISTANCE)] = linking_distance
m[self.object_name.value,
self.measurement_name(F_STANDARD_DEVIATION)] = standard_deviation
m[self.object_name.value,
self.measurement_name(F_MOVEMENT_MODEL)] = model_type
m[self.object_name.value,
self.measurement_name(F_LINK_TYPE)] = link_type
self.save_kalman_measurements(workspace)
self.set_saved_labels(workspace, objects.segmented)
def get_kalman_models(self):
'''Return tuples of model and names of the vector elements'''
if self.static_model:
models = [ (F_STATIC_MODEL, (F_Y, F_X))]
else:
models = []
if self.velocity_model:
models.append((F_VELOCITY_MODEL, (F_Y, F_X, F_VY, F_VX)))
return models
def save_kalman_measurements(self, workspace):
'''Save the first-pass state_vec, state_cov and state_noise'''
m = workspace.measurements
object_name = self.object_name.value
for (model, elements), kalman_state in zip(
self.get_kalman_models(), self.get_kalman_states(workspace)):
assert isinstance(kalman_state, cpfilter.KalmanState)
nobjs = len(kalman_state.state_vec)
if nobjs > 0:
#
# Get the last state_noise entry for each object
#
# scipy.ndimage.maximum probably should return NaN if
# no index exists, but, in 0.8.0, returns 0. So stack
# a bunch of -1 values so every object will have a "-1"
# index.
last_idx = scipy.ndimage.maximum(
np.hstack((
-np.ones(nobjs),
np.arange(len(kalman_state.state_noise_idx)))),
np.hstack((
np.arange(nobjs), kalman_state.state_noise_idx)),
np.arange(nobjs))
last_idx = last_idx.astype(int)
for i, element in enumerate(elements):
#
# state_vec
#
mname = self.measurement_name(
kalman_feature(model, F_STATE, element))
values = np.zeros(0) if nobjs == 0 else kalman_state.state_vec[:,i]
m.add_measurement(object_name, mname, values)
#
# state_noise
#
mname = self.measurement_name(
kalman_feature(model, F_NOISE, element))
values = np.zeros(nobjs)
if nobjs > 0:
values[last_idx == -1] = np.NaN
values[last_idx > -1] = kalman_state.state_noise[last_idx[last_idx > -1], i]
m.add_measurement(object_name, mname, values)
#
# state_cov
#
for j, el2 in enumerate(elements):
mname = self.measurement_name(
kalman_feature(model, F_COV, element, el2))
values = kalman_state.state_cov[:, i, j]
m.add_measurement(object_name, mname, values)
def run_overlap(self, workspace, objects):
'''Track objects by maximum # of overlapping pixels'''
current_labels = objects.segmented
old_labels = self.get_saved_labels(workspace)
i,j = (centers_of_labels(objects.segmented)+.5).astype(int)
if old_labels is None:
count = len(i)
self.map_objects(workspace, np.zeros((0,),int),
np.zeros(count,int), i,j)
else:
mask = ((current_labels > 0) & (old_labels > 0))
cur_count = np.max(current_labels)
old_count = np.max(old_labels)
count = np.sum(mask)
if count == 0:
# There's no overlap.
self.map_objects(workspace,
np.zeros(old_count, int),
np.zeros(cur_count,int),
i,j)
else:
cur = current_labels[mask]
old = old_labels[mask]
histogram = scipy.sparse.coo_matrix(
(np.ones(count),(cur, old)),
shape=(cur_count+1,old_count+1)).toarray()
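                # histogram[c, o] counts pixels where current label c overlaps
                # old label o; the argmax along each axis then picks the
                # best-overlapping partner in each direction.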
old_of_new = np.argmax(histogram, 1)[1:]
new_of_old = np.argmax(histogram, 0)[1:]
#
# The cast here seems to be needed to make scipy.ndimage.sum
# work. See http://projects.scipy.org/numpy/ticket/1012
#
old_of_new = np.array(old_of_new, np.int16)
old_of_new = np.array(old_of_new, np.int32)
new_of_old = np.array(new_of_old, np.int16)
new_of_old = np.array(new_of_old, np.int32)
self.map_objects(workspace,
new_of_old,
old_of_new,
i,j)
self.set_saved_labels(workspace, current_labels)
def run_measurements(self, workspace, objects):
current_labels = objects.segmented
new_measurements = workspace.measurements.get_current_measurement(
self.object_name.value,
self.measurement.value)
old_measurements = self.get_saved_measurements(workspace)
old_labels = self.get_saved_labels(workspace)
i,j = (centers_of_labels(objects.segmented)+.5).astype(int)
if old_labels is None:
count = len(i)
self.map_objects(workspace, np.zeros((0,),int),
np.zeros(count,int), i,j)
else:
associations = associate_by_distance(old_labels, current_labels,
self.pixel_radius.value)
best_child = np.zeros(len(old_measurements), int)
best_parent = np.zeros(len(new_measurements), int)
best_child_measurement = (np.ones(len(old_measurements), int) *
np.finfo(float).max)
best_parent_measurement = (np.ones(len(new_measurements), int) *
np.finfo(float).max)
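            # For each association within pixel_radius, keep the new object whose
            # measurement is closest to each old object (best_child) and the old
            # object closest to each new object (best_parent).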
for old, new in associations:
diff = abs(old_measurements[old-1] - new_measurements[new-1])
if diff < best_child_measurement[old-1]:
best_child[old-1] = new
best_child_measurement[old-1] = diff
if diff < best_parent_measurement[new-1]:
best_parent[new-1] = old
best_parent_measurement[new-1] = diff
self.map_objects(workspace, best_child, best_parent, i,j)
self.set_saved_labels(workspace,current_labels)
self.set_saved_measurements(workspace, new_measurements)
def run_as_data_tool(self, workspace):
m = workspace.measurements
assert isinstance(m, cpmeas.Measurements)
group_numbers = {}
for i in m.get_image_numbers():
group_number = m.get_measurement(cpmeas.IMAGE,
cpmeas.GROUP_NUMBER, i)
group_index = m.get_measurement(cpmeas.IMAGE,
cpmeas.GROUP_INDEX, i)
if ((not group_numbers.has_key(group_number)) or
(group_numbers[group_number][1] > group_index)):
group_numbers[group_number] = (i, group_index)
for group_number in sorted(group_numbers.keys()):
m.image_set_number = group_numbers[group_number][0]
self.post_group(workspace, {})
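    # flood() recursively propagates the component number `at` through the
    # per-object link arrays (a, b, c, d), effectively labeling connected
    # components of the track-relation graph in z.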
def flood(self, i, at, a, b, c, d, z):
z[i] = at
if(a[i] != -1 and z[a[i]] == 0):
z = self.flood(a[i], at, a, b, c, d, z)
if(b[i] != -1 and z[b[i]] == 0):
z = self.flood(b[i], at, a, b, c, d, z)
if(c[i] != -1 and z[c[i]] == 0):
z = self.flood(c[i], at, a, b, c, d, z)
        if(d[i] != -1 and z[d[i]] == 0):
            z = self.flood(d[i], at, a, b, c, d, z)
return z
def is_aggregation_module(self):
'''We connect objects across imagesets within a group = aggregation'''
return True
def post_group(self, workspace, grouping):
# If any tracking method other than LAP, recalculate measurements
# (Really, only the final age needs to be re-done)
if self.tracking_method != TM_LAP:
m = workspace.measurements
assert(isinstance(m, cpmeas.Measurements))
image_numbers = self.get_group_image_numbers(workspace)
self.recalculate_group(workspace, image_numbers)
return
if (self.tracking_method != TM_LAP or
not self.wants_second_phase):
return
gap_cost = float(self.gap_cost.value)
split_alternative_cost = float(self.split_cost.value) / 2
merge_alternative_cost = float(self.merge_cost.value)
mitosis_alternative_cost = float(self.mitosis_cost.value)
max_gap_score = self.max_gap_score.value
max_merge_score = self.max_merge_score.value
max_split_score = self.max_split_score.value / 2 # to match legacy
max_frame_difference = self.max_frame_distance.value
m = workspace.measurements
assert(isinstance(m, cpmeas.Measurements))
image_numbers = self.get_group_image_numbers(workspace)
object_name = self.object_name.value
label, object_numbers, a, b, Area, \
parent_object_numbers, parent_image_numbers = [
[m.get_measurement(object_name, feature, i).astype(mtype)
for i in image_numbers]
for feature, mtype in (
(self.measurement_name(F_LABEL), int),
(cpmeas.OBJECT_NUMBER, int),
(M_LOCATION_CENTER_X, float),
(M_LOCATION_CENTER_Y, float),
(self.measurement_name(F_AREA), float),
(self.measurement_name(F_PARENT_OBJECT_NUMBER), int),
(self.measurement_name(F_PARENT_IMAGE_NUMBER), int)
)]
group_indices, new_object_count, lost_object_count, merge_count, \
split_count = [
np.array([m.get_measurement(cpmeas.IMAGE, feature, i)
for i in image_numbers], int)
for feature in (cpmeas.GROUP_INDEX,
self.image_measurement_name(F_NEW_OBJECT_COUNT),
self.image_measurement_name(F_LOST_OBJECT_COUNT),
self.image_measurement_name(F_MERGE_COUNT),
self.image_measurement_name(F_SPLIT_COUNT))]
#
# Map image number to group index and vice versa
#
image_number_group_index = np.zeros(np.max(image_numbers) + 1, int)
image_number_group_index[image_numbers] = np.array(group_indices, int)
group_index_image_number = np.zeros(np.max(group_indices) + 1, int)
group_index_image_number[group_indices] = image_numbers
if all([len(lll) == 0 for lll in label]):
return # Nothing to do
        #sets up the arrays F, L, and P (P1 and P2 are derived from P below)
#F is an array of all the cells that are the starts of segments
# F[:, :2] are the coordinates
# F[:, 2] is the image index
# F[:, 3] is the object index
# F[:, 4] is the object number
# F[:, 5] is the label
# F[:, 6] is the area
# F[:, 7] is the index into P
#L is the ends
#P includes all cells
X = 0
Y = 1
IIDX = 2
OIIDX = 3
ONIDX = 4
LIDX = 5
AIDX = 6
PIDX = 7
P = np.vstack([
np.column_stack((x, y, np.ones(len(x)) * i, np.arange(len(x)),
o, l, area, np.zeros(len(x))))
for i, (x, y, o, l, area)
in enumerate(zip(a, b, object_numbers, label, Area))])
count_per_label = np.bincount(P[:, LIDX].astype(int))
idx = np.hstack([0, np.cumsum(count_per_label)])
unique_label = np.unique(P[:, LIDX].astype(int))
order = np.lexsort((P[:, OIIDX], P[:, IIDX], P[:, LIDX]))
P = P[order, :]
P[:, PIDX] = np.arange(len(P))
F = P[idx[unique_label], :]
L = P[idx[unique_label + 1] - 1, :]
# Creates P1 and P2, which is P without the starts and ends
# of segments respectively, representing possible
# points of merges and splits respectively
P1 = np.delete(P, idx[:-1], 0)
P2 = np.delete(P, idx[1:] - 1, 0)
##################################################
#
# Addresses of supplementary nodes:
#
# The LAP array is composed of the following ranges
#
# Count | node type
# ------------------
# T | segment starts and ends
# T | gaps
# OB | split starts
# OB | merge ends
# M | mitoses
#
# T = # tracks
# OB = # of objects that can serve as merge or split points
# M = # of mitoses
#
# The graph:
#
# Gap Alternatives (in other words, do nothing)
# ----------------------------------------------
# End[i] <----> Gap alternative[i]
# Gap alternative[i] <----> Start[i]
# Split[i] <----> Split[i]
# Merge[j] <----> Merge[j]
# Mitosis[i] <----> Mitosis[i]
#
#
# Bridge gaps:
# -----------------------------------------------
#
# End[i] <---> Start[j]
# Gap alternative[i] <----> Gap alternative[j]
#
# Splits
# -----------------------------------------------
#
# Split[i] <----> Start[j]
# Gap alternative[j] <----> Split[i]
#
# Merges
# -----------------------------------------------
# End[i] <----> Merge[j]
# Merge[j] <----> Gap alternative[i]
#
# Mitoses
# -----------------------------------------------
# The mitosis model is somewhat imperfect. The mitosis
# caps the parent and makes it unavailable as a candidate
# for a gap closing. In the best case, there is only one
# mitosis candidate for the left and right child and
# the left and right child are connected to gap alternatives,
# but there may be competing splits, gap closings or
# other mitoses.
#
# We take a greedy approach, ordering the mitoses by their
# scores and fulfilling them. After processing the mitoses,
# we run LAP again, keeping only the parent nodes of untaken
# mitoses and child nodes connected to gap alternatives
#
# End[i] <----> Mitosis[j]
#
##################################################
end_nodes = []
start_nodes = []
scores = []
#
# The offsets and lengths of the start/end node ranges
#
start_end_off = 0
start_end_len = len(L)
gap_off = start_end_end = start_end_len
gap_end = gap_off + start_end_len
#-------------------------------------------
#
# Null model (do nothing)
#
#-------------------------------------------
for first, second in ((end_nodes, start_nodes),
(start_nodes, end_nodes)):
first.append(np.arange(start_end_len))
second.append(np.arange(start_end_len) + gap_off)
scores.append(np.ones(start_end_len) * gap_cost/2)
#------------------------------------------
#
# Gap-closing model
#
#------------------------------------------
#
# Create the edges between ends and starts.
# The edge weight is the gap pair cost.
#
a, gap_scores = self.get_gap_pair_scores(F, L, max_frame_difference)
# filter by max gap score
mask = gap_scores <= max_gap_score
if np.sum(mask) > 0:
a, gap_scores = a[mask], gap_scores[mask]
end_nodes.append(a[:, 0])
start_nodes.append(a[:, 1])
scores.append(gap_scores)
#
# Hook the gap alternative ends of the starts to
# the gap alternative starts of the ends
#
end_nodes.append(a[:, 1] + gap_off)
start_nodes.append(a[:, 0] + gap_off)
scores.append(np.zeros(len(gap_scores)))
#---------------------------------------------------
#
# Merge model
#
#---------------------------------------------------
#
# The first column of z is the index of the track that ends. The second
# is the index into P2 of the object to be merged into
#
merge_off = gap_end
if len(P1) > 0:
# Do the initial winnowing in chunks of 10m pairs
lchunk_size = 10000000 / len(P1)
chunks = []
for lstart in range(0, len(L), lchunk_size):
lend = min(len(L), lstart+lchunk_size)
merge_p1idx, merge_lidx = \
[_.flatten() for _ in np.mgrid[0:len(P1), lstart:lend]]
z = (P1[merge_p1idx, IIDX] - L[merge_lidx, IIDX]).astype(np.int32)
mask = (z <= max_frame_difference) & (z > 0)
if np.sum(mask) > 0:
chunks.append([_[mask] for _ in merge_p1idx, merge_lidx, z])
if len(chunks) > 0:
merge_p1idx, merge_lidx, z = [
np.hstack([_[i] for _ in chunks]) for i in range(3)]
else:
merge_p1idx = merge_lidx = z = np.zeros(0, np.int32)
else:
merge_p1idx = merge_lidx = z = np.zeros(0, np.int32)
if len(z) > 0:
# Calculate penalty = distance * area penalty
AreaLast = L[merge_lidx, AIDX]
AreaBeforeMerge = P[P1[merge_p1idx, PIDX].astype(int) - 1, AIDX]
AreaAtMerge = P1[merge_p1idx, AIDX]
rho = self.calculate_area_penalty(
AreaLast + AreaBeforeMerge, AreaAtMerge)
d = np.sqrt(np.sum((L[merge_lidx, :2]-P2[merge_p1idx, :2])**2, 1))
merge_scores = d * rho
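            # The merge score is distance * area penalty; calculate_area_penalty
            # (defined elsewhere in this module) is expected to grow as the merged
            # area deviates from the sum of the pre-merge areas.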
mask = merge_scores <= max_merge_score
merge_p1idx, merge_lidx, merge_scores = [
_[mask] for _ in merge_p1idx, merge_lidx, merge_scores]
merge_len = np.sum(mask)
if merge_len > 0:
#
# The end nodes are the ends being merged to the intermediates
# The start nodes are the intermediates and have node #s
# that start at merge_off
#
end_nodes.append(merge_lidx)
start_nodes.append(merge_off + np.arange(merge_len))
scores.append(merge_scores)
#
# Hook the gap alternative starts for the ends to
# the merge nodes
#
end_nodes.append(merge_off + np.arange(merge_len))
start_nodes.append(merge_lidx + gap_off)
scores.append(np.ones(merge_len) * gap_cost / 2)
#
# The alternative hypothesis is represented by merges hooked
# to merges
#
end_nodes.append(merge_off + np.arange(merge_len))
start_nodes.append(merge_off + np.arange(merge_len))
scores.append(np.ones(merge_len) * merge_alternative_cost)
else:
merge_len = 0
merge_end = merge_off+merge_len
#------------------------------------------------------
#
# Split model
#
#------------------------------------------------------
split_off = merge_end
if len(P2) > 0:
lchunk_size = 10000000 / len(P2)
chunks = []
for fstart in range(0, len(L), lchunk_size):
fend = min(len(L), fstart+lchunk_size)
split_p2idx, split_fidx = \
[_.flatten() for _ in np.mgrid[0:len(P2), fstart:fend]]
z = (F[split_fidx, IIDX] - P2[split_p2idx, IIDX]).astype(np.int32)
mask = (z <= max_frame_difference) & (z > 0)
if np.sum(mask) > 0:
chunks.append(
[_[mask] for _ in split_p2idx, split_fidx, z])
if len(chunks) > 0:
split_p2idx, split_fidx, z = [
np.hstack([_[i] for _ in chunks]) for i in range(3)]
else:
split_p2idx = split_fidx = z = np.zeros(0, np.int32)
else:
split_p2idx = split_fidx = z = np.zeros(0, int)
if len(z) > 0:
AreaFirst = F[split_fidx, AIDX]
AreaAfterSplit = P[ P2[split_p2idx, PIDX].astype(int) + 1, AIDX]
AreaAtSplit = P2[split_p2idx, AIDX]
d = np.sqrt(np.sum((F[split_fidx, :2] - P2[split_p2idx, :2])**2, 1))
rho = self.calculate_area_penalty(
AreaFirst + AreaAfterSplit, AreaAtSplit)
split_scores = d * rho
mask = (split_scores <= max_split_score)
split_p2idx, split_fidx, split_scores = \
[_[mask] for _ in split_p2idx, split_fidx, split_scores]
split_len = np.sum(mask)
if split_len > 0:
#
# The end nodes are the intermediates (starting at split_off)
# The start nodes are the F
#
end_nodes.append(np.arange(split_len) + split_off)
start_nodes.append(split_fidx)
scores.append(split_scores)
#
# Hook the alternate ends to the split starts
#
end_nodes.append(split_fidx + gap_off)
start_nodes.append(np.arange(split_len) + split_off)
scores.append(np.ones(split_len) * gap_cost/2)
#
# The alternate hypothesis is split nodes hooked to themselves
#
end_nodes.append(np.arange(split_len) + split_off)
start_nodes.append(np.arange(split_len) + split_off)
scores.append(np.ones(split_len) * split_alternative_cost)
else:
split_len = 0
split_end = split_off + split_len
#----------------------------------------------------------
#
# Mitosis model
#
#----------------------------------------------------------
mitoses, mitosis_scores = self.get_mitotic_triple_scores(F, L)
n_mitoses = len(mitosis_scores)
if n_mitoses > 0:
order = np.argsort(mitosis_scores)
mitoses, mitosis_scores = mitoses[order], mitosis_scores[order]
MDLIDX = 0 # index of left daughter
MDRIDX = 1 # index of right daughter
MPIDX = 2 # index of parent
mitoses_parent_lidx = mitoses[:, MPIDX]
mitoses_left_child_findx = mitoses[:, MDLIDX]
mitoses_right_child_findx = mitoses[:, MDRIDX]
#
# Create the ranges for mitoses
#
mitosis_off = split_end
mitosis_len = n_mitoses
mitosis_end = mitosis_off + mitosis_len
if n_mitoses > 0:
#
# Taking the mitosis score will cost us the parent gap at least.
#
end_nodes.append(mitoses_parent_lidx)
start_nodes.append(np.arange(n_mitoses) + mitosis_off)
scores.append(mitosis_scores)
#
# Balance the mitosis against the gap alternative.
#
end_nodes.append(np.arange(n_mitoses) + mitosis_off)
start_nodes.append(mitoses_parent_lidx + gap_off)
scores.append(np.ones(n_mitoses) * gap_cost / 2)
#
# The alternative hypothesis links mitosis to mitosis
# We charge the alternative hypothesis the mitosis_alternative
# cost.
#
end_nodes.append(np.arange(n_mitoses) + mitosis_off)
start_nodes.append(np.arange(n_mitoses) + mitosis_off)
scores.append(np.ones(n_mitoses) * mitosis_alternative_cost)
i = np.hstack(end_nodes)
j = np.hstack(start_nodes)
c = scores = np.hstack(scores)
#-------------------------------------------------------
#
# LAP Processing # 1
#
x, y = lapjv(i, j, c)
score_matrix = scipy.sparse.coo.coo_matrix((c, (i, j))).tocsr()
#---------------------------
#
# Useful debugging diagnostics
#
def desc(node):
'''Describe a node for graphviz'''
fl = F
if node < start_end_end:
fmt = "N%d:%d"
idx = node
elif node < gap_end:
fmt = "G%d:%d"
idx = node - gap_off
elif node < merge_end:
fmt = "M%d:%d"
idx = merge_p1idx[node - merge_off]
fl = P1
elif node < split_end:
fmt = "S%d:%d"
idx = split_p2idx[node - split_off]
fl = P2
else:
mitosis = mitoses[node - mitosis_off]
(lin, lon), (rin, ron), (pin, pon) = [
(image_numbers[fl[idx, IIDX]], fl[idx, ONIDX])
for idx, fl in zip(mitosis, (F, F, L))]
return "n%d[label=\"MIT%d:%d->%d:%d+%d:%d\"]" % (
node, pin, pon, lin, lon, rin, ron)
return "n%d[label=\"%s\"]" % (
node, fmt % (image_numbers[int(fl[idx, IIDX])],
int(fl[idx, ONIDX])))
def write_graph(path, x, y):
'''Write a graphviz DOT file'''
with open(path, "w") as fd:
fd.write("digraph trackobjects {\n")
graph_idx = np.where(
(x != np.arange(len(x))) & (y != np.arange(len(y))))[0]
for idx in graph_idx:
fd.write(desc(idx)+";\n")
for idx in graph_idx:
fd.write("n%d -> n%d [label=%0.2f];\n" %
(idx, x[idx], score_matrix[idx, x[idx]]))
fd.write("}\n")
#
#--------------------------------------------------------
#
# Mitosis fixup.
#
good_mitoses = np.zeros(len(mitoses), bool)
for midx, (lidx, ridx, pidx) in enumerate(mitoses):
#
# If the parent was not accepted or either of the children
# have been assigned to a mitosis, skip
#
if x[pidx] == midx + mitosis_off and not \
any([y[idx] >= mitosis_off and y[idx] < mitosis_end
                         for idx in (lidx, ridx)]):
                alt_score = sum([score_matrix[y[idx], idx] for idx in (lidx, ridx)])
#
# Taking the alt score would cost us a mitosis alternative
# cost, but would remove half of a gap alternative.
#
alt_score += mitosis_alternative_cost - gap_cost / 2
#
# Alternatively, taking the mitosis score would cost us
# the gap alternatives of the left and right.
#
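                # Hypothetical numbers to illustrate the comparison below:
                # with gap_cost = 40 and mitosis_alternative_cost = 80, if
                # the children's current links score 30 in total, then
                # alt_score = 30 + 80 - 20 = 90, and a mitosis that scored
                # 25 is kept because 90 > 25 + 40.
                #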
if alt_score > mitosis_scores[midx] + gap_cost:
for idx in lidx, ridx:
old_y = y[idx]
if old_y < start_end_end:
x[old_y] = old_y + gap_off
else:
x[old_y] = old_y
y[lidx] = midx + mitosis_off
y[ridx] = midx + mitosis_off
good_mitoses[midx] = True
continue
x[pidx] = pidx + gap_off
y[pidx+gap_off] = pidx
x[midx+mitosis_off] = midx+mitosis_off
y[midx+mitosis_off] = midx+mitosis_off
if np.sum(good_mitoses) == 0:
good_mitoses = np.zeros((0, 3), int)
good_mitosis_scores = np.zeros(0)
else:
good_mitoses, good_mitosis_scores = \
mitoses[good_mitoses], mitosis_scores[good_mitoses]
#
#-------------------------------------
#
# Rerun to see if reverted mitoses could close gaps.
#
if np.any(x[mitoses[:, MPIDX]] != np.arange(len(mitoses)) + mitosis_off):
rerun_end = np.ones(mitosis_end, bool)
rerun_start = np.ones(mitosis_end, bool)
rerun_end[:start_end_end] = x[:start_end_end] < mitosis_off
rerun_end[mitosis_off:] = False
rerun_start[:start_end_end] = y[:start_end_end] < mitosis_off
rerun_start[mitosis_off:] = False
mask = rerun_end[i] & rerun_start[j]
i, j, c = i[mask], j[mask], c[mask]
i = np.hstack((i,
good_mitoses[:, MPIDX],
good_mitoses[:, MDLIDX] + gap_off,
good_mitoses[:, MDRIDX] + gap_off))
j = np.hstack((j,
good_mitoses[:, MPIDX] + gap_off,
good_mitoses[:, MDLIDX],
good_mitoses[:, MDRIDX]))
c = np.hstack((c, np.zeros(len(good_mitoses) *3)))
x, y = lapjv(i, j, c)
#
# Fixups to measurements
#
# fixup[N] gets the fixup dictionary for image set, N
#
# fixup[N][FEATURE] gets a tuple of a list of object numbers and
# values.
#
fixups = {}
def add_fixup(feature, image_number, object_number, value):
if image_number not in fixups:
fixups[image_number] = { feature: ([object_number], [value])}
else:
fid = fixups[image_number]
if feature not in fid:
fid[feature] = ([object_number], [value])
else:
object_numbers, values = fid[feature]
object_numbers.append(object_number)
values.append(value)
        # Attach different segments together if they are matched through the LAP
a = -np.ones(len(F)+1, dtype="int32")
b = -np.ones(len(F)+1, dtype="int32")
c = -np.ones(len(F)+1, dtype="int32")
d = -np.ones(len(F)+1, dtype="int32")
z = np.zeros(len(F)+1, dtype="int32")
# relationships is a list of parent-child relationships. Each element
# is a two-tuple of parent and child and each parent/child is a
# two-tuple of image index and object number:
#
# [((<parent-image-index>, <parent-object-number>),
# (<child-image-index>, <child-object-number>))...]
#
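        # For example (hypothetical values), the entry ((3, 17), (4, 9))
        # would record that object 17 in the image with index 3 is the
        # parent of object 9 in the image with index 4.
        #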
relationships = []
#
# Starts can be linked to the following:
# ends (start_end_off <= j < start_end_off+start_end_len)
# gap alternatives (gap_off <= j < merge_off+merge_len)
# splits (split_off <= j < split_off+split_len)
# mitosis left (mitosis_left_child_off <= j < ....)
# mitosis right (mitosis_right_child_off <= j < ....)
#
# Discard starts linked to self = "do nothing"
#
start_idxs = np.where(
y[:start_end_end] != np.arange(gap_off, gap_end))[0]
for i in start_idxs:
my_image_index = int(F[i, IIDX])
my_image_number = image_numbers[my_image_index]
my_object_index = int(F[i, OIIDX])
my_object_number = int(F[i, ONIDX])
yi = y[i]
if yi < gap_end:
#-------------------------------
#
# GAP
#
# y[i] gives index of last hooked to first
#
b[i+1] = yi+1
c[yi+1] = i+1
#
# Hook our parent image/object number to found parent
#
parent_image_index = int(L[yi, IIDX])
parent_object_number = int(L[yi, ONIDX])
parent_image_number = image_numbers[parent_image_index]
parent_image_numbers[my_image_index][my_object_index] = \
parent_image_number
parent_object_numbers[my_image_index][my_object_index] = \
parent_object_number
relationships.append(
((parent_image_index, parent_object_number),
(my_image_index, my_object_number)))
add_fixup(F_LINK_TYPE, my_image_number, my_object_number,
LT_GAP)
add_fixup(F_GAP_LENGTH, my_image_number, my_object_number,
my_image_index - parent_image_index)
add_fixup(F_GAP_SCORE, my_image_number, my_object_number,
scores[yi])
#
# One less new object
#
new_object_count[my_image_index] -= 1
#
# One less lost object (the lost object is recorded in
# the image set after the parent)
#
lost_object_count[parent_image_index + 1] -= 1
logger.debug("Gap closing: %d:%d to %d:%d, score=%f" %
(parent_image_number, parent_object_number,
image_numbers[my_image_index],
object_numbers[my_image_index][my_object_index],
score_matrix[yi, i]))
elif yi >= split_off and yi < split_end:
#------------------------------------
#
# SPLIT
#
p2_idx = split_p2idx[yi - split_off]
parent_image_index = int(P2[p2_idx, IIDX])
parent_image_number = image_numbers[parent_image_index]
parent_object_number = int(P2[p2_idx, ONIDX])
b[i+1] = P2[p2_idx, LIDX]
c[b[i+1]] = i+1
parent_image_numbers[my_image_index][my_object_index] = \
parent_image_number
parent_object_numbers[my_image_index][my_object_index] = \
parent_object_number
relationships.append(
((parent_image_index, parent_object_number),
(my_image_index, my_object_number)))
add_fixup(F_LINK_TYPE, my_image_number, my_object_number,
LT_SPLIT)
add_fixup(F_SPLIT_SCORE, my_image_number, my_object_number,
split_scores[yi - split_off])
#
# one less new object
#
new_object_count[my_image_index] -= 1
#
# one more split object
#
split_count[my_image_index] += 1
logger.debug("split: %d:%d to %d:%d, score=%f" %
(parent_image_number, parent_object_number,
image_numbers[my_image_index],
object_numbers[my_image_index][my_object_index],
split_scores[y[i] - split_off]))
#---------------------
#
# Process ends (parents)
#
end_idxs = np.where(
x[:start_end_end] != np.arange(gap_off, gap_end))[0]
for i in end_idxs:
if(x[i] < start_end_end):
a[i+1] = x[i]+1
d[a[i+1]] = i+1
elif(x[i] >= merge_off and x[i] < merge_end):
#-------------------
#
# MERGE
#
# Handle merged objects. A merge hooks the end (L) of
# a segment (the parent) to a gap alternative in P1 (the child)
#
p1_idx = merge_p1idx[x[i]-merge_off]
a[i+1] = P1[p1_idx, LIDX]
d[a[i+1]] = i+1
parent_image_index = int(L[i, IIDX])
parent_object_number = int(L[i, ONIDX])
parent_image_number = image_numbers[parent_image_index]
child_image_index = int(P1[p1_idx, IIDX])
child_object_number = int(P1[p1_idx, ONIDX])
relationships.append(
((parent_image_index, parent_object_number),
(child_image_index, child_object_number)))
add_fixup(F_MERGE_SCORE, parent_image_number,
parent_object_number,
merge_scores[x[i] - merge_off])
lost_object_count[parent_image_index+1] -= 1
merge_count[child_image_index] += 1
logger.debug("Merge: %d:%d to %d:%d, score=%f" %
(image_numbers[parent_image_index]
, parent_object_number,
image_numbers[child_image_index],
child_object_number,
merge_scores[x[i] - merge_off]))
for (mlidx, mridx, mpidx), score in\
zip(good_mitoses, good_mitosis_scores):
#
# The parent is attached, one less lost object
#
lost_object_count[int(L[mpidx, IIDX])+1] -= 1
a[mpidx+1] = F[mlidx, LIDX]
d[a[mpidx+1]] = mpidx+1
parent_image_index = int(L[mpidx, IIDX])
parent_image_number = image_numbers[parent_image_index]
parent_object_number = int(L[mpidx, ONIDX])
            split_count[int(F[mlidx, IIDX])] += 1
for idx in mlidx, mridx:
#--------------------------------------
#
# MITOSIS child
#
my_image_index = int(F[idx, IIDX])
my_image_number = image_numbers[my_image_index]
my_object_index = int(F[idx, OIIDX])
my_object_number = int(F[idx, ONIDX])
b[idx+1] = int(L[mpidx, LIDX])
c[b[idx+1]] = idx+1
parent_image_numbers[my_image_index][my_object_index] = \
parent_image_number
parent_object_numbers[my_image_index][my_object_index] = \
parent_object_number
relationships.append(
((parent_image_index, parent_object_number),
(my_image_index, my_object_number)))
add_fixup(F_LINK_TYPE, my_image_number, my_object_number,
LT_MITOSIS)
add_fixup(F_MITOSIS_SCORE, my_image_number, my_object_number,
score)
new_object_count[my_image_index] -= 1
logger.debug("Mitosis: %d:%d to %d:%d and %d, score=%f" %
(parent_image_number, parent_object_number,
image_numbers[F[mlidx, IIDX]],
F[mlidx, ONIDX],
F[mridx, ONIDX],
score))
#
# At this point a gives the label # of the track that connects
# to the end of the indexed track. b gives the label # of the
# track that connects to the start of the indexed track.
# We convert these into edges.
#
# aa and bb are the vertices of an edge list and aa[i],bb[i]
# make up an edge
#
connect_mask = (a != -1)
aa = a[connect_mask]
bb = np.argwhere(connect_mask).flatten()
connect_mask = (b != -1)
aa = np.hstack((aa, b[connect_mask]))
bb = np.hstack((bb, np.argwhere(connect_mask).flatten()))
#
# Connect self to self for indices that do not connect
#
disconnect_mask = (a == -1) & (b == -1)
aa = np.hstack((aa, np.argwhere(disconnect_mask).flatten()))
bb = np.hstack((bb, np.argwhere(disconnect_mask).flatten()))
z = all_connected_components(aa, bb)
newlabel = [z[label[i]] for i in range(len(label))]
#
# Replace the labels for the image sets in the group
# inside the list retrieved from the measurements
#
m_link_type = self.measurement_name(F_LINK_TYPE)
for i, image_number in enumerate(image_numbers):
n_objects = len(newlabel[i])
m.add_measurement(cpmeas.IMAGE,
self.image_measurement_name(F_LOST_OBJECT_COUNT),
lost_object_count[i], True, image_number)
m.add_measurement(cpmeas.IMAGE,
self.image_measurement_name(F_NEW_OBJECT_COUNT),
new_object_count[i], True, image_number)
m.add_measurement(cpmeas.IMAGE,
self.image_measurement_name(F_MERGE_COUNT),
merge_count[i], True, image_number)
m.add_measurement(cpmeas.IMAGE,
self.image_measurement_name(F_SPLIT_COUNT),
split_count[i], True, image_number)
if n_objects == 0:
continue
m.add_measurement(object_name,
self.measurement_name(F_LABEL),
newlabel[i], can_overwrite = True,
image_set_number = image_number)
m.add_measurement(object_name,
self.measurement_name(F_PARENT_IMAGE_NUMBER),
parent_image_numbers[i],
can_overwrite = True,
image_set_number = image_number)
m.add_measurement(object_name,
self.measurement_name(F_PARENT_OBJECT_NUMBER),
parent_object_numbers[i],
can_overwrite = True,
image_set_number = image_number)
is_fixups = fixups.get(image_number, None)
if (is_fixups is not None) and (F_LINK_TYPE in is_fixups):
link_types = m[object_name, m_link_type, image_number]
object_numbers, values = [
np.array(_) for _ in is_fixups[F_LINK_TYPE]]
link_types[object_numbers-1] = values
m[object_name, m_link_type, image_number] = link_types
for feature, data_type in (
(F_GAP_LENGTH, np.int32),
(F_GAP_SCORE, np.float32),
(F_MERGE_SCORE, np.float32),
(F_SPLIT_SCORE, np.float32),
(F_MITOSIS_SCORE, np.float32)):
if data_type == np.int32:
values = np.zeros(n_objects, data_type)
else:
values = np.ones(n_objects, data_type) * np.NaN
if (is_fixups is not None) and (feature in is_fixups):
object_numbers, fixup_values = [
np.array(_) for _ in is_fixups[feature]]
values[object_numbers-1] = fixup_values
m[object_name, self.measurement_name(feature), image_number] =\
values
#
# Write the relationships.
#
if len(relationships) > 0:
relationships = np.array(relationships)
parent_image_numbers = image_numbers[relationships[:, 0, 0]]
child_image_numbers = image_numbers[relationships[:, 1, 0]]
parent_object_numbers = relationships[:, 0, 1]
child_object_numbers = relationships[:, 1, 1]
m.add_relate_measurement(
self.module_num, R_PARENT, object_name, object_name,
parent_image_numbers, parent_object_numbers,
child_image_numbers, child_object_numbers)
self.recalculate_group(workspace, image_numbers)
def calculate_area_penalty(self, a1, a2):
'''Calculate a penalty for areas that don't match
Ideally, area should be conserved while tracking. We divide the larger
of the two by the smaller of the two to get the area penalty
which is then multiplied by the distance.
Note that this differs from Jaqaman eqn 5 which has an asymmetric
penalty (sqrt((a1 + a2) / b) for a1+a2 > b and b / (a1 + a2) for
        a1+a2 < b). I can't think of a good reason why they should be
asymmetric.
'''
result = a1 / a2
result[result < 1] = 1/result[result < 1]
result[np.isnan(result)] = np.inf
return result
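    # Illustrative sketch of the penalty above, using hypothetical areas.
    # It is kept in comments so that nothing executes at class-definition
    # time; if uncommented it needs only numpy:
    #
    #     import numpy as np
    #     a1 = np.array([100.0, 50.0, 0.0])
    #     a2 = np.array([80.0, 50.0, 0.0])
    #     penalty = a1 / a2
    #     penalty[penalty < 1] = 1 / penalty[penalty < 1]
    #     penalty[np.isnan(penalty)] = np.inf
    #     # penalty is now [1.25, 1.0, inf]; the 1.25 later multiplies the
    #     # distance between the two objects being matched.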
def get_gap_pair_scores(self, F, L, max_gap):
'''Compute scores for matching last frame with first to close gaps
F - an N x 3 (or more) array giving X, Y and frame # of the first object
in each track
L - an N x 3 (or more) array giving X, Y and frame # of the last object
in each track
max_gap - the maximum allowed # of frames between the last and first
Returns: an M x 2 array of M pairs where the first element of the array
is the index of the track whose last frame is to be joined to
the track whose index is the second element of the array.
an M-element vector of scores.
'''
#
# There have to be at least two things to match
#
nothing = (np.zeros((0, 2), int), np.zeros(0))
if F.shape[0] <= 1:
return nothing
X = 0
Y = 1
IIDX = 2
AIDX = 6
#
# Create an indexing ordered by the last frame index and by the first
#
i = np.arange(len(F))
j = np.arange(len(F))
f_iidx = F[:, IIDX].astype(int)
l_iidx = L[:, IIDX].astype(int)
i_lorder = np.lexsort((i, l_iidx))
j_forder = np.lexsort((j, f_iidx))
i = i[i_lorder]
j = j[j_forder]
i_counts = np.bincount(l_iidx)
j_counts = np.bincount(f_iidx)
i_indexes = Indexes([i_counts])
j_indexes = Indexes([j_counts])
#
# The lowest possible F for each L is 1+L
#
j_self = np.minimum(np.arange(len(i_counts)),
len(j_counts) - 1)
j_first_idx = j_indexes.fwd_idx[j_self] + j_counts[j_self]
#
# The highest possible F for each L is L + max_gap. j_end is the
# first illegal value... just past that.
#
j_last = np.minimum(np.arange(len(i_counts)) + max_gap,
len(j_counts)-1)
j_end_idx = j_indexes.fwd_idx[j_last] + j_counts[j_last]
#
# Structure the i and j block ranges
#
ij_counts = j_end_idx - j_first_idx
ij_indexes = Indexes([i_counts, ij_counts])
if ij_indexes.length == 0:
return nothing
#
# The index into L of the first element of the pair
#
ai = i[i_indexes.fwd_idx[ij_indexes.rev_idx] + ij_indexes.idx[0]]
#
# The index into F of the second element of the pair
#
aj = j[j_first_idx[ij_indexes.rev_idx] + ij_indexes.idx[1]]
#
# The distances
#
d = np.sqrt((L[ai, X] - F[aj, X]) ** 2 +
(L[ai, Y] - F[aj, Y]) ** 2)
#
# Rho... the area penalty
#
rho = self.calculate_area_penalty(L[ai, AIDX], F[aj, AIDX])
return np.column_stack((ai, aj)), d * rho
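    # Hypothetical illustration of the pairing above: with max_gap = 3, a
    # track whose last object sits in frame index 5 can be paired with any
    # track whose first object sits in frame indexes 6 through 8, and each
    # candidate pair is scored as the centroid distance multiplied by the
    # area penalty of the two objects.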
def get_mitotic_triple_scores(self, F, L):
'''Compute scores for matching a parent to two daughters
F - an N x 3 (or more) array giving X, Y and frame # of the first object
in each track
L - an N x 3 (or more) array giving X, Y and frame # of the last object
in each track
        Returns: an M x 3 array of M triples where the first two columns are
                 the indices of the daughters in the F array and the third
                 column is the index in the L array of the parent cell
                 an M-element vector of the distances of the parent from the
                 expected position (the midpoint of the daughters), scaled by
                 the area penalty
'''
X = 0
Y = 1
IIDX = 2
AIDX = 6
if len(F) <= 1:
return np.zeros((0, 3), np.int32), np.zeros(0, np.int32)
max_distance = self.mitosis_max_distance.value
# Find all daughter pairs within same frame
i, j = np.where(F[:, np.newaxis, IIDX] == F[np.newaxis, :, IIDX])
i, j = i[i < j], j[i < j] # get rid of duplicates and self-compares
#
# Calculate the maximum allowed distance before one or the other
# daughter is farther away than the maximum allowed from the center
#
# That's the max_distance * 2 minus the distance
#
dmax = max_distance * 2 - np.sqrt(np.sum((F[i, :2] - F[j, :2]) ** 2, 1))
mask = dmax >= 0
i, j = i[mask], j[mask]
if len(i) == 0:
return np.zeros((0, 3), np.int32), np.zeros(0, np.int32)
center_x = (F[i, X] + F[j, X]) / 2
center_y = (F[i, Y] + F[j, Y]) / 2
frame = F[i, IIDX]
# Find all parent-daughter pairs where the parent
# is in the frame previous to the daughters
ij, k = [_.flatten() for _ in np.mgrid[0:len(i), 0:len(L)]]
mask = F[i[ij], IIDX] == L[k, IIDX]+1
ij, k = ij[mask], k[mask]
if len(ij) == 0:
return np.zeros((0, 3), np.int32), np.zeros(0, np.int32)
d = np.sqrt((center_x[ij] - L[k, X]) ** 2 +
(center_y[ij] - L[k, Y]) ** 2)
mask = d <= dmax[ij]
ij, k, d = ij[mask], k[mask], d[mask]
if len(ij) == 0:
return np.zeros((0, 3), np.int32), np.zeros(0, np.int32)
rho = self.calculate_area_penalty(
F[i[ij], AIDX] + F[j[ij], AIDX], L[k, AIDX])
return np.column_stack((i[ij], j[ij], k)), d * rho
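    # Worked example for the scoring above (hypothetical numbers): with
    # mitosis_max_distance = 40 and two same-frame daughters 30 pixels
    # apart, dmax = 2 * 40 - 30 = 50, so a candidate parent from the
    # previous frame qualifies only if it lies within 50 pixels of the
    # daughters' midpoint. Its score is that distance times the area
    # penalty, e.g. a distance of 20 with daughter areas 60 + 60 against
    # a parent area of 100 gives 20 * 1.2 = 24.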
def recalculate_group(self, workspace, image_numbers):
'''Recalculate all measurements once post_group has run
workspace - the workspace being operated on
image_numbers - the image numbers of the group's image sets' measurements
'''
m = workspace.measurements
object_name = self.object_name.value
assert isinstance(m, cpmeas.Measurements)
image_index = np.zeros(np.max(image_numbers)+1, int)
image_index[image_numbers] = np.arange(len(image_numbers))
image_index[0] = -1
index_to_imgnum = np.array(image_numbers)
parent_image_numbers, parent_object_numbers = [
[ m.get_measurement(
object_name, self.measurement_name(feature), image_number)
for image_number in image_numbers]
for feature in (F_PARENT_IMAGE_NUMBER, F_PARENT_OBJECT_NUMBER)]
#
# Do all_connected_components on the graph of parents to find groups
# that share the same ancestor
#
count = np.array([len(x) for x in parent_image_numbers])
idx = Indexes(count)
if idx.length == 0:
# Nothing to do
return
parent_image_numbers = np.hstack(parent_image_numbers).astype(int)
parent_object_numbers = np.hstack(parent_object_numbers).astype(int)
parent_image_indexes = image_index[parent_image_numbers]
parent_object_indexes = parent_object_numbers - 1
i = np.arange(idx.length)
i = i[parent_image_numbers != 0]
j = idx.fwd_idx[parent_image_indexes[i]] + parent_object_indexes[i]
# Link self to self too
i = np.hstack((i, np.arange(idx.length)))
j = np.hstack((j, np.arange(idx.length)))
labels = all_connected_components(i, j)
nlabels = np.max(labels) + 1
#
# Set the ancestral index for each label
#
ancestral_index = np.zeros(nlabels, int)
ancestral_index[labels[parent_image_numbers == 0]] =\
np.argwhere(parent_image_numbers == 0).flatten().astype(int)
ancestral_image_index = idx.rev_idx[ancestral_index]
ancestral_object_index = \
ancestral_index - idx.fwd_idx[ancestral_image_index]
#
# Blow these up to one per object for convenience
#
ancestral_index = ancestral_index[labels]
ancestral_image_index = ancestral_image_index[labels]
ancestral_object_index = ancestral_object_index[labels]
def start(image_index):
'''Return the start index in the array for the given image index'''
return idx.fwd_idx[image_index]
def end(image_index):
'''Return the end index in the array for the given image index'''
return start(image_index) + idx.counts[0][image_index]
def slyce(image_index):
return slice(start(image_index), end(image_index))
class wrapped(object):
'''make an indexable version of a measurement, with parent and ancestor fetching'''
def __init__(self, feature_name):
self.feature_name = feature_name
self.backing_store = np.hstack([
m.get_measurement(object_name, feature_name, i)
for i in image_numbers])
def __getitem__(self, index):
return self.backing_store[slyce(index)]
def __setitem__(self, index, val):
self.backing_store[slyce(index)] = val
m.add_measurement(object_name, self.feature_name, val,
image_set_number = image_numbers[index],
can_overwrite=True)
def get_parent(self, index, no_parent=None):
result = np.zeros(idx.counts[0][index],
self.backing_store.dtype)
my_slice = slyce(index)
mask = parent_image_numbers[my_slice] != 0
if not np.all(mask):
if np.isscalar(no_parent) or (no_parent is None):
result[~mask] = no_parent
else:
result[~mask] = no_parent[~mask]
if np.any(mask):
result[mask] = self.backing_store[
idx.fwd_idx[parent_image_indexes[my_slice][mask]] +
parent_object_indexes[my_slice][mask]]
return result
def get_ancestor(self, index):
return self.backing_store[ancestral_index[slyce(index)]]
#
# Recalculate the trajectories
#
x = wrapped(M_LOCATION_CENTER_X)
y = wrapped(M_LOCATION_CENTER_Y)
trajectory_x = wrapped(self.measurement_name(F_TRAJECTORY_X))
trajectory_y = wrapped(self.measurement_name(F_TRAJECTORY_Y))
integrated = wrapped(self.measurement_name(F_INTEGRATED_DISTANCE))
dists = wrapped(self.measurement_name(F_DISTANCE_TRAVELED))
displ = wrapped(self.measurement_name(F_DISPLACEMENT))
linearity = wrapped(self.measurement_name(F_LINEARITY))
lifetimes = wrapped(self.measurement_name(F_LIFETIME))
label = wrapped(self.measurement_name(F_LABEL))
final_age = wrapped(self.measurement_name(F_FINAL_AGE))
age = {} # Dictionary of per-label ages
if self.wants_lifetime_filtering.value:
minimum_lifetime = self.min_lifetime.value if self.wants_minimum_lifetime.value else -np.Inf
maximum_lifetime = self.max_lifetime.value if self.wants_maximum_lifetime.value else np.Inf
for image_number in image_numbers:
index = image_index[image_number]
this_x = x[index]
if len(this_x) == 0:
continue
this_y = y[index]
last_x = x.get_parent(index, no_parent=this_x)
last_y = y.get_parent(index, no_parent=this_y)
x_diff = this_x - last_x
y_diff = this_y - last_y
#
# TrajectoryX,Y = X,Y distances traveled from step to step
#
trajectory_x[index] = x_diff
trajectory_y[index] = y_diff
#
# DistanceTraveled = Distance traveled from step to step
#
dists[index] = np.sqrt(x_diff * x_diff + y_diff * y_diff)
#
# Integrated distance = accumulated distance for lineage
#
integrated[index] = integrated.get_parent(index, no_parent=0) + dists[index]
#
# Displacement = crow-fly distance from initial ancestor
#
x_tot_diff = this_x - x.get_ancestor(index)
y_tot_diff = this_y - y.get_ancestor(index)
tot_distance = np.sqrt(x_tot_diff * x_tot_diff +
y_tot_diff * y_tot_diff)
displ[index] = tot_distance
#
# Linearity = ratio of displacement and integrated
# distance. NaN for new cells is ok.
#
linearity[index] = tot_distance / integrated[index]
#
# Add 1 to lifetimes / one for new
#
lifetimes[index] = lifetimes.get_parent(index, no_parent=0) + 1
#
# Age = overall lifetime of each label
#
for this_label, this_lifetime in zip(label[index],lifetimes[index]):
age[this_label] = this_lifetime
all_labels = age.keys()
all_ages = age.values()
if self.wants_lifetime_filtering.value:
            labels_to_filter = [k for k, v in age.items() if v <= minimum_lifetime or v >= maximum_lifetime]
for image_number in image_numbers:
index = image_index[image_number]
# Fill in final object ages
this_label = label[index]
this_lifetime = lifetimes[index]
this_age = final_age[index]
ind = np.array(all_labels).searchsorted(this_label)
i = np.array(all_ages)[ind] == this_lifetime
this_age[i] = this_lifetime[i]
final_age[index] = this_age
# Filter object ages below the minimum
if self.wants_lifetime_filtering.value:
if len(labels_to_filter) > 0:
this_label = label[index].astype(float)
this_label[np.in1d(this_label,np.array(labels_to_filter))] = np.NaN
label[index] = this_label
m.add_experiment_measurement(F_EXPT_ORIG_NUMTRACKS, nlabels)
if self.wants_lifetime_filtering.value:
m.add_experiment_measurement(F_EXPT_FILT_NUMTRACKS, nlabels-len(labels_to_filter))
def map_objects(self, workspace, new_of_old, old_of_new, i, j):
'''Record the mapping of old to new objects and vice-versa
workspace - workspace for current image set
        new_of_old - an array of the new labels for every old label
        old_of_new - an array of the old labels for every new label
i, j - the coordinates for each new object.
'''
m = workspace.measurements
assert isinstance(m, cpmeas.Measurements)
image_number = m.get_current_image_measurement(cpp.IMAGE_NUMBER)
new_of_old = new_of_old.astype(int)
old_of_new = old_of_new.astype(int)
old_object_numbers = self.get_saved_object_numbers(workspace).astype(int)
max_object_number = self.get_max_object_number(workspace)
old_count = len(new_of_old)
new_count = len(old_of_new)
#
# Record the new objects' parents
#
parents = old_of_new.copy()
parents[parents != 0] =\
old_object_numbers[(old_of_new[parents!=0]-1)].astype(parents.dtype)
self.add_measurement(workspace, F_PARENT_OBJECT_NUMBER, old_of_new)
parent_image_numbers = np.zeros(len(old_of_new))
parent_image_numbers[parents != 0] = image_number - 1
self.add_measurement(workspace, F_PARENT_IMAGE_NUMBER,
parent_image_numbers)
#
# Assign object IDs to the new objects
#
mapping = np.zeros(new_count, int)
if old_count > 0 and new_count > 0:
mapping[old_of_new != 0] = \
old_object_numbers[old_of_new[old_of_new != 0] - 1]
miss_count = np.sum(old_of_new == 0)
lost_object_count = np.sum(new_of_old == 0)
else:
miss_count = new_count
lost_object_count = old_count
nunmapped = np.sum(mapping==0)
new_max_object_number = max_object_number + nunmapped
mapping[mapping == 0] = np.arange(max_object_number+1,
new_max_object_number + 1)
self.set_max_object_number(workspace, new_max_object_number)
self.add_measurement(workspace, F_LABEL, mapping)
self.set_saved_object_numbers(workspace, mapping)
#
# Compute distances and trajectories
#
diff_i = np.zeros(new_count)
diff_j = np.zeros(new_count)
distance = np.zeros(new_count)
integrated_distance = np.zeros(new_count)
displacement = np.zeros(new_count)
linearity = np.ones(new_count)
orig_i = i.copy()
orig_j = j.copy()
old_i, old_j = self.get_saved_coordinates(workspace)
old_distance = self.get_saved_distances(workspace)
old_orig_i, old_orig_j = self.get_orig_coordinates(workspace)
has_old = (old_of_new != 0)
if np.any(has_old):
old_indexes = old_of_new[has_old]-1
orig_i[has_old] = old_orig_i[old_indexes]
orig_j[has_old] = old_orig_j[old_indexes]
diff_i[has_old] = i[has_old] - old_i[old_indexes]
diff_j[has_old] = j[has_old] - old_j[old_indexes]
distance[has_old] = np.sqrt(diff_i[has_old]**2 + diff_j[has_old]**2)
integrated_distance[has_old] = (old_distance[old_indexes] + distance[has_old])
displacement[has_old] = np.sqrt((i[has_old]-orig_i[has_old])**2 + (j[has_old]-orig_j[has_old])**2)
linearity[has_old] = displacement[has_old] / integrated_distance[has_old]
self.add_measurement(workspace, F_TRAJECTORY_X, diff_j)
self.add_measurement(workspace, F_TRAJECTORY_Y, diff_i)
self.add_measurement(workspace, F_DISTANCE_TRAVELED, distance)
self.add_measurement(workspace, F_DISPLACEMENT, displacement)
self.add_measurement(workspace, F_INTEGRATED_DISTANCE, integrated_distance)
self.add_measurement(workspace, F_LINEARITY, linearity)
self.set_saved_distances(workspace, integrated_distance)
self.set_orig_coordinates(workspace, (orig_i, orig_j))
self.set_saved_coordinates(workspace, (i,j))
#
# Update the ages
#
age = np.ones(new_count, int)
if np.any(has_old):
old_age = self.get_saved_ages(workspace)
age[has_old] = old_age[old_of_new[has_old]-1]+1
self.add_measurement(workspace, F_LIFETIME, age)
final_age = np.NaN*np.ones(new_count, float) # Initialize to NaN; will re-calc later
self.add_measurement(workspace, F_FINAL_AGE, final_age)
self.set_saved_ages(workspace, age)
self.set_saved_object_numbers(workspace, mapping)
#
# Add image measurements
#
self.add_image_measurement(workspace, F_NEW_OBJECT_COUNT,
np.sum(parents==0))
self.add_image_measurement(workspace, F_LOST_OBJECT_COUNT,
lost_object_count)
#
        # Find parents with more than one child. These are the progenitors
# for daughter cells.
#
if np.any(parents != 0):
h = np.bincount(parents[parents != 0])
split_count = np.sum(h > 1)
else:
split_count = 0
self.add_image_measurement(workspace, F_SPLIT_COUNT, split_count)
#
# Find children with more than one parent. These are the merges
#
if np.any(new_of_old != 0):
h = np.bincount(new_of_old[new_of_old != 0])
merge_count = np.sum(h > 1)
else:
merge_count = 0
self.add_image_measurement(workspace, F_MERGE_COUNT, merge_count)
#########################################
#
# Compile the relationships between children and parents
#
#########################################
last_object_numbers = np.arange(1, len(new_of_old) + 1)
new_object_numbers = np.arange(1, len(old_of_new)+1)
r_parent_object_numbers = np.hstack((
old_of_new[old_of_new != 0],
last_object_numbers[new_of_old != 0]))
r_child_object_numbers = np.hstack((
new_object_numbers[parents != 0], new_of_old[new_of_old != 0]))
if len(r_child_object_numbers) > 0:
#
# Find unique pairs
#
order = np.lexsort((r_child_object_numbers, r_parent_object_numbers))
r_child_object_numbers = r_child_object_numbers[order]
r_parent_object_numbers = r_parent_object_numbers[order]
to_keep = np.hstack((
[True],
(r_parent_object_numbers[1:] != r_parent_object_numbers[:-1]) |
(r_child_object_numbers[1:] != r_child_object_numbers[:-1])))
r_child_object_numbers = r_child_object_numbers[to_keep]
r_parent_object_numbers = r_parent_object_numbers[to_keep]
r_image_numbers = np.ones(
r_parent_object_numbers.shape[0],
r_parent_object_numbers.dtype) * image_number
if len(r_child_object_numbers) > 0:
m.add_relate_measurement(
self.module_num, R_PARENT,
self.object_name.value, self.object_name.value,
r_image_numbers - 1, r_parent_object_numbers,
r_image_numbers, r_child_object_numbers)
def get_kalman_feature_names(self):
if self.tracking_method != TM_LAP:
return []
return sum(
[sum(
[[ kalman_feature(model, F_STATE, element),
kalman_feature(model, F_NOISE, element)] +
[ kalman_feature(model, F_COV, element, e2)
for e2 in elements]
for element in elements],[])
for model, elements in self.get_kalman_models()], [])
def get_measurement_columns(self, pipeline):
result = [(self.object_name.value,
self.measurement_name(feature),
coltype)
for feature, coltype in F_ALL_COLTYPE_ALL]
result += [(cpmeas.IMAGE, self.image_measurement_name(feature), coltype)
for feature, coltype in F_IMAGE_COLTYPE_ALL]
if self.tracking_method == TM_LAP:
result += [( self.object_name.value,
self.measurement_name(name),
coltype) for name, coltype in (
(F_AREA, cpmeas.COLTYPE_INTEGER),
(F_LINK_TYPE, cpmeas.COLTYPE_INTEGER),
(F_LINKING_DISTANCE, cpmeas.COLTYPE_FLOAT),
(F_STANDARD_DEVIATION, cpmeas.COLTYPE_FLOAT),
(F_MOVEMENT_MODEL, cpmeas.COLTYPE_INTEGER))]
result += [( self.object_name.value,
self.measurement_name(name),
cpmeas.COLTYPE_FLOAT) for name in
list(self.get_kalman_feature_names())]
if self.wants_second_phase:
result += [
(self.object_name.value, self.measurement_name(name), coltype)
for name, coltype in (
(F_GAP_LENGTH, cpmeas.COLTYPE_INTEGER),
(F_GAP_SCORE, cpmeas.COLTYPE_FLOAT),
(F_MERGE_SCORE, cpmeas.COLTYPE_FLOAT),
(F_SPLIT_SCORE, cpmeas.COLTYPE_FLOAT),
(F_MITOSIS_SCORE, cpmeas.COLTYPE_FLOAT))]
# Add the post-group attribute to all measurements
attributes = { cpmeas.MCA_AVAILABLE_POST_GROUP: True }
result = [ ( c[0], c[1], c[2], attributes) for c in result]
return result
def get_object_relationships(self, pipeline):
'''Return the object relationships produced by this module'''
object_name = self.object_name.value
if self.wants_second_phase and self.tracking_method == TM_LAP:
when = cpmeas.MCA_AVAILABLE_POST_GROUP
else:
when = cpmeas.MCA_AVAILABLE_EACH_CYCLE
return [(R_PARENT, object_name, object_name, when)]
def get_categories(self, pipeline, object_name):
if object_name in (self.object_name.value, cpmeas.IMAGE):
return [F_PREFIX]
elif object_name == cpmeas.EXPERIMENT:
return [F_PREFIX]
else:
return []
def get_measurements(self, pipeline, object_name, category):
if object_name == self.object_name.value and category == F_PREFIX:
result = list(F_ALL)
if self.tracking_method == TM_LAP:
result += [F_AREA, F_LINKING_DISTANCE, F_STANDARD_DEVIATION,
F_LINK_TYPE, F_MOVEMENT_MODEL]
if self.wants_second_phase:
result += [F_GAP_LENGTH, F_GAP_SCORE, F_MERGE_SCORE,
F_SPLIT_SCORE, F_MITOSIS_SCORE]
result += self.get_kalman_feature_names()
return result
if object_name == cpmeas.IMAGE:
result = F_IMAGE_ALL
return result
if object_name == cpmeas.EXPERIMENT and category == F_PREFIX:
return [F_EXPT_ORIG_NUMTRACKS, F_EXPT_FILT_NUMTRACKS]
return []
def get_measurement_objects(self, pipeline, object_name, category,
measurement):
if (object_name == cpmeas.IMAGE and category == F_PREFIX and
measurement in F_IMAGE_ALL):
return [ self.object_name.value]
return []
def get_measurement_scales(self, pipeline, object_name, category, feature,image_name):
if self.tracking_method == TM_LAP:
return []
if feature in self.get_measurements(pipeline, object_name, category):
return [str(self.pixel_radius.value)]
return []
def upgrade_settings(self, setting_values, variable_revision_number,
module_name, from_matlab):
if from_matlab and variable_revision_number == 3:
wants_image = setting_values[10] != cps.DO_NOT_USE
measurement = '_'.join(setting_values[2:6])
setting_values = [ setting_values[0], # tracking method
setting_values[1], # object name
measurement,
setting_values[6], # pixel_radius
setting_values[7], # display_type
wants_image,
setting_values[10]]
variable_revision_number = 1
from_matlab = False
if (not from_matlab) and variable_revision_number == 1:
setting_values = setting_values + ["100","100"]
variable_revision_number = 2
if (not from_matlab) and variable_revision_number == 2:
# Added phase 2 parameters
setting_values = setting_values + [
"40","40","40","50","50","50","5"]
variable_revision_number = 3
if (not from_matlab) and variable_revision_number == 3:
# Added Kalman choices:
# Model
# radius std
# radius limit
setting_values = (setting_values[:7] +
[ M_BOTH, "3", "2,10"] +
setting_values[9:])
variable_revision_number = 4
if (not from_matlab) and variable_revision_number == 4:
# Added lifetime filtering: Wants filtering + min/max allowed lifetime
setting_values = setting_values + [cps.NO, cps.YES, "1", cps.NO, "100"]
variable_revision_number = 5
if (not from_matlab) and variable_revision_number == 5:
# Added mitosis alternative score + mitosis_max_distance
setting_values = setting_values + ["80", "40"]
variable_revision_number = 6
return setting_values, variable_revision_number, from_matlab
| gpl-2.0 |
jarvis-fga/Projetos | Problema 4/Python/matplot_kmean.py | 1 | 3623 | import time
import pandas
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cluster import MiniBatchKMeans, KMeans
from sklearn.metrics.pairwise import pairwise_distances_argmin
from sklearn.manifold import TSNE
features = [
"mean_of_the_integrated_profile",
"standard_deviation_of_the_integrated_profile",
"excess_kurtosis_of_the_integrated_profile",
"skewness_of_the_integrated_profile",
"mean_of_the_DM-SNR_curve",
"standard_deviation_of_the_DM-SNR_curve",
"excess_kurtosis_of_the_DM-SNR_curve",
"skewness_of_the_DM-SNR_curve",
"class"
]
data = pandas.read_csv('data/HTRU_2.csv', sep=",", names=features)
labels = data['class']
# #############################################################################
batch_size = 45
centers = [[1, 1], [-1, -1]]
n_clusters = len(centers)
X = TSNE(n_components=2).fit_transform(data)
# #############################################################################
# Means
k_means = KMeans(init='k-means++', n_clusters=2, n_init=10)
t0 = time.time()
k_means.fit(X)
t_batch = time.time() - t0
# #############################################################################
# MiniBatchKMeans
mbk = MiniBatchKMeans(init='k-means++', n_clusters=2, batch_size=batch_size,
n_init=10, max_no_improvement=10, verbose=0)
t0 = time.time()
mbk.fit(X)
t_mini_batch = time.time() - t0
# #############################################################################
# Plot
fig = plt.figure(figsize=(8, 3))
fig.subplots_adjust(left=0.02, right=0.98, bottom=0.05, top=0.9)
colors = ['#4EACC5', '#FF9C34', '#4E9A06']
k_means_cluster_centers = np.sort(k_means.cluster_centers_, axis=0)
mbk_means_cluster_centers = np.sort(mbk.cluster_centers_, axis=0)
k_means_labels = pairwise_distances_argmin(X, k_means_cluster_centers)
mbk_means_labels = pairwise_distances_argmin(X, mbk_means_cluster_centers)
order = pairwise_distances_argmin(k_means_cluster_centers,
mbk_means_cluster_centers)
# KMeans
ax = fig.add_subplot(1, 3, 1)
for k, col in zip(range(n_clusters), colors):
my_members = k_means_labels == k
cluster_center = k_means_cluster_centers[k]
ax.plot(X[my_members, 0], X[my_members, 1], 'w',
markerfacecolor=col, marker='.')
ax.plot(cluster_center[0], cluster_center[1], 'o', markerfacecolor=col,
markeredgecolor='k', markersize=6)
ax.set_title('KMeans')
ax.set_xticks(())
ax.set_yticks(())
plt.text(-3.5, 1.8, 'train time: %.2fs\ninertia: %f' % (
t_batch, k_means.inertia_))
# MiniBatchKMeans
ax = fig.add_subplot(1, 3, 2)
for k, col in zip(range(n_clusters), colors):
my_members = mbk_means_labels == order[k]
cluster_center = mbk_means_cluster_centers[order[k]]
ax.plot(X[my_members, 0], X[my_members, 1], 'w',
markerfacecolor=col, marker='.')
ax.plot(cluster_center[0], cluster_center[1], 'o', markerfacecolor=col,
markeredgecolor='k', markersize=6)
ax.set_title('MiniBatchKMeans')
ax.set_xticks(())
ax.set_yticks(())
plt.text(-3.5, 1.8, 'train time: %.2fs\ninertia: %f' %
(t_mini_batch, mbk.inertia_))
different = (mbk_means_labels == 4)
ax = fig.add_subplot(1, 3, 3)
for k in range(n_clusters):
different += ((k_means_labels == k) != (mbk_means_labels == order[k]))
identic = np.logical_not(different)
ax.plot(X[identic, 0], X[identic, 1], 'w',
markerfacecolor='#bbbbbb', marker='.')
ax.plot(X[different, 0], X[different, 1], 'w',
markerfacecolor='m', marker='.')
ax.set_title('Difference')
ax.set_xticks(())
ax.set_yticks(())
plt.show()
| mit |
vlukes/sfepy | examples/diffusion/poisson_parallel_interactive.py | 4 | 19203 | #!/usr/bin/env python
r"""
Parallel assembling and solving of a Poisson's equation, using commands for
interactive use.
Find :math:`u` such that:
.. math::
\int_{\Omega} \nabla v \cdot \nabla u
= \int_{\Omega} v f
\;, \quad \forall s \;.
Important Notes
---------------
- This example requires petsc4py, mpi4py and (optionally) pymetis with their
dependencies installed!
- This example generates a number of files - do not use an existing non-empty
directory for the ``output_dir`` argument.
- Use the ``--clear`` option with care!
Notes
-----
- Each task is responsible for a subdomain consisting of a set of cells (a cell
region).
- Each subdomain owns PETSc DOFs within a consecutive range.
- When both global and task-local variables exist, the task-local
variables have ``_i`` suffix.
- This example does not use a nonlinear solver.
- This example can serve as a template for solving a linear single-field scalar
problem - just replace the equations in :func:`create_local_problem()`.
- The command line options are saved into <output_dir>/options.txt file.
Usage Examples
--------------
See all options::
$ python examples/diffusion/poisson_parallel_interactive.py -h
See PETSc options::
$ python examples/diffusion/poisson_parallel_interactive.py -help
Single process run useful for debugging with :func:`debug()
<sfepy.base.base.debug>`::
$ python examples/diffusion/poisson_parallel_interactive.py output-parallel
Parallel runs::
$ mpiexec -n 3 python examples/diffusion/poisson_parallel_interactive.py output-parallel -2 --shape=101,101
$ mpiexec -n 3 python examples/diffusion/poisson_parallel_interactive.py output-parallel -2 --shape=101,101 --metis
$ mpiexec -n 5 python examples/diffusion/poisson_parallel_interactive.py output-parallel -2 --shape=101,101 --verify --metis -ksp_monitor -ksp_converged_reason
View the results using::
$ python postproc.py output-parallel/sol.h5 --wireframe -b -d'u,plot_warp_scalar'
"""
from __future__ import absolute_import
from argparse import RawDescriptionHelpFormatter, ArgumentParser
import os
import sys
sys.path.append('.')
import csv
import numpy as nm
import matplotlib.pyplot as plt
from sfepy.base.base import output, Struct
from sfepy.base.ioutils import ensure_path, remove_files_patterns, save_options
from sfepy.base.timing import Timer
from sfepy.discrete.fem import Mesh, FEDomain, Field
from sfepy.discrete.common.region import Region
from sfepy.discrete import (FieldVariable, Material, Integral, Function,
Equation, Equations, Problem, State)
from sfepy.discrete.conditions import Conditions, EssentialBC
from sfepy.discrete.evaluate import apply_ebc_to_matrix
from sfepy.terms import Term
from sfepy.solvers.ls import PETScKrylovSolver
import sfepy.parallel.parallel as pl
import sfepy.parallel.plot_parallel_dofs as ppd
def create_local_problem(omega_gi, order):
"""
Local problem definition using a domain corresponding to the global region
`omega_gi`.
"""
mesh = omega_gi.domain.mesh
# All tasks have the whole mesh.
bbox = mesh.get_bounding_box()
min_x, max_x = bbox[:, 0]
eps_x = 1e-8 * (max_x - min_x)
mesh_i = Mesh.from_region(omega_gi, mesh, localize=True)
domain_i = FEDomain('domain_i', mesh_i)
omega_i = domain_i.create_region('Omega', 'all')
gamma1_i = domain_i.create_region('Gamma1',
'vertices in (x < %.10f)'
% (min_x + eps_x),
'facet', allow_empty=True)
gamma2_i = domain_i.create_region('Gamma2',
'vertices in (x > %.10f)'
% (max_x - eps_x),
'facet', allow_empty=True)
field_i = Field.from_args('fu', nm.float64, 1, omega_i,
approx_order=order)
output('number of local field DOFs:', field_i.n_nod)
u_i = FieldVariable('u_i', 'unknown', field_i)
v_i = FieldVariable('v_i', 'test', field_i, primary_var_name='u_i')
integral = Integral('i', order=2*order)
mat = Material('m', lam=10, mu=5)
t1 = Term.new('dw_laplace(m.lam, v_i, u_i)',
integral, omega_i, m=mat, v_i=v_i, u_i=u_i)
def _get_load(coors):
val = nm.ones_like(coors[:, 0])
for coor in coors.T:
val *= nm.sin(4 * nm.pi * coor)
return val
def get_load(ts, coors, mode=None, **kwargs):
if mode == 'qp':
return {'val' : _get_load(coors).reshape(coors.shape[0], 1, 1)}
load = Material('load', function=Function('get_load', get_load))
t2 = Term.new('dw_volume_lvf(load.val, v_i)',
integral, omega_i, load=load, v_i=v_i)
eq = Equation('balance', t1 - 100 * t2)
eqs = Equations([eq])
ebc1 = EssentialBC('ebc1', gamma1_i, {'u_i.all' : 0.0})
ebc2 = EssentialBC('ebc2', gamma2_i, {'u_i.all' : 0.1})
pb = Problem('problem_i', equations=eqs, active_only=False)
pb.time_update(ebcs=Conditions([ebc1, ebc2]))
pb.update_materials()
return pb
def verify_save_dof_maps(field, cell_tasks, dof_maps, id_map, options,
verbose=False):
vec = pl.verify_task_dof_maps(dof_maps, id_map, field, verbose=verbose)
order = options.order
mesh = field.domain.mesh
sfield = Field.from_args('aux', nm.float64, 'scalar', field.region,
approx_order=order)
aux = FieldVariable('aux', 'parameter', sfield,
primary_var_name='(set-to-None)')
out = aux.create_output(vec,
linearization=Struct(kind='adaptive',
min_level=order-1,
max_level=order-1,
eps=1e-8))
filename = os.path.join(options.output_dir,
'para-domains-dofs.h5')
if field.is_higher_order():
out['aux'].mesh.write(filename, out=out)
else:
mesh.write(filename, out=out)
out = Struct(name='cells', mode='cell',
data=cell_tasks[:, None, None, None])
filename = os.path.join(options.output_dir,
'para-domains-cells.h5')
mesh.write(filename, out={'cells' : out})
def solve_problem(mesh_filename, options, comm):
order = options.order
rank, size = comm.Get_rank(), comm.Get_size()
output('rank', rank, 'of', size)
stats = Struct()
timer = Timer('solve_timer')
timer.start()
mesh = Mesh.from_file(mesh_filename)
stats.t_read_mesh = timer.stop()
timer.start()
if rank == 0:
cell_tasks = pl.partition_mesh(mesh, size, use_metis=options.metis,
verbose=True)
else:
cell_tasks = None
stats.t_partition_mesh = timer.stop()
output('creating global domain and field...')
timer.start()
domain = FEDomain('domain', mesh)
omega = domain.create_region('Omega', 'all')
field = Field.from_args('fu', nm.float64, 1, omega, approx_order=order)
stats.t_create_global_fields = timer.stop()
output('...done in', timer.dt)
output('distributing field %s...' % field.name)
timer.start()
distribute = pl.distribute_fields_dofs
lfds, gfds = distribute([field], cell_tasks,
is_overlap=True,
save_inter_regions=options.save_inter_regions,
output_dir=options.output_dir,
comm=comm, verbose=True)
lfd = lfds[0]
stats.t_distribute_fields_dofs = timer.stop()
output('...done in', timer.dt)
if rank == 0:
dof_maps = gfds[0].dof_maps
id_map = gfds[0].id_map
if options.verify:
verify_save_dof_maps(field, cell_tasks,
dof_maps, id_map, options, verbose=True)
if options.plot:
ppd.plot_partitioning([None, None], field, cell_tasks, gfds[0],
options.output_dir, size)
output('creating local problem...')
timer.start()
omega_gi = Region.from_cells(lfd.cells, field.domain)
omega_gi.finalize()
omega_gi.update_shape()
pb = create_local_problem(omega_gi, order)
variables = pb.get_variables()
eqs = pb.equations
u_i = variables['u_i']
field_i = u_i.field
stats.t_create_local_problem = timer.stop()
output('...done in', timer.dt)
if options.plot:
ppd.plot_local_dofs([None, None], field, field_i, omega_gi,
options.output_dir, rank)
output('allocating global system...')
timer.start()
sizes, drange = pl.get_sizes(lfd.petsc_dofs_range, field.n_nod, 1)
output('sizes:', sizes)
output('drange:', drange)
pdofs = pl.get_local_ordering(field_i, lfd.petsc_dofs_conn)
output('pdofs:', pdofs)
pmtx, psol, prhs = pl.create_petsc_system(pb.mtx_a, sizes, pdofs, drange,
is_overlap=True, comm=comm,
verbose=True)
stats.t_allocate_global_system = timer.stop()
output('...done in', timer.dt)
output('evaluating local problem...')
timer.start()
state = State(variables)
state.fill(0.0)
state.apply_ebc()
rhs_i = eqs.eval_residuals(state())
# This must be after pl.create_petsc_system() call!
mtx_i = eqs.eval_tangent_matrices(state(), pb.mtx_a)
stats.t_evaluate_local_problem = timer.stop()
output('...done in', timer.dt)
output('assembling global system...')
timer.start()
apply_ebc_to_matrix(mtx_i, u_i.eq_map.eq_ebc)
pl.assemble_rhs_to_petsc(prhs, rhs_i, pdofs, drange, is_overlap=True,
comm=comm, verbose=True)
pl.assemble_mtx_to_petsc(pmtx, mtx_i, pdofs, drange, is_overlap=True,
comm=comm, verbose=True)
stats.t_assemble_global_system = timer.stop()
output('...done in', timer.dt)
output('creating solver...')
timer.start()
conf = Struct(method='cg', precond='gamg', sub_precond='none',
i_max=10000, eps_a=1e-50, eps_r=1e-5, eps_d=1e4, verbose=True)
status = {}
ls = PETScKrylovSolver(conf, comm=comm, mtx=pmtx, status=status)
stats.t_create_solver = timer.stop()
output('...done in', timer.dt)
output('solving...')
timer.start()
psol = ls(prhs, psol)
psol_i = pl.create_local_petsc_vector(pdofs)
gather, scatter = pl.create_gather_scatter(pdofs, psol_i, psol, comm=comm)
scatter(psol_i, psol)
sol0_i = state() - psol_i[...]
psol_i[...] = sol0_i
gather(psol, psol_i)
stats.t_solve = timer.stop()
output('...done in', timer.dt)
output('saving solution...')
timer.start()
u_i.set_data(sol0_i)
out = u_i.create_output()
filename = os.path.join(options.output_dir, 'sol_%02d.h5' % comm.rank)
pb.domain.mesh.write(filename, io='auto', out=out)
gather_to_zero = pl.create_gather_to_zero(psol)
psol_full = gather_to_zero(psol)
if comm.rank == 0:
sol = psol_full[...].copy()[id_map]
u = FieldVariable('u', 'parameter', field,
primary_var_name='(set-to-None)')
filename = os.path.join(options.output_dir, 'sol.h5')
if (order == 1) or (options.linearization == 'strip'):
out = u.create_output(sol)
mesh.write(filename, io='auto', out=out)
else:
out = u.create_output(sol, linearization=Struct(kind='adaptive',
min_level=0,
max_level=order,
eps=1e-3))
out['u'].mesh.write(filename, io='auto', out=out)
stats.t_save_solution = timer.stop()
output('...done in', timer.dt)
stats.t_total = timer.total
stats.n_dof = sizes[1]
stats.n_dof_local = sizes[0]
stats.n_cell = omega.shape.n_cell
stats.n_cell_local = omega_gi.shape.n_cell
if options.show:
plt.show()
return stats
def save_stats(filename, pars, stats, overwrite, rank, comm=None):
out = stats.to_dict()
names = sorted(out.keys())
shape_dict = {'n%d' % ii : pars.shape[ii] for ii in range(pars.dim)}
keys = ['size', 'rank', 'dim'] + list(shape_dict.keys()) + ['order'] + names
out['size'] = comm.size
out['rank'] = rank
out['dim'] = pars.dim
out.update(shape_dict)
out['order'] = pars.order
if rank == 0 and overwrite:
with open(filename, 'w') as fd:
writer = csv.DictWriter(fd, fieldnames=keys)
writer.writeheader()
writer.writerow(out)
else:
with open(filename, 'a') as fd:
writer = csv.DictWriter(fd, fieldnames=keys)
writer.writerow(out)
helps = {
'output_dir' :
'output directory',
'dims' :
'dimensions of the block [default: %(default)s]',
'shape' :
'shape (counts of nodes in x, y, z) of the block [default: %(default)s]',
'centre' :
'centre of the block [default: %(default)s]',
'2d' :
'generate a 2D rectangle, the third components of the above'
' options are ignored',
'order' :
'field approximation order',
'linearization' :
'linearization used for storing the results with approximation order > 1'
' [default: %(default)s]',
'metis' :
'use metis for domain partitioning',
'verify' :
'verify domain partitioning, save cells and DOFs of tasks'
' for visualization',
'plot' :
'make partitioning plots',
'save_inter_regions' :
'save inter-task regions for debugging partitioning problems',
'show' :
'show partitioning plots (implies --plot)',
'stats_filename' :
'name of the stats file for storing elapsed time statistics',
'new_stats' :
'create a new stats file with a header line (overwrites existing!)',
'silent' : 'do not print messages to screen',
'clear' :
'clear old solution files from output directory'
' (DANGEROUS - use with care!)',
}
def main():
parser = ArgumentParser(description=__doc__.rstrip(),
formatter_class=RawDescriptionHelpFormatter)
parser.add_argument('output_dir', help=helps['output_dir'])
parser.add_argument('--dims', metavar='dims',
action='store', dest='dims',
default='1.0,1.0,1.0', help=helps['dims'])
parser.add_argument('--shape', metavar='shape',
action='store', dest='shape',
default='11,11,11', help=helps['shape'])
parser.add_argument('--centre', metavar='centre',
action='store', dest='centre',
default='0.0,0.0,0.0', help=helps['centre'])
parser.add_argument('-2', '--2d',
action='store_true', dest='is_2d',
default=False, help=helps['2d'])
parser.add_argument('--order', metavar='int', type=int,
action='store', dest='order',
default=1, help=helps['order'])
parser.add_argument('--linearization', choices=['strip', 'adaptive'],
action='store', dest='linearization',
default='strip', help=helps['linearization'])
parser.add_argument('--metis',
action='store_true', dest='metis',
default=False, help=helps['metis'])
parser.add_argument('--verify',
action='store_true', dest='verify',
default=False, help=helps['verify'])
parser.add_argument('--plot',
action='store_true', dest='plot',
default=False, help=helps['plot'])
parser.add_argument('--show',
action='store_true', dest='show',
default=False, help=helps['show'])
parser.add_argument('--save-inter-regions',
action='store_true', dest='save_inter_regions',
default=False, help=helps['save_inter_regions'])
parser.add_argument('--stats', metavar='filename',
action='store', dest='stats_filename',
default=None, help=helps['stats_filename'])
parser.add_argument('--new-stats',
action='store_true', dest='new_stats',
default=False, help=helps['new_stats'])
parser.add_argument('--silent',
action='store_true', dest='silent',
default=False, help=helps['silent'])
parser.add_argument('--clear',
action='store_true', dest='clear',
default=False, help=helps['clear'])
options, petsc_opts = parser.parse_known_args()
if options.show:
options.plot = True
comm = pl.PETSc.COMM_WORLD
output_dir = options.output_dir
filename = os.path.join(output_dir, 'output_log_%02d.txt' % comm.rank)
if comm.rank == 0:
ensure_path(filename)
comm.barrier()
output.prefix = 'sfepy_%02d:' % comm.rank
output.set_output(filename=filename, combined=options.silent == False)
output('petsc options:', petsc_opts)
mesh_filename = os.path.join(options.output_dir, 'para.h5')
dim = 2 if options.is_2d else 3
dims = nm.array(eval(options.dims), dtype=nm.float64)[:dim]
shape = nm.array(eval(options.shape), dtype=nm.int32)[:dim]
centre = nm.array(eval(options.centre), dtype=nm.float64)[:dim]
output('dimensions:', dims)
output('shape: ', shape)
output('centre: ', centre)
if comm.rank == 0:
from sfepy.mesh.mesh_generators import gen_block_mesh
if options.clear:
remove_files_patterns(output_dir,
['*.h5', '*.mesh', '*.txt', '*.png'],
ignores=['output_log_%02d.txt' % ii
for ii in range(comm.size)],
verbose=True)
save_options(os.path.join(output_dir, 'options.txt'),
[('options', vars(options))])
mesh = gen_block_mesh(dims, shape, centre, name='block-fem',
verbose=True)
mesh.write(mesh_filename, io='auto')
comm.barrier()
output('field order:', options.order)
stats = solve_problem(mesh_filename, options, comm)
output(stats)
if options.stats_filename:
if comm.rank == 0:
ensure_path(options.stats_filename)
comm.barrier()
pars = Struct(dim=dim, shape=shape, order=options.order)
pl.call_in_rank_order(
lambda rank, comm:
save_stats(options.stats_filename, pars, stats, options.new_stats,
rank, comm),
comm
)
if __name__ == '__main__':
main()
| bsd-3-clause |
letsgoexploring/linearsolve-package | linearsolve/__init__.py | 1 | 32412 | from __future__ import division,print_function
import numpy as np
import scipy.linalg as la
from statsmodels.tools.numdiff import approx_fprime_cs
from scipy.optimize import root,fsolve,broyden1,broyden2
import pandas as pd
import sys
class model:
'''Defines a class -- linearsolve.model -- with associated methods for solving and simulating dynamic
stochastic general equilibrium (DSGE) models.'''
def __init__(self,equations=None,n_states=None,var_names=None,shock_names=None,parameters=None,parameter_names=None):
'''Initializing an instance linearsolve.model requires values for the following variables:
Args:
equations: (fun) A function that represents the equilibirum conditions for a DSGE model.
The function should accept three arguments:
* vars_fwd: endogenous variables dated t+1
* vars_cur: endogenous variables dated t
* parameters: the parameters of the model
The function should return an n-dimensional array with each element of
                                    the returned array equal to an equilibrium condition of the model
solved for zero.
n_states: (int) The number of state variables in the model.
var_names: (list) A list of strings with the names of the endogenous variables. The
state variables must be ordered first.
shock_names: (list) A list of strings with the names of the exogenous shocks to each state
variable. The order of names must agree with var_names.
parameters: (list or Pandas Series) Either a list of parameter values OR a Pandas Series
object with parameter name strings as the index.
parameter_names: (list) Optional. If parameters is given as a list, then this list of strings
will be used to save the parameters with names as a Pandas Series object.
Returns:
None
Attributes:
equilibrium_fun: (fun) Function that returns the equilibrium comditions of the model.
n_vars: (int) The number of variables in the model.
n_states: (int) The number of state variables in the model.
n_costates: (int) The number of costate or control variables in the model.
names: (dict) A dictionary with keys 'variables', 'shocks', and 'param' that
stores the names of the model's variables, shocks, and parameters.
parameters: (Pandas Series) A Pandas Series with parameter name strings as the
index. If parameter_names wasn't supplied, then parameters are labeled
'parameter 1', 'parameter 2', etc.
'''
self.equilibrium_fun= equations
self.n_vars = len(var_names)
self.n_states = n_states
self.n_costates=self.n_vars-n_states
names = {}
names['variables'] = var_names
if shock_names is not None:
if len(shock_names)<self.n_states:
shock_names_temp = []
for i in range(self.n_states):
try:
shock_names_temp.append(shock_names[i])
except:
shock_names_temp.append('e_'+var_names[i])
shock_names = shock_names_temp
else:
shock_names = []
for i in range(self.n_states):
shock_names.append('e_'+var_names[i])
names['shocks'] = shock_names
if isinstance(parameters,pd.Series):
self.parameters = parameters
else:
if parameter_names is None:
parameter_names = ['parameter '+str(i+1) for i in range(len(parameters))]
self.parameters = pd.Series(parameters,index=parameter_names)
names['param'] = parameter_names
self.names = names
# Methods
def approximate_and_solve(self,log_linear=True):
'''Method approximates and solves a dynamic stochastic general equilibrium (DSGE) model by
constructing the log-linear approximation (if the model isn't log-linear) and solving the model
using Klein's (2000) method.
Args:
log_linear: (bool) Whether to compute log-linear or linear approximation. Default: True
Returns:
None
Attributes:
a: (Numpy ndarray) Coefficient matrix on forward-dated variables.
b: (Numpy ndarray) Coefficient matrix on current-dated variables.
f: (Numpy ndarray) Solution matrix coefficients on s(t) in control equation.
p: (Numpy ndarray) Solution matrix coefficients on s(t) in state equation.
stab: (int) Indicates solution stability and uniqueness
stab == 1: too many stable eigenvalues
stab == -1: too few stable eigenvalues
stab == 0: just enough stable eigenvalues
eig: The generalized eigenvalues from the Schur decomposition
log_linear: (bool) Whether the model is log-linear. Set to the value of the log_linear argument.
'''
# Set attribute
self.log_linear = log_linear
# Approximate
if log_linear == True:
self.log_linear_approximation()
else:
self.linear_approximation()
# Solve the model
self.solve_klein(self.a,self.b)
def approximated(self,round=True,precision=4):
'''Returns a string containing the log-linear approximation to the equilibrium conditions
Args:
round: (bool) Whether to round the coefficients in the linear equations. Default: True
precision: (int) Number of decimals to round the coefficients. Default: 4
Returns:
String with the log-linear approximation to the equilibrium conditions.
Attributes:
None
'''
if round is True:
a = np.round(self.a,precision)
b = np.round(self.b,precision)
else:
a = self.a
b = self.b
leftsides = []
rightsides = []
if self.log_linear==True:
lines ='Log-linear equilibrium conditions:\n\n'
else:
lines ='Linear equilibrium conditions:\n\n'
left_length = 1
for i in range(self.n_vars):
left = ''
right = ''
left_plus_flag = 0
right_plus_flag = 0
if all(np.isclose(0,a[i])):
left += '0'
else:
for j in range(self.n_vars):
if not np.isclose(0,a[i][j]):
name = self.names['variables'][j]
if j >self.n_states-1:
name +='[t+1|t]'
else:
name +='[t+1]'
if np.isclose(1,a[i][j]):
coeff = ''
elif np.isclose(-1,a[i][j]):
coeff = '-'
else:
coeff = str(a[i][j])+'·'
if left_plus_flag == 0:
left += coeff+name
left_plus_flag+=1
else:
if a[i][j] > 0:
left += '+'+coeff+name
else:
left += coeff+name
if all(np.isclose(0,b[i])):
right += '0'
else:
for j in range(self.n_vars):
if not np.isclose(0,b[i][j]):
name = self.names['variables'][j]+'[t]'
if np.isclose(1,b[i][j]):
coeff = ''
elif np.isclose(-1,b[i][j]):
coeff = '-'
else:
coeff = str(b[i][j])+'·'
if right_plus_flag == 0:
right += coeff+name
right_plus_flag+=1
else:
if b[i][j] > 0:
right += '+'+coeff+name
else:
right += coeff+name
leftsides.append(left)
rightsides.append(right)
if len(left)>left_length:
left_length = len(left)
for i in range(self.n_vars):
leftsides[i] = leftsides[i].rjust(left_length)
lines+=leftsides[i]+' = '+rightsides[i]+'\n\n'
lines = lines[:-2]
return lines
def check_ss(self):
'''Uses Numpy.isclose() to print whether each steady state equilibrium condition evaluates to
something close to zero.
Args:
None
Returns:
None. Prints an array of booleans, one per equilibrium condition.
Attributes:
None
'''
try:
print(np.isclose(self.equilibrium_fun(self.ss,self.ss,self.parameters),0))
except:
print('Set the steady state first.')
def compute_ss(self,guess=None,method='fsolve',options={}):
'''Attempts to solve for the steady state of the model.
Args:
guess: (Pandas Series, Numpy array, or list) An initial guess for the
steady state solution. The result is highly sensisitve to the intial
guess chosen, so be careful. If the guess is a Numpy ndarray or a list
then the elements must be ordered to conform with self.names['variables'].
method: (str) The function from the Scipy library to use. Your choices are:
a. root
b. fsolve (default)
c. broyden1
d. broyden2
options: (dict) A dictionary of optional arguments to pass to the numerical solver.
Check out the Scipy documentation to see the options available for each routine:
http://docs.scipy.org/doc/scipy/reference/optimize.html
Returns:
None
Attributes:
ss: (Pandas Series) Steady state values of endogenous variables
'''
if guess is None:
guess = np.ones(self.n_vars)
else:
if isinstance(guess, pd.Series):
guess = guess[self.names['variables']]
# Create function for nonlinear solver
def ss_fun(variables):
variables = pd.Series(variables,index = self.names['variables'])
return self.equilibrium_fun(variables,variables,self.parameters)
if method == 'fsolve':
steady_state =fsolve(ss_fun,guess,**options)
elif method == 'root':
steady_state =root(ss_fun,guess,**options)['x']
elif method == 'broyden1':
steady_state =broyden1(ss_fun,guess,**options)
elif method == 'broyden2':
steady_state =broyden2(ss_fun,guess,**options)
# Add ss attribute
self.ss = pd.Series(steady_state,index=self.names['variables'])
def impulse(self,T=51,t0=1,shocks=None,percent=False,diff=True):
'''Computes impulse responses for shocks to each state variable.
Arguments:
T: (int) Number of periods to simulate. Default: 51
t0: (int) Period in which the shocks are realized. Must be greater than or equal to
0. Default: 1
shocks: (list or Numpy array) An (ns x 1) array of shock values. If shocks==None and
log_linear==True, shocks is set to a vector of 0.01s. If shocks==None and
log_linear==False, shocks is set to a vector of 1s. Default = None
percent: (bool) Whether to multiply simulated values by 100. Only works for log-linear
approximations. Default: False
diff: (bool) Subtract steady state for linear approximations (or log steady state for
log-linear approximations). Default: True
Returns
None
Attributes:
irs: (dict) A dictionary containing Pandas DataFrames. Has the form:
self.irs['shock name']['endog var name']
'''
# Initialize dictionary
irsDict = {}
# Set numbers of costate and state variables
n_costates = self.n_costates
n_states = self.n_states
# iterate over all shocks, compute impulse responses, and add results to dictionary
for j,name in enumerate(self.names['shocks']):
s0 = np.zeros([1,n_states])
eps= np.zeros([T,n_states])
if shocks is not None:
try:
eps[t0][j] = shocks[name]
except:
try:
eps[t0][j] = shocks[j]
except:
if self.log_linear:
eps[t0][j] = 0.01
else:
eps[t0][j] = 1
else:
if self.log_linear:
eps[t0][j] = 0.01
else:
eps[t0][j] = 1
x = ir(self.f,self.p,eps,s0)
frameDict = {self.names['shocks'][j]:eps.T[j]}
for i,endoName in enumerate(self.names['variables']):
if diff:
frameDict[endoName] = x[i]
else:
if not self.log_linear:
frameDict[endoName] = x[i] + self.ss[endoName]
else:
frameDict[endoName] = x[i] + np.log(self.ss[endoName])
irFrame = pd.DataFrame(frameDict,index = np.arange(T))
if percent==True and self.log_linear:
irFrame = 100*irFrame
if shocks is None or len(shocks)>j:
irsDict[self.names['shocks'][j]] = irFrame
# Set attribute
self.irs = irsDict
def linear_approximation(self,steady_state=None):
''' Given a nonlinear rational expectations model in the form:
psi_1[x(t+1),x(t)] = psi_2[x(t+1),x(t)]
this method returns the linear approximation of the model with matrices a and b such that:
a * y(t+1) = b * y(t)
where y(t) = x(t) - x is the log deviation of the vector x from its steady state value.
Args:
steady_state: (Pandas Series or numpy array or list)
Returns:
None
Attributes:
log_linear: (bool) Whether the model is log-linear. Sets to False.
a: (Numpy ndarray)
b: (Numpy ndarray)
'''
# Set log_linear attribute
self.log_linear=False
# Warn if steady state attribute ss has not been assigned
if steady_state is None:
try:
steady_state = self.ss
except :
raise ValueError('You must specify a steady state for the model before attempting to linearize.')
# Compute approximation
def equilibrium(vars_fwd,vars_cur):
vars_fwd = pd.Series(vars_fwd,index = self.names['variables'])
vars_cur = pd.Series(vars_cur,index = self.names['variables'])
equilibrium_left = self.equilibrium_fun(vars_fwd,vars_cur,self.parameters)
equilibrium_right = np.ones(len(self.names['variables']))
return equilibrium_left - equilibrium_right
equilibrium_fwd = lambda fwd: equilibrium(fwd,steady_state)
equilibrium_cur = lambda cur: equilibrium(steady_state,cur)
# Assign attributes
self.a= approx_fprime_cs(steady_state.ravel(),equilibrium_fwd)
self.b= -approx_fprime_cs(steady_state.ravel(),equilibrium_cur)
def log_linear_approximation(self,steady_state=None):
''' Given a nonlinear rational expectations model in the form:
psi_1[x(t+1),x(t)] = psi_2[x(t+1),x(t)]
this method returns the log-linear approximation of the model with matrices a and b such that:
a * y(t+1) = b * y(t)
where y(t) = log x(t) - log x is the log deviation of the vector x from its steady state value.
Args:
steady_state: (Pandas Series or numpy array)
Returns:
None
Attributes:
log_linear: (bool) Whether the model is log_linear. Sets to True.
a: (Numpy ndarray)
b: (Numpy ndarray)
'''
# Set log_linear attribute
self.log_linear=True
# Warn if steady state attribute ss has not been assigned
if steady_state is None:
try:
steady_state = self.ss
except :
raise ValueError('You must specify a steady state for the model before attempting to linearize.')
# Compute approximation
def log_equilibrium(log_vars_fwd,log_vars_cur):
log_vars_fwd = pd.Series(log_vars_fwd,index = self.names['variables'])
log_vars_cur = pd.Series(log_vars_cur,index = self.names['variables'])
equilibrium_left = self.equilibrium_fun(np.exp(log_vars_fwd),np.exp(log_vars_cur),self.parameters)+1
equilibrium_right = np.ones(len(self.names['variables']))
return np.log(equilibrium_left) - np.log(equilibrium_right)
log_equilibrium_fwd = lambda log_fwd: log_equilibrium(log_fwd,np.log(steady_state))
log_equilibrium_cur = lambda log_cur: log_equilibrium(np.log(steady_state),log_cur)
# Assign attributes
self.a= approx_fprime_cs(np.log(steady_state).ravel(),log_equilibrium_fwd)
self.b= -approx_fprime_cs(np.log(steady_state).ravel(),log_equilibrium_cur)
def set_ss(self,steady_state):
'''Directly set the steady state of the model.
Args:
steady_state: (Pandas Series, Numpy array, or list)
Returns:
None
Attributes:
ss: (Pandas Series) Steady state values of endogenous variables
'''
try:
self.ss = steady_state[self.names['variables']]
except:
self.ss = pd.Series(steady_state,index=self.names['variables'])
def solve_klein(self,a=None,b=None):
'''Solves a linear rational expectations model of the form:
a * x(t+1) = b * x(t) + e(t)
The method returns the solution to the law of motion:
u(t) = f*s(t) + e(t)
s(t+1) = p*s(t)
Args:
a: (Numpy ndarray) coefficient matrix
b: (Numpy ndarray) coefficient matrix
Returns:
None
Attributes:
f: (Numpy ndarray) Solution matrix coefficients on s(t)
p: (Numpy ndarray) Solution matrix coefficients on s(t)
stab: (int) Indicates solution stability and uniqueness
stab == 1: too many stable eigenvalues
stab == -1: too few stable eigenvalues
stab == 0: just enough stable eigenvalues
eig: The generalized eigenvalues from the Schur decomposition
'''
if a is None and b is None:
a = self.a
b = self.b
self.f,n,self.p,l,self.stab,self.eig = klein(a=a,b=b,c=None,phi=None,n_states=self.n_states)
def solved(self,round=True,precision=4):
'''Returns a string containing the solution to the linear system
Args:
round: (bool) Whether to round the coefficients in the solution equations. Default: True
precision: (int) Number of decimals to round the coefficients. Default: 4
Returns:
String with the linear approximation to the equilibrium conditions.
Attributes:
None
'''
if round is True:
f = np.round(self.f,precision)
p = np.round(self.p,precision)
else:
f = self.f
p = self.p
leftsides = []
rightsides = []
if self.log_linear==True:
lines ='Solution to the log-linear system:\n\n'
else:
lines ='Solution to the linear system:\n\n'
left_length = 1
for i in range(self.n_states):
left = ''
right = ''
right_plus_flag = 0
left+= self.names['variables'][i]+'[t+1]'
if all(np.isclose(0,p[i])):
right += self.names['shocks'][i]+'[t+1]'
else:
for j in range(self.n_states):
if not np.isclose(0,p[i][j]):
if right_plus_flag == 0:
right += str(p[i][j])+'·'+self.names['variables'][j]+'[t]'
right_plus_flag+=1
else:
if p[i][j] > 0:
right += '+'+str(p[i][j])+'·'+self.names['variables'][j]+'[t]'
else:
right += str(p[i][j])+'·'+self.names['variables'][j]+'[t]'
right+='+'+self.names['shocks'][i]+'[t+1]'
leftsides.append(left)
rightsides.append(right)
if len(left)>left_length:
left_length = len(left)
for i in range(self.n_vars-self.n_states):
left = ''
right = ''
right_plus_flag = 0
left+= self.names['variables'][self.n_states+i]+'[t]'
if all(np.isclose(0,f[i])):
right += '0'
else:
for j in range(self.n_states):
if not np.isclose(0,f[i][j]):
name = self.names['variables'][j]+'[t]'
if np.isclose(1,f[i][j]):
coeff = ''
elif np.isclose(-1,f[i][j]):
coeff = '-'
else:
coeff = str(f[i][j])+'·'
if right_plus_flag == 0:
right += coeff+name
right_plus_flag+=1
else:
if f[i][j] > 0:
right += '+'+coeff+name
else:
right += coeff+name
leftsides.append(left)
rightsides.append(right)
if len(left)>left_length:
left_length = len(left)
for i in range(self.n_vars):
leftsides[i] = leftsides[i].rjust(left_length)
lines+=leftsides[i]+' = '+rightsides[i]+'\n\n'
lines = lines[:-2]
return lines
def stoch_sim(self,T=51,drop_first=300,cov_mat=None,seed=None,percent=False,diff=True):
'''Computes a stochastic simulation of the model.
Arguments:
T: (int) Number of periods to simulate. Default: 51
drop_first: (int) Number of burn-in periods to simulate and discard before the reported sample.
Default: 300
cov_mat: (list or Numpy.ndarray) Covariance matrix of the shocks. If cov_mat is None, it's set to
Numpy.eye(n_states). Default: None
seed: (int) Sets the seed for the Numpy random number generator. Default: None
percent: (bool) Whether to multiply simulated values by 100. Only works for log-linear
approximations. Default: False
diff: (bool) Subtract steady state for linear approximations (or log steady state for
log-linear approximations). Default: True
Returns
None
Attributes:
simulated: (Pandas DataFrame)
'''
# Set numbers of costate and state variables
n_costates = self.n_costates
n_states = self.n_states
# Initialize states
s0 = np.zeros([1,n_states])
# Set cov_mat if not given
if cov_mat is None:
cov_mat = np.eye(n_states)
# Set seed for the Numpy random number generator
if seed is not None and type(seed)==int:
np.random.seed(seed)
# Simulate shocks
eps = np.zeros([drop_first+T,n_states])
eps[:,:len(cov_mat)] = np.random.multivariate_normal(mean=np.zeros(len(cov_mat)),cov=cov_mat,size=[drop_first+T])
# Compute impulse responses given shocks
x = ir(self.f,self.p,eps,s0)
# Construct DataFrame
frameDict = {}
for j,exoName in enumerate(self.names['shocks']):
frameDict[exoName] = eps.T[j][drop_first:]
for i,endoName in enumerate(self.names['variables']):
if diff:
frameDict[endoName] = x[i][drop_first:]
else:
frameDict[endoName] = x[i][drop_first:] + self.ss[endoName]
simFrame = pd.DataFrame(frameDict,index = np.arange(T))
if percent==True:
simFrame = 100*simFrame
# Assign attribute
self.simulated = simFrame
### End of model class ####################################################################################
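# Illustrative usage sketch (not part of the original package): a hypothetical
# two-variable model with an AR(1) technology state `a` and a control `c` that
# tracks it. It only demonstrates the intended workflow: build the model,
# compute the steady state, approximate and solve, then compute impulse
# responses. Parameter values and variable names are arbitrary.
def _example_model_workflow():
    parameters = pd.Series({'rho': 0.9})

    def equations(vars_fwd, vars_cur, parameters):
        p = parameters
        # AR(1) process for technology (state): rho*log(a[t]) - log(a[t+1]) = 0
        tech_process = p['rho'] * np.log(vars_cur['a']) - np.log(vars_fwd['a'])
        # Control tracks technology: a[t] - c[t] = 0
        control_rule = vars_cur['a'] - vars_cur['c']
        return np.array([tech_process, control_rule])

    example = model(equations=equations,
                    n_states=1,
                    var_names=['a', 'c'],
                    shock_names=['e_a'],
                    parameters=parameters)
    example.compute_ss(guess=[1.0, 1.0])
    example.approximate_and_solve(log_linear=True)
    example.impulse(T=21, t0=1)
    # For this model the solution should be approximately p = 0.9 and f = 1.
    return example.irs['e_a']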
def ir(f,p,eps,s0=None):
'''Simulates a model in the following form:
u(t) = f*s(t) + e(t)
s(t+1) = p*s(t)
where s(t) is an (n_states x 1) vector of state variables, u(t) is an (n_costates x 1) vector of costate
variables, and e(t) is an (n_states x 1) vector of exogenous shocks.
Args:
f: (Numpy ndarray) Coefficient matrix of appropriate size
p: (Numpy ndarray) Coefficient matrix of appropriate size
eps: (Numpy ndarray) T x n_states array of exogenous shocks.
s0: (Numpy ndarray) 1 x n_states array with the initial state values. Optional; Default: zeros.
Returns
s: (Numpy ndarray) states simulated from t = 0,1,...,T-1
u: (Numpy ndarray) costates simulated from t = 0,1,...,T-1
'''
T = np.max(eps.shape)
n_states = np.shape(p)[0]
n_costates = np.shape(f)[0]
if s0 is None:
s0 = np.zeros([1,n_states])
s = np.array(np.zeros([T+1,n_states]))
u = np.array(np.zeros([T,n_costates]))
s[0]=s0
for i,e in enumerate(eps):
s[i+1] = p.dot(s[i]) + e
u[i] = f.dot(s[i+1])
s = s[1:]
return np.concatenate((s.T,u.T))
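# A small sketch (not part of the original module) showing ir() on a hand-built
# solution: one AR(1) state with persistence 0.9 and a single costate equal to
# twice the state. The coefficient values are arbitrary and only illustrate the
# expected input and output shapes.
def _example_ir_usage():
    f = np.array([[2.0]])          # u(t) = 2*s(t)
    p = np.array([[0.9]])          # s(t+1) = 0.9*s(t)
    eps = np.zeros([10, 1])
    eps[1, 0] = 0.01               # one-time shock in period 1
    sim = ir(f, p, eps, s0=np.zeros([1, 1]))
    # sim has shape (2, 10): first row is the state path, second row the costate
    return sim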
def klein(a=None,b=None,c=None,phi=None,n_states=None):
'''Solves linear dynamic models with the form of:
a*Et[x(t+1)] = b*x(t) + c*z(t)
with x(t) = [s(t); u(t)] where s(t) is a vector of predetermined (state) variables and u(t) is
a vector of nonpredetermined costate variables. z(t) is a vector of exogenous forcing variables with
autocorrelation matrix phi. The solution to the model is a set of matrices f, n, p, l such that:
u(t) = f*s(t) + n*z(t)
s(t+1) = p*s(t) + l*z(t).
The solution algorithm is based on Klein (2000) and his solab.m Matlab program.
Args:
a: (Numpy ndarray) Coefficient matrix on future-dated variables
b: (Numpy ndarray) Coefficient matrix on current-dated variables
c: (Numpy ndarray) Coefficient matrix on exogenous forcing variables
phi: (Numpy ndarray) Autocorrelation of exogenous forcing variables
n_states: (int) Number of state variables
Returns:
f: (Numpy ndarray) Solution matrix coefficients on s(t)
p: (Numpy ndarray) Solution matrix coefficients on s(t)
n: (Numpy ndarray) Solution matrix coefficients on z(t)
l: (Numpy ndarray) Solution matrix coefficients on z(t)
stab: (int) Indicates solution stability and uniqueness
stab == 1: too many stable eigenvalues
stab == -1: too few stable eigenvalues
stab == 0: just enough stable eigenvalues
eig: The generalized eigenvalues from the Schur decomposition
'''
s,t,alpha,beta,q,z = la.ordqz(A=a,B=b,sort='ouc')
q=np.mat(q)
z=np.mat(z)
s=np.mat(s)
t=np.mat(t)
a=np.mat(a)
b=np.mat(b)
forcingVars = False
if len(np.shape(c))== 0:
nz = 0
phi = np.empty([0,0])
else:
forcingVars = True
nz = np.shape(c)[1]
# Components of the z matrix
z11 = z[0:n_states,0:n_states]
z12 = z[0:n_states,n_states:]
z21 = z[n_states:,0:n_states]
z22 = z[n_states:,n_states:]
# number of nonpredetermined variables
n_costates = np.shape(a)[0] - n_states
if n_states>0:
if np.linalg.matrix_rank(z11)<n_states:
sys.exit("Invertibility condition violated.")
s11 = s[0:n_states,0:n_states]
if n_states>0:
z11i = la.inv(z11)
s11i = la.inv(s11)
else:
z11i = z11
s11i = s11
# Components of the s,t,and q matrices
s12 = s[0:n_states,n_states:]
s22 = s[n_states:,n_states:]
t11 = t[0:n_states,0:n_states]
t12 = t[0:n_states,n_states:]
t22 = t[n_states:,n_states:]
q1 = q[0:n_states,:]
q2 = q[n_states:,:]
# Verify that there are exactly n_states stable (inside the unit circle) eigenvalues:
stab = 0
if n_states>0:
if np.abs(t[n_states-1,n_states-1])>np.abs(s[n_states-1,n_states-1]):
print('Warning: Too few stable eigenvalues.')
stab = -1
if n_states<n_states+n_costates:
if np.abs(t[n_states,n_states])<np.abs(s[n_states,n_states]):
print('Warning: Too many stable eigenvalues.')
stab = 1
# Compute the generalized eigenvalues
tii = np.diag(t)
sii = np.diag(s)
eig = np.zeros(np.shape(tii))
for k in range(len(tii)):
if np.abs(sii[k])>0:
eig[k] = np.abs(tii[k])/np.abs(sii[k])
else:
eig[k] = np.inf
# Solution matrix coefficients on the endogenous state
if n_states>0:
dyn = np.linalg.solve(s11,t11)
else:
dyn = np.array([])
f = np.real(z21.dot(z11i))
p = np.real(z11.dot(dyn).dot(z11i))
# Solution matrix coefficients on the exogenous state
if not forcingVars:
n = np.empty([n_costates,0])
l = np.empty([n_states,0])
else:
mat1 = np.kron(np.transpose(phi),s22) - np.kron(np.identity(nz),t22)
mat1i = la.inv(mat1)
q2c = q2.dot(c)
vecq2c = q2c.flatten(1).T
vecm = mat1i.dot(vecq2c)
m = np.transpose(np.reshape(np.transpose(vecm),(nz,n_costates)))
n = np.real((z22 - z21.dot(z11i).dot(z12)).dot(m))
l = np.real(-z11.dot(s11i).dot(t11).dot(z11i).dot(z12).dot(m) + z11.dot(s11i).dot(t12.dot(m) - s12.dot(m).dot(phi)+q1.dot(c)) + z12.dot(m).dot(phi))
return f,n,p,l,stab,eig
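# A minimal sketch (not part of the original module) that calls klein() on a
# tiny two-equation model with one predetermined state s(t) and one jump
# variable u(t):
#     s(t+1) = 0.5*s(t)
#     u(t)   = 0.8*E[u(t+1)] + s(t)
# Written as a*E[x(t+1)] = b*x(t) with x(t) = [s(t); u(t)], the solution should
# be approximately p = 0.5 and f = 1/0.6 (about 1.67), with stab == 0.
def _example_klein_usage():
    a = np.array([[1.0, 0.0],
                  [0.0, 0.8]])
    b = np.array([[0.5, 0.0],
                  [-1.0, 1.0]])
    f, n, p, l, stab, eig = klein(a=a, b=b, c=None, phi=None, n_states=1)
    return f, p, stab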
| mit |
procoder317/scikit-learn | sklearn/linear_model/logistic.py | 57 | 65098 | """
Logistic Regression
"""
# Author: Gael Varoquaux <[email protected]>
# Fabian Pedregosa <[email protected]>
# Alexandre Gramfort <[email protected]>
# Manoj Kumar <[email protected]>
# Lars Buitinck
# Simon Wu <[email protected]>
import numbers
import warnings
import numpy as np
from scipy import optimize, sparse
from .base import LinearClassifierMixin, SparseCoefMixin, BaseEstimator
from .sag import sag_solver
from .sag_fast import get_max_squared_sum
from ..feature_selection.from_model import _LearntSelectorMixin
from ..preprocessing import LabelEncoder, LabelBinarizer
from ..svm.base import _fit_liblinear
from ..utils import check_array, check_consistent_length, compute_class_weight
from ..utils import check_random_state
from ..utils.extmath import (logsumexp, log_logistic, safe_sparse_dot,
softmax, squared_norm)
from ..utils.optimize import newton_cg
from ..utils.validation import (DataConversionWarning,
check_X_y, NotFittedError)
from ..utils.fixes import expit
from ..externals.joblib import Parallel, delayed
from ..cross_validation import check_cv
from ..externals import six
from ..metrics import SCORERS
# .. some helper functions for logistic_regression_path ..
def _intercept_dot(w, X, y):
"""Computes y * np.dot(X, w).
It takes into consideration if the intercept should be fit or not.
Parameters
----------
w : ndarray, shape (n_features,) or (n_features + 1,)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
y : ndarray, shape (n_samples,)
Array of labels.
"""
c = 0.
if w.size == X.shape[1] + 1:
c = w[-1]
w = w[:-1]
z = safe_sparse_dot(X, w) + c
return w, c, y * z
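# A small illustrative check (not part of scikit-learn): with a 2-feature design
# matrix and a coefficient vector that carries an extra intercept entry,
# _intercept_dot splits off the intercept and returns y * (X.dot(w) + c).
def _example_intercept_dot():
    X = np.array([[1.0, 2.0], [3.0, 4.0]])
    y = np.array([1.0, -1.0])
    w = np.array([0.5, -0.25, 0.1])   # last entry is the intercept
    w_out, c, yz = _intercept_dot(w, X, y)
    # w_out has shape (2,), c == 0.1, yz == y * (X.dot(w_out) + c)
    return w_out, c, yz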
def _logistic_loss_and_grad(w, X, y, alpha, sample_weight=None):
"""Computes the logistic loss and gradient.
Parameters
----------
w : ndarray, shape (n_features,) or (n_features + 1,)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
y : ndarray, shape (n_samples,)
Array of labels.
alpha : float
Regularization parameter. alpha is equal to 1 / C.
sample_weight : array-like, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Returns
-------
out : float
Logistic loss.
grad : ndarray, shape (n_features,) or (n_features + 1,)
Logistic gradient.
"""
_, n_features = X.shape
grad = np.empty_like(w)
w, c, yz = _intercept_dot(w, X, y)
if sample_weight is None:
sample_weight = np.ones(y.shape[0])
# Logistic loss is the negative of the log of the logistic function.
out = -np.sum(sample_weight * log_logistic(yz)) + .5 * alpha * np.dot(w, w)
z = expit(yz)
z0 = sample_weight * (z - 1) * y
grad[:n_features] = safe_sparse_dot(X.T, z0) + alpha * w
# Case where we fit the intercept.
if grad.shape[0] > n_features:
grad[-1] = z0.sum()
return out, grad
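# An illustrative finite-difference sanity check (not part of scikit-learn):
# compare the analytic gradient returned by _logistic_loss_and_grad with a
# numerical gradient of the loss on a tiny random problem. The problem size and
# step size are arbitrary.
def _example_check_logistic_grad(seed=0):
    rng = np.random.RandomState(seed)
    X = rng.randn(20, 3)
    y = np.sign(rng.randn(20))
    w = rng.randn(4)                  # 3 coefficients plus an intercept
    alpha = 1.0

    loss, grad = _logistic_loss_and_grad(w, X, y, alpha)
    loss_only = lambda w_: _logistic_loss_and_grad(w_, X, y, alpha)[0]
    num_grad = optimize.approx_fprime(w, loss_only, 1e-6)
    # The returned value should be small (on the order of the step size).
    return np.max(np.abs(grad - num_grad))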
def _logistic_loss(w, X, y, alpha, sample_weight=None):
"""Computes the logistic loss.
Parameters
----------
w : ndarray, shape (n_features,) or (n_features + 1,)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
y : ndarray, shape (n_samples,)
Array of labels.
alpha : float
Regularization parameter. alpha is equal to 1 / C.
sample_weight : array-like, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Returns
-------
out : float
Logistic loss.
"""
w, c, yz = _intercept_dot(w, X, y)
if sample_weight is None:
sample_weight = np.ones(y.shape[0])
# Logistic loss is the negative of the log of the logistic function.
out = -np.sum(sample_weight * log_logistic(yz)) + .5 * alpha * np.dot(w, w)
return out
def _logistic_grad_hess(w, X, y, alpha, sample_weight=None):
"""Computes the gradient and the Hessian, in the case of a logistic loss.
Parameters
----------
w : ndarray, shape (n_features,) or (n_features + 1,)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
y : ndarray, shape (n_samples,)
Array of labels.
alpha : float
Regularization parameter. alpha is equal to 1 / C.
sample_weight : array-like, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Returns
-------
grad : ndarray, shape (n_features,) or (n_features + 1,)
Logistic gradient.
Hs : callable
Function that takes the gradient as a parameter and returns the
matrix product of the Hessian and gradient.
"""
n_samples, n_features = X.shape
grad = np.empty_like(w)
fit_intercept = grad.shape[0] > n_features
w, c, yz = _intercept_dot(w, X, y)
if sample_weight is None:
sample_weight = np.ones(y.shape[0])
z = expit(yz)
z0 = sample_weight * (z - 1) * y
grad[:n_features] = safe_sparse_dot(X.T, z0) + alpha * w
# Case where we fit the intercept.
if fit_intercept:
grad[-1] = z0.sum()
# The mat-vec product of the Hessian
d = sample_weight * z * (1 - z)
if sparse.issparse(X):
dX = safe_sparse_dot(sparse.dia_matrix((d, 0),
shape=(n_samples, n_samples)), X)
else:
# Precompute as much as possible
dX = d[:, np.newaxis] * X
if fit_intercept:
# Calculate the double derivative with respect to intercept
# In the case of sparse matrices this returns a matrix object.
dd_intercept = np.squeeze(np.array(dX.sum(axis=0)))
def Hs(s):
ret = np.empty_like(s)
ret[:n_features] = X.T.dot(dX.dot(s[:n_features]))
ret[:n_features] += alpha * s[:n_features]
# For the fit intercept case.
if fit_intercept:
ret[:n_features] += s[-1] * dd_intercept
ret[-1] = dd_intercept.dot(s[:n_features])
ret[-1] += d.sum() * s[-1]
return ret
return grad, Hs
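# An illustrative sketch (not part of scikit-learn): the second return value of
# _logistic_grad_hess is a callable computing Hessian-vector products, so it can
# be checked against a directional finite difference of the gradient.
def _example_check_logistic_hessp(seed=0):
    rng = np.random.RandomState(seed)
    X = rng.randn(30, 4)
    y = np.sign(rng.randn(30))
    w = rng.randn(5)                  # 4 coefficients plus an intercept
    v = rng.randn(5)
    alpha = 1.0
    eps = 1e-6

    grad, Hs = _logistic_grad_hess(w, X, y, alpha)
    grad_shifted, _ = _logistic_grad_hess(w + eps * v, X, y, alpha)
    # Hs(v) should be close to (grad(w + eps*v) - grad(w)) / eps
    return np.max(np.abs(Hs(v) - (grad_shifted - grad) / eps))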
def _multinomial_loss(w, X, Y, alpha, sample_weight):
"""Computes multinomial loss and class probabilities.
Parameters
----------
w : ndarray, shape (n_classes * n_features,) or
(n_classes * (n_features + 1),)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
Y : ndarray, shape (n_samples, n_classes)
Transformed labels according to the output of LabelBinarizer.
alpha : float
Regularization parameter. alpha is equal to 1 / C.
sample_weight : array-like, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Returns
-------
loss : float
Multinomial loss.
p : ndarray, shape (n_samples, n_classes)
Estimated class probabilities.
w : ndarray, shape (n_classes, n_features)
Reshaped param vector excluding intercept terms.
"""
n_classes = Y.shape[1]
n_features = X.shape[1]
fit_intercept = w.size == (n_classes * (n_features + 1))
w = w.reshape(n_classes, -1)
sample_weight = sample_weight[:, np.newaxis]
if fit_intercept:
intercept = w[:, -1]
w = w[:, :-1]
else:
intercept = 0
p = safe_sparse_dot(X, w.T)
p += intercept
p -= logsumexp(p, axis=1)[:, np.newaxis]
loss = -(sample_weight * Y * p).sum()
loss += 0.5 * alpha * squared_norm(w)
p = np.exp(p, p)
return loss, p, w
def _multinomial_loss_grad(w, X, Y, alpha, sample_weight):
"""Computes the multinomial loss, gradient and class probabilities.
Parameters
----------
w : ndarray, shape (n_classes * n_features,) or
(n_classes * (n_features + 1),)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
Y : ndarray, shape (n_samples, n_classes)
Transformed labels according to the output of LabelBinarizer.
alpha : float
Regularization parameter. alpha is equal to 1 / C.
sample_weight : array-like, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
Returns
-------
loss : float
Multinomial loss.
grad : ndarray, shape (n_classes * n_features,) or
(n_classes * (n_features + 1),)
Ravelled gradient of the multinomial loss.
p : ndarray, shape (n_samples, n_classes)
Estimated class probabilities
"""
n_classes = Y.shape[1]
n_features = X.shape[1]
fit_intercept = (w.size == n_classes * (n_features + 1))
grad = np.zeros((n_classes, n_features + bool(fit_intercept)))
loss, p, w = _multinomial_loss(w, X, Y, alpha, sample_weight)
sample_weight = sample_weight[:, np.newaxis]
diff = sample_weight * (p - Y)
grad[:, :n_features] = safe_sparse_dot(diff.T, X)
grad[:, :n_features] += alpha * w
if fit_intercept:
grad[:, -1] = diff.sum(axis=0)
return loss, grad.ravel(), p
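# A small illustrative example (not part of scikit-learn) for the multinomial
# helpers: build a 3-class one-hot target matrix by hand, flatten a zero
# coefficient matrix (including intercepts), and inspect the returned shapes.
# With zero coefficients the predicted class probabilities are uniform (1/3).
def _example_multinomial_loss_grad():
    rng = np.random.RandomState(0)
    n_samples, n_features, n_classes = 12, 4, 3
    X = rng.randn(n_samples, n_features)
    Y = np.eye(n_classes)[rng.randint(n_classes, size=n_samples)]  # one-hot
    w = np.zeros(n_classes * (n_features + 1))   # coefficients plus intercepts
    sample_weight = np.ones(n_samples)

    loss, grad, p = _multinomial_loss_grad(w, X, Y, alpha=1.0,
                                           sample_weight=sample_weight)
    # grad.shape == (n_classes * (n_features + 1),), p.shape == (n_samples, n_classes)
    return loss, grad.shape, p.sum(axis=1)       # rows of p sum to one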
def _multinomial_grad_hess(w, X, Y, alpha, sample_weight):
"""
Computes the gradient and the Hessian, in the case of a multinomial loss.
Parameters
----------
w : ndarray, shape (n_classes * n_features,) or
(n_classes * (n_features + 1),)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
Y : ndarray, shape (n_samples, n_classes)
Transformed labels according to the output of LabelBinarizer.
alpha : float
Regularization parameter. alpha is equal to 1 / C.
sample_weight : array-like, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
Returns
-------
grad : array, shape (n_classes * n_features,) or
(n_classes * (n_features + 1),)
Ravelled gradient of the multinomial loss.
hessp : callable
Function that takes in a vector input of shape (n_classes * n_features)
or (n_classes * (n_features + 1)) and returns matrix-vector product
with hessian.
References
----------
Barak A. Pearlmutter (1993). Fast Exact Multiplication by the Hessian.
http://www.bcl.hamilton.ie/~barak/papers/nc-hessian.pdf
"""
n_features = X.shape[1]
n_classes = Y.shape[1]
fit_intercept = w.size == (n_classes * (n_features + 1))
# `loss` is unused. Refactoring to avoid computing it does not
# significantly speed up the computation and decreases readability
loss, grad, p = _multinomial_loss_grad(w, X, Y, alpha, sample_weight)
sample_weight = sample_weight[:, np.newaxis]
# Hessian-vector product derived by applying the R-operator on the gradient
# of the multinomial loss function.
def hessp(v):
v = v.reshape(n_classes, -1)
if fit_intercept:
inter_terms = v[:, -1]
v = v[:, :-1]
else:
inter_terms = 0
# r_yhat holds the result of applying the R-operator on the multinomial
# estimator.
r_yhat = safe_sparse_dot(X, v.T)
r_yhat += inter_terms
r_yhat += (-p * r_yhat).sum(axis=1)[:, np.newaxis]
r_yhat *= p
r_yhat *= sample_weight
hessProd = np.zeros((n_classes, n_features + bool(fit_intercept)))
hessProd[:, :n_features] = safe_sparse_dot(r_yhat.T, X)
hessProd[:, :n_features] += v * alpha
if fit_intercept:
hessProd[:, -1] = r_yhat.sum(axis=0)
return hessProd.ravel()
return grad, hessp
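# An illustrative sketch (not part of scikit-learn): the hessp callable returned
# by _multinomial_grad_hess maps a flattened coefficient-sized vector to another
# vector of the same size, which is the shape contract newton-cg expects.
def _example_multinomial_hessp():
    rng = np.random.RandomState(1)
    n_samples, n_features, n_classes = 10, 3, 3
    X = rng.randn(n_samples, n_features)
    Y = np.eye(n_classes)[rng.randint(n_classes, size=n_samples)]
    w = rng.randn(n_classes * (n_features + 1))
    sample_weight = np.ones(n_samples)

    grad, hessp = _multinomial_grad_hess(w, X, Y, alpha=1.0,
                                         sample_weight=sample_weight)
    v = rng.randn(w.size)
    return grad.shape, hessp(v).shape    # both (n_classes * (n_features + 1),)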
def _check_solver_option(solver, multi_class, penalty, dual, sample_weight):
if solver not in ['liblinear', 'newton-cg', 'lbfgs', 'sag']:
raise ValueError("Logistic Regression supports only liblinear,"
" newton-cg, lbfgs and sag solvers, got %s" % solver)
if multi_class not in ['multinomial', 'ovr']:
raise ValueError("multi_class should be either multinomial or "
"ovr, got %s" % multi_class)
if multi_class == 'multinomial' and solver in ['liblinear', 'sag']:
raise ValueError("Solver %s does not support "
"a multinomial backend." % solver)
if solver != 'liblinear':
if penalty != 'l2':
raise ValueError("Solver %s supports only l2 penalties, "
"got %s penalty." % (solver, penalty))
if dual:
raise ValueError("Solver %s supports only "
"dual=False, got dual=%s" % (solver, dual))
if solver == 'liblinear' and sample_weight is not None:
raise ValueError("Solver %s does not support "
"sample weights." % solver)
def logistic_regression_path(X, y, pos_class=None, Cs=10, fit_intercept=True,
max_iter=100, tol=1e-4, verbose=0,
solver='lbfgs', coef=None, copy=False,
class_weight=None, dual=False, penalty='l2',
intercept_scaling=1., multi_class='ovr',
random_state=None, check_input=True,
max_squared_sum=None, sample_weight=None):
"""Compute a Logistic Regression model for a list of regularization
parameters.
This is an implementation that uses the result of the previous model
to speed up computations along the set of solutions, making it faster
than sequentially calling LogisticRegression for the different parameters.
Read more in the :ref:`User Guide <logistic_regression>`.
Parameters
----------
X : array-like or sparse matrix, shape (n_samples, n_features)
Input data.
y : array-like, shape (n_samples,)
Input data, target values.
Cs : int | array-like, shape (n_cs,)
List of values for the regularization parameter or integer specifying
the number of regularization parameters that should be used. In this
case, the parameters will be chosen in a logarithmic scale between
1e-4 and 1e4.
pos_class : int, None
The class with respect to which we perform a one-vs-all fit.
If None, then it is assumed that the given problem is binary.
fit_intercept : bool
Whether to fit an intercept for the model. In this case the shape of
the returned array is (n_cs, n_features + 1).
max_iter : int
Maximum number of iterations for the solver.
tol : float
Stopping criterion. For the newton-cg and lbfgs solvers, the iteration
will stop when ``max{|g_i | i = 1, ..., n} <= tol``
where ``g_i`` is the i-th component of the gradient.
verbose : int
For the liblinear and lbfgs solvers set verbose to any positive
number for verbosity.
solver : {'lbfgs', 'newton-cg', 'liblinear', 'sag'}
Numerical solver to use.
coef : array-like, shape (n_features,), default None
Initialization value for coefficients of logistic regression.
Useless for liblinear solver.
copy : bool, default False
Whether or not to produce a copy of the data. A copy is not required
anymore. This parameter is deprecated and will be removed in 0.19.
class_weight : dict or 'balanced', optional
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
dual : bool
Dual or primal formulation. Dual formulation is only implemented for
l2 penalty with liblinear solver. Prefer dual=False when
n_samples > n_features.
penalty : str, 'l1' or 'l2'
Used to specify the norm used in the penalization. The 'newton-cg',
'sag' and 'lbfgs' solvers support only l2 penalties.
intercept_scaling : float, default 1.
This parameter is useful only when the solver 'liblinear' is used
and self.fit_intercept is set to True. In this case, x becomes
[x, self.intercept_scaling],
i.e. a "synthetic" feature with constant value equals to
intercept_scaling is appended to the instance vector.
The intercept becomes intercept_scaling * synthetic feature weight
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
multi_class : str, {'ovr', 'multinomial'}
Multiclass option can be either 'ovr' or 'multinomial'. If the option
chosen is 'ovr', then a binary problem is fit for each label. Else
the loss minimised is the multinomial loss fit across
the entire probability distribution. Works only for the 'lbfgs' and
'newton-cg' solvers.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data.
check_input : bool, default True
If False, the input arrays X and y will not be checked.
max_squared_sum : float, default None
Maximum squared sum of X over samples. Used only in SAG solver.
If None, it will be computed, going through all the samples.
The value should be precomputed to speed up cross validation.
sample_weight : array-like, shape(n_samples,) optional
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Returns
-------
coefs : ndarray, shape (n_cs, n_features) or (n_cs, n_features + 1)
List of coefficients for the Logistic Regression model. If
fit_intercept is set to True then the second dimension will be
n_features + 1, where the last item represents the intercept.
Cs : ndarray
Grid of Cs used for cross-validation.
n_iter : array, shape (n_cs,)
Actual number of iteration for each Cs.
Notes
-----
You might get slightly different results with the solver liblinear than
with the others since this uses LIBLINEAR which penalizes the intercept.
"""
if copy:
warnings.warn("A copy is not required anymore. The 'copy' parameter "
"is deprecated and will be removed in 0.19.",
DeprecationWarning)
if isinstance(Cs, numbers.Integral):
Cs = np.logspace(-4, 4, Cs)
_check_solver_option(solver, multi_class, penalty, dual, sample_weight)
# Preprocessing.
if check_input or copy:
X = check_array(X, accept_sparse='csr', dtype=np.float64)
y = check_array(y, ensure_2d=False, copy=copy, dtype=None)
check_consistent_length(X, y)
_, n_features = X.shape
classes = np.unique(y)
random_state = check_random_state(random_state)
if pos_class is None and multi_class != 'multinomial':
if (classes.size > 2):
raise ValueError('To fit OvR, use the pos_class argument')
# np.unique(y) gives labels in sorted order.
pos_class = classes[1]
# If sample weights exist, convert them to array (support for lists)
# and check length
# Otherwise set them to 1 for all examples
if sample_weight is not None:
sample_weight = np.array(sample_weight, dtype=np.float64, order='C')
check_consistent_length(y, sample_weight)
else:
sample_weight = np.ones(X.shape[0])
# If class_weights is a dict (provided by the user), the weights
# are assigned to the original labels. If it is "auto", then
# the class_weights are assigned after masking the labels with an OvR.
le = LabelEncoder()
if isinstance(class_weight, dict):
if solver == "liblinear":
if classes.size == 2:
# Reconstruct the weights with keys 1 and -1
temp = {1: class_weight[pos_class],
-1: class_weight[classes[0]]}
class_weight = temp.copy()
else:
raise ValueError("In LogisticRegressionCV the liblinear "
"solver cannot handle multiclass with "
"class_weight of type dict. Use the lbfgs, "
"newton-cg or sag solvers or set "
"class_weight='auto'")
else:
class_weight_ = compute_class_weight(class_weight, classes, y)
sample_weight *= class_weight_[le.fit_transform(y)]
# For doing an ovr, we need to mask the labels first. For the
# multinomial case this is not necessary.
if multi_class == 'ovr':
w0 = np.zeros(n_features + int(fit_intercept))
mask_classes = np.array([-1, 1])
mask = (y == pos_class)
y_bin = np.ones(y.shape, dtype=np.float64)
y_bin[~mask] = -1.
else:
lbin = LabelBinarizer()
Y_bin = lbin.fit_transform(y)
if Y_bin.shape[1] == 1:
Y_bin = np.hstack([1 - Y_bin, Y_bin])
w0 = np.zeros((Y_bin.shape[1], n_features + int(fit_intercept)),
order='F')
mask_classes = classes
if class_weight == "auto":
class_weight_ = compute_class_weight(class_weight, mask_classes,
y_bin)
sample_weight *= class_weight_[le.fit_transform(y_bin)]
if coef is not None:
# it must work both giving the bias term and not
if multi_class == 'ovr':
if coef.size not in (n_features, w0.size):
raise ValueError(
'Initialization coef is of shape %d, expected shape '
'%d or %d' % (coef.size, n_features, w0.size))
w0[:coef.size] = coef
else:
# For binary problems coef.shape[0] should be 1, otherwise it
# should be classes.size.
n_vectors = classes.size
if n_vectors == 2:
n_vectors = 1
if (coef.shape[0] != n_vectors or
coef.shape[1] not in (n_features, n_features + 1)):
raise ValueError(
'Initialization coef is of shape (%d, %d), expected '
'shape (%d, %d) or (%d, %d)' % (
coef.shape[0], coef.shape[1], classes.size,
n_features, classes.size, n_features + 1))
w0[:, :coef.shape[1]] = coef
if multi_class == 'multinomial':
# fmin_l_bfgs_b and newton-cg accept only ravelled parameters.
w0 = w0.ravel()
target = Y_bin
if solver == 'lbfgs':
func = lambda x, *args: _multinomial_loss_grad(x, *args)[0:2]
elif solver == 'newton-cg':
func = lambda x, *args: _multinomial_loss(x, *args)[0]
grad = lambda x, *args: _multinomial_loss_grad(x, *args)[1]
hess = _multinomial_grad_hess
else:
target = y_bin
if solver == 'lbfgs':
func = _logistic_loss_and_grad
elif solver == 'newton-cg':
func = _logistic_loss
grad = lambda x, *args: _logistic_loss_and_grad(x, *args)[1]
hess = _logistic_grad_hess
coefs = list()
warm_start_sag = {'coef': w0}
n_iter = np.zeros(len(Cs), dtype=np.int32)
for i, C in enumerate(Cs):
if solver == 'lbfgs':
try:
w0, loss, info = optimize.fmin_l_bfgs_b(
func, w0, fprime=None,
args=(X, target, 1. / C, sample_weight),
iprint=(verbose > 0) - 1, pgtol=tol, maxiter=max_iter)
except TypeError:
# old scipy doesn't have maxiter
w0, loss, info = optimize.fmin_l_bfgs_b(
func, w0, fprime=None,
args=(X, target, 1. / C, sample_weight),
iprint=(verbose > 0) - 1, pgtol=tol)
if info["warnflag"] == 1 and verbose > 0:
warnings.warn("lbfgs failed to converge. Increase the number "
"of iterations.")
try:
n_iter_i = info['nit'] - 1
except:
n_iter_i = info['funcalls'] - 1
elif solver == 'newton-cg':
args = (X, target, 1. / C, sample_weight)
w0, n_iter_i = newton_cg(hess, func, grad, w0, args=args,
maxiter=max_iter, tol=tol)
elif solver == 'liblinear':
coef_, intercept_, n_iter_i, = _fit_liblinear(
X, target, C, fit_intercept, intercept_scaling, class_weight,
penalty, dual, verbose, max_iter, tol, random_state)
if fit_intercept:
w0 = np.concatenate([coef_.ravel(), intercept_])
else:
w0 = coef_.ravel()
elif solver == 'sag':
w0, n_iter_i, warm_start_sag = sag_solver(
X, target, sample_weight, 'log', 1. / C, max_iter, tol,
verbose, random_state, False, max_squared_sum,
warm_start_sag)
else:
raise ValueError("solver must be one of {'liblinear', 'lbfgs', "
"'newton-cg', 'sag'}, got '%s' instead" % solver)
if multi_class == 'multinomial':
multi_w0 = np.reshape(w0, (classes.size, -1))
if classes.size == 2:
multi_w0 = multi_w0[1][np.newaxis, :]
coefs.append(multi_w0)
else:
coefs.append(w0.copy())
n_iter[i] = n_iter_i
return coefs, np.array(Cs), n_iter
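# A minimal usage sketch (not part of scikit-learn): fit a short regularization
# path on a tiny, well-separated toy problem. The data and the grid of C values
# are arbitrary illustration choices.
def _example_logistic_regression_path():
    rng = np.random.RandomState(0)
    X = np.vstack([rng.randn(20, 2) - 2.0, rng.randn(20, 2) + 2.0])
    y = np.array([0] * 20 + [1] * 20)

    coefs, Cs, n_iter = logistic_regression_path(
        X, y, Cs=[0.01, 1.0, 100.0], fit_intercept=True, solver='lbfgs')
    # coefs is a list with one (n_features + 1,) array per value of C
    return coefs, Cs, n_iter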
# helper function for LogisticCV
def _log_reg_scoring_path(X, y, train, test, pos_class=None, Cs=10,
scoring=None, fit_intercept=False,
max_iter=100, tol=1e-4, class_weight=None,
verbose=0, solver='lbfgs', penalty='l2',
dual=False, intercept_scaling=1.,
multi_class='ovr', random_state=None,
max_squared_sum=None, sample_weight=None):
"""Computes scores across logistic_regression_path
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target labels.
train : list of indices
The indices of the train set.
test : list of indices
The indices of the test set.
pos_class : int, None
The class with respect to which we perform a one-vs-all fit.
If None, then it is assumed that the given problem is binary.
Cs : list of floats | int
Each of the values in Cs describes the inverse of
regularization strength. If Cs is as an int, then a grid of Cs
values are chosen in a logarithmic scale between 1e-4 and 1e4.
If not provided, then a fixed set of values for Cs are used.
scoring : callable
For a list of scoring functions that can be used, look at
:mod:`sklearn.metrics`. The default scoring option used is
accuracy_score.
fit_intercept : bool
If False, then the bias term is set to zero. Else the last
term of each coef_ gives us the intercept.
max_iter : int
Maximum number of iterations for the solver.
tol : float
Tolerance for stopping criteria.
class_weight : dict or 'balanced', optional
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
verbose : int
For the liblinear and lbfgs solvers set verbose to any positive
number for verbosity.
solver : {'lbfgs', 'newton-cg', 'liblinear', 'sag'}
Decides which solver to use.
penalty : str, 'l1' or 'l2'
Used to specify the norm used in the penalization. The newton-cg and
lbfgs solvers support only l2 penalties.
dual : bool
Dual or primal formulation. Dual formulation is only implemented for
l2 penalty with liblinear solver. Prefer dual=False when
n_samples > n_features.
intercept_scaling : float, default 1.
This parameter is useful only when the solver 'liblinear' is used
and self.fit_intercept is set to True. In this case, x becomes
[x, self.intercept_scaling],
i.e. a "synthetic" feature with constant value equals to
intercept_scaling is appended to the instance vector.
The intercept becomes intercept_scaling * synthetic feature weight
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
multi_class : str, {'ovr', 'multinomial'}
Multiclass option can be either 'ovr' or 'multinomial'. If the option
chosen is 'ovr', then a binary problem is fit for each label. Else
the loss minimised is the multinomial loss fit across
the entire probability distribution. Works only for the 'lbfgs' and
'newton-cg' solvers.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data.
max_squared_sum : float, default None
Maximum squared sum of X over samples. Used only in SAG solver.
If None, it will be computed, going through all the samples.
The value should be precomputed to speed up cross validation.
sample_weight : array-like, shape(n_samples,) optional
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Returns
-------
coefs : ndarray, shape (n_cs, n_features) or (n_cs, n_features + 1)
List of coefficients for the Logistic Regression model. If
fit_intercept is set to True then the second dimension will be
n_features + 1, where the last item represents the intercept.
Cs : ndarray
Grid of Cs used for cross-validation.
scores : ndarray, shape (n_cs,)
Scores obtained for each Cs.
n_iter : array, shape(n_cs,)
Actual number of iteration for each Cs.
"""
_check_solver_option(solver, multi_class, penalty, dual, sample_weight)
X_train = X[train]
X_test = X[test]
y_train = y[train]
y_test = y[test]
if sample_weight is not None:
sample_weight = sample_weight[train]
coefs, Cs, n_iter = logistic_regression_path(
X_train, y_train, Cs=Cs, fit_intercept=fit_intercept,
solver=solver, max_iter=max_iter, class_weight=class_weight,
pos_class=pos_class, multi_class=multi_class,
tol=tol, verbose=verbose, dual=dual, penalty=penalty,
intercept_scaling=intercept_scaling, random_state=random_state,
check_input=False, max_squared_sum=max_squared_sum,
sample_weight=sample_weight)
log_reg = LogisticRegression(fit_intercept=fit_intercept)
# The score method of Logistic Regression has a classes_ attribute.
if multi_class == 'ovr':
log_reg.classes_ = np.array([-1, 1])
elif multi_class == 'multinomial':
log_reg.classes_ = np.unique(y_train)
else:
raise ValueError("multi_class should be either multinomial or ovr, "
"got %d" % multi_class)
if pos_class is not None:
mask = (y_test == pos_class)
y_test = np.ones(y_test.shape, dtype=np.float64)
y_test[~mask] = -1.
# To deal with object dtypes, we need to convert into an array of floats.
y_test = check_array(y_test, dtype=np.float64, ensure_2d=False)
scores = list()
if isinstance(scoring, six.string_types):
scoring = SCORERS[scoring]
for w in coefs:
if multi_class == 'ovr':
w = w[np.newaxis, :]
if fit_intercept:
log_reg.coef_ = w[:, :-1]
log_reg.intercept_ = w[:, -1]
else:
log_reg.coef_ = w
log_reg.intercept_ = 0.
if scoring is None:
scores.append(log_reg.score(X_test, y_test))
else:
scores.append(scoring(log_reg, X_test, y_test))
return coefs, Cs, np.array(scores), n_iter
class LogisticRegression(BaseEstimator, LinearClassifierMixin,
_LearntSelectorMixin, SparseCoefMixin):
"""Logistic Regression (aka logit, MaxEnt) classifier.
In the multiclass case, the training algorithm uses the one-vs-rest (OvR)
scheme if the 'multi_class' option is set to 'ovr' and uses the
cross-entropy loss, if the 'multi_class' option is set to 'multinomial'.
(Currently the 'multinomial' option is supported only by the 'lbfgs' and
'newton-cg' solvers.)
This class implements regularized logistic regression using the
`liblinear` library, newton-cg and lbfgs solvers. It can handle both
dense and sparse input. Use C-ordered arrays or CSR matrices containing
64-bit floats for optimal performance; any other input format will be
converted (and copied).
The newton-cg and lbfgs solvers support only L2 regularization with primal
formulation. The liblinear solver supports both L1 and L2 regularization,
with a dual formulation only for the L2 penalty.
Read more in the :ref:`User Guide <logistic_regression>`.
Parameters
----------
penalty : str, 'l1' or 'l2'
Used to specify the norm used in the penalization. The newton-cg and
lbfgs solvers support only l2 penalties.
dual : bool
Dual or primal formulation. Dual formulation is only implemented for
l2 penalty with liblinear solver. Prefer dual=False when
n_samples > n_features.
C : float, optional (default=1.0)
Inverse of regularization strength; must be a positive float.
Like in support vector machines, smaller values specify stronger
regularization.
fit_intercept : bool, default: True
Specifies if a constant (a.k.a. bias or intercept) should be
added to the decision function.
intercept_scaling : float, default: 1
Useful only if solver is liblinear.
when self.fit_intercept is True, instance vector x becomes
[x, self.intercept_scaling],
i.e. a "synthetic" feature with constant value equals to
intercept_scaling is appended to the instance vector.
The intercept becomes intercept_scaling * synthetic feature weight
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
class_weight : dict or 'balanced', optional
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
max_iter : int
Useful only for the newton-cg, sag and lbfgs solvers.
Maximum number of iterations taken for the solvers to converge.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data.
solver : {'newton-cg', 'lbfgs', 'liblinear', 'sag'}
Algorithm to use in the optimization problem.
- For small datasets, 'liblinear' is a good choice, whereas 'sag' is
faster for large ones.
- For multiclass problems, only 'newton-cg' and 'lbfgs' handle
multinomial loss; 'sag' and 'liblinear' are limited to
one-versus-rest schemes.
- 'newton-cg', 'lbfgs' and 'sag' only handle L2 penalty.
Note that 'sag' fast convergence is only guaranteed on features with
approximately the same scale. You can preprocess the data with a
scaler from sklearn.preprocessing.
tol : float, optional
Tolerance for stopping criteria.
multi_class : str, {'ovr', 'multinomial'}
Multiclass option can be either 'ovr' or 'multinomial'. If the option
chosen is 'ovr', then a binary problem is fit for each label. Else
the loss minimised is the multinomial loss fit across
the entire probability distribution. Works only for the 'lbfgs'
and 'newton-cg' solvers.
verbose : int
For the liblinear and lbfgs solvers set verbose to any positive
number for verbosity.
warm_start : bool, optional
When set to True, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
Useless for liblinear solver.
n_jobs : int, optional
Number of CPU cores used during the cross-validation loop. If given
a value of -1, all cores are used.
Attributes
----------
coef_ : array, shape (n_classes, n_features)
Coefficient of the features in the decision function.
intercept_ : array, shape (n_classes,)
Intercept (a.k.a. bias) added to the decision function.
If `fit_intercept` is set to False, the intercept is set to zero.
n_iter_ : array, shape (n_classes,) or (1, )
Actual number of iterations for all classes. If binary or multinomial,
it returns only 1 element. For liblinear solver, only the maximum
number of iteration across all classes is given.
See also
--------
SGDClassifier : incrementally trained logistic regression (when given
the parameter ``loss="log"``).
sklearn.svm.LinearSVC : learns SVM models using the same algorithm.
Notes
-----
The underlying C implementation uses a random number generator to
select features when fitting the model. It is thus not uncommon
to have slightly different results for the same input data. If
that happens, try with a smaller tol parameter.
Predict output may not match that of standalone liblinear in certain
cases. See :ref:`differences from liblinear <liblinear_differences>`
in the narrative documentation.
References
----------
LIBLINEAR -- A Library for Large Linear Classification
http://www.csie.ntu.edu.tw/~cjlin/liblinear/
Hsiang-Fu Yu, Fang-Lan Huang, Chih-Jen Lin (2011). Dual coordinate descent
methods for logistic regression and maximum entropy models.
Machine Learning 85(1-2):41-75.
http://www.csie.ntu.edu.tw/~cjlin/papers/maxent_dual.pdf
"""
def __init__(self, penalty='l2', dual=False, tol=1e-4, C=1.0,
fit_intercept=True, intercept_scaling=1, class_weight=None,
random_state=None, solver='liblinear', max_iter=100,
multi_class='ovr', verbose=0, warm_start=False, n_jobs=1):
self.penalty = penalty
self.dual = dual
self.tol = tol
self.C = C
self.fit_intercept = fit_intercept
self.intercept_scaling = intercept_scaling
self.class_weight = class_weight
self.random_state = random_state
self.solver = solver
self.max_iter = max_iter
self.multi_class = multi_class
self.verbose = verbose
self.warm_start = warm_start
self.n_jobs = n_jobs
def fit(self, X, y, sample_weight=None):
"""Fit the model according to the given training data.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples,)
Target vector relative to X.
sample_weight : array-like, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Returns
-------
self : object
Returns self.
"""
if not isinstance(self.C, numbers.Number) or self.C < 0:
raise ValueError("Penalty term must be positive; got (C=%r)"
% self.C)
if not isinstance(self.max_iter, numbers.Number) or self.max_iter < 0:
raise ValueError("Maximum number of iteration must be positive;"
" got (max_iter=%r)" % self.max_iter)
if not isinstance(self.tol, numbers.Number) or self.tol < 0:
raise ValueError("Tolerance for stopping criteria must be "
"positive; got (tol=%r)" % self.tol)
X, y = check_X_y(X, y, accept_sparse='csr', dtype=np.float64,
order="C")
self.classes_ = np.unique(y)
n_samples, n_features = X.shape
_check_solver_option(self.solver, self.multi_class, self.penalty,
self.dual, sample_weight)
if self.solver == 'liblinear':
self.coef_, self.intercept_, n_iter_ = _fit_liblinear(
X, y, self.C, self.fit_intercept, self.intercept_scaling,
self.class_weight, self.penalty, self.dual, self.verbose,
self.max_iter, self.tol, self.random_state)
self.n_iter_ = np.array([n_iter_])
return self
max_squared_sum = get_max_squared_sum(X) if self.solver == 'sag' \
else None
n_classes = len(self.classes_)
classes_ = self.classes_
if n_classes < 2:
raise ValueError("This solver needs samples of at least 2 classes"
" in the data, but the data contains only one"
" class: %r" % classes_[0])
if len(self.classes_) == 2:
n_classes = 1
classes_ = classes_[1:]
if self.warm_start:
warm_start_coef = getattr(self, 'coef_', None)
else:
warm_start_coef = None
if warm_start_coef is not None and self.fit_intercept:
warm_start_coef = np.append(warm_start_coef,
self.intercept_[:, np.newaxis],
axis=1)
self.coef_ = list()
self.intercept_ = np.zeros(n_classes)
# Hack so that we iterate only once for the multinomial case.
if self.multi_class == 'multinomial':
classes_ = [None]
warm_start_coef = [warm_start_coef]
if warm_start_coef is None:
warm_start_coef = [None] * n_classes
path_func = delayed(logistic_regression_path)
# The SAG solver releases the GIL so it's more efficient to use
# threads for this solver.
backend = 'threading' if self.solver == 'sag' else 'multiprocessing'
fold_coefs_ = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,
backend=backend)(
path_func(X, y, pos_class=class_, Cs=[self.C],
fit_intercept=self.fit_intercept, tol=self.tol,
verbose=self.verbose, solver=self.solver, copy=False,
multi_class=self.multi_class, max_iter=self.max_iter,
class_weight=self.class_weight, check_input=False,
random_state=self.random_state, coef=warm_start_coef_,
max_squared_sum=max_squared_sum,
sample_weight=sample_weight)
for (class_, warm_start_coef_) in zip(classes_, warm_start_coef))
fold_coefs_, _, n_iter_ = zip(*fold_coefs_)
self.n_iter_ = np.asarray(n_iter_, dtype=np.int32)[:, 0]
if self.multi_class == 'multinomial':
self.coef_ = fold_coefs_[0][0]
else:
self.coef_ = np.asarray(fold_coefs_)
self.coef_ = self.coef_.reshape(n_classes, n_features +
int(self.fit_intercept))
if self.fit_intercept:
self.intercept_ = self.coef_[:, -1]
self.coef_ = self.coef_[:, :-1]
return self
def predict_proba(self, X):
"""Probability estimates.
The returned estimates for all classes are ordered by the
label of classes.
For a multi_class problem, if multi_class is set to be "multinomial"
the softmax function is used to find the predicted probability of
each class.
Else use a one-vs-rest approach, i.e. calculate the probability
of each class assuming it to be positive using the logistic function,
and normalize these values across all the classes.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
T : array-like, shape = [n_samples, n_classes]
Returns the probability of the sample for each class in the model,
where classes are ordered as they are in ``self.classes_``.
"""
if not hasattr(self, "coef_"):
raise NotFittedError("Call fit before prediction")
calculate_ovr = self.coef_.shape[0] == 1 or self.multi_class == "ovr"
if calculate_ovr:
return super(LogisticRegression, self)._predict_proba_lr(X)
else:
return softmax(self.decision_function(X), copy=False)
def predict_log_proba(self, X):
"""Log of probability estimates.
The returned estimates for all classes are ordered by the
label of classes.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
T : array-like, shape = [n_samples, n_classes]
Returns the log-probability of the sample for each class in the
model, where classes are ordered as they are in ``self.classes_``.
"""
return np.log(self.predict_proba(X))
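# A minimal usage sketch for the estimator above (illustrative only; the data
# below is synthetic and is not part of this module):
#
#     >>> import numpy as np
#     >>> from sklearn.linear_model import LogisticRegression
#     >>> X = np.random.RandomState(0).randn(20, 3)
#     >>> y = (X[:, 0] > 0).astype(int)
#     >>> clf = LogisticRegression(C=1.0, solver='liblinear').fit(X, y)
#     >>> probs = clf.predict_proba(X)  # shape (20, 2); each row sums to 1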
class LogisticRegressionCV(LogisticRegression, BaseEstimator,
LinearClassifierMixin, _LearntSelectorMixin):
"""Logistic Regression CV (aka logit, MaxEnt) classifier.
This class implements logistic regression using liblinear, newton-cg, sag
or lbfgs optimizers. The newton-cg, sag and lbfgs solvers support only L2
regularization with primal formulation. The liblinear solver supports both
L1 and L2 regularization, with a dual formulation only for the L2 penalty.
For the grid of Cs values (that are set by default to be ten values in
a logarithmic scale between 1e-4 and 1e4), the best hyperparameter is
selected by the cross-validator StratifiedKFold, but it can be changed
using the cv parameter. In the case of newton-cg and lbfgs solvers,
we warm start along the path i.e guess the initial coefficients of the
present fit to be the coefficients got after convergence in the previous
fit, so it is supposed to be faster for high-dimensional dense data.
For a multiclass problem, the hyperparameters for each class are computed
using the best scores got by doing a one-vs-rest in parallel across all
folds and classes. Hence this is not the true multinomial loss.
Read more in the :ref:`User Guide <logistic_regression>`.
Parameters
----------
Cs : list of floats | int
Each of the values in Cs describes the inverse of regularization
strength. If Cs is an int, then a grid of Cs values is chosen
in a logarithmic scale between 1e-4 and 1e4.
Like in support vector machines, smaller values specify stronger
regularization.
fit_intercept : bool, default: True
Specifies if a constant (a.k.a. bias or intercept) should be
added to the decision function.
class_weight : dict or 'balanced', optional
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
cv : integer or cross-validation generator
The default cross-validation generator used is Stratified K-Folds.
If an integer is provided, then it is the number of folds used.
See the module :mod:`sklearn.cross_validation` module for the
list of possible cross-validation objects.
penalty : str, 'l1' or 'l2'
Used to specify the norm used in the penalization. The newton-cg and
lbfgs solvers support only l2 penalties.
dual : bool
Dual or primal formulation. Dual formulation is only implemented for
l2 penalty with liblinear solver. Prefer dual=False when
n_samples > n_features.
scoring : callable
Scoring function to use as cross-validation criteria. For a list of
scoring functions that can be used, look at :mod:`sklearn.metrics`.
The default scoring option used is accuracy_score.
solver : {'newton-cg', 'lbfgs', 'liblinear', 'sag'}
Algorithm to use in the optimization problem.
- For small datasets, 'liblinear' is a good choice, whereas 'sag' is
faster for large ones.
- For multiclass problems, only 'newton-cg' and 'lbfgs' handle
multinomial loss; 'sag' and 'liblinear' are limited to
one-versus-rest schemes.
- 'newton-cg', 'lbfgs' and 'sag' only handle L2 penalty.
- 'liblinear' might be slower in LogisticRegressionCV because it does
not handle warm-starting.
tol : float, optional
Tolerance for stopping criteria.
max_iter : int, optional
Maximum number of iterations of the optimization algorithm.
n_jobs : int, optional
Number of CPU cores used during the cross-validation loop. If given
a value of -1, all cores are used.
verbose : int
For the 'liblinear', 'sag' and 'lbfgs' solvers set verbose to any
positive number for verbosity.
refit : bool
If set to True, the scores are averaged across all folds, and the
coefs and the C that corresponds to the best score is taken, and a
final refit is done using these parameters.
Otherwise the coefs, intercepts and C that correspond to the
best scores across folds are averaged.
multi_class : str, {'ovr', 'multinomial'}
Multiclass option can be either 'ovr' or 'multinomial'. If the option
chosen is 'ovr', then a binary problem is fit for each label. Else
the loss minimised is the multinomial loss fit across
the entire probability distribution. Works only for 'lbfgs' and
'newton-cg' solvers.
intercept_scaling : float, default 1.
Useful only if solver is liblinear.
This parameter is useful only when the solver 'liblinear' is used
and self.fit_intercept is set to True. In this case, x becomes
[x, self.intercept_scaling],
i.e. a "synthetic" feature with constant value equals to
intercept_scaling is appended to the instance vector.
The intercept becomes intercept_scaling * synthetic feature weight
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data.
Attributes
----------
coef_ : array, shape (1, n_features) or (n_classes, n_features)
Coefficient of the features in the decision function.
`coef_` is of shape (1, n_features) when the given problem
is binary.
`coef_` is readonly property derived from `raw_coef_` that
follows the internal memory layout of liblinear.
intercept_ : array, shape (1,) or (n_classes,)
Intercept (a.k.a. bias) added to the decision function.
It is available only when parameter intercept is set to True
and is of shape (1,) when the problem is binary.
Cs_ : array
Array of C i.e. inverse of regularization parameter values used
for cross-validation.
coefs_paths_ : array, shape ``(n_folds, len(Cs_), n_features)`` or \
``(n_folds, len(Cs_), n_features + 1)``
dict with classes as the keys, and the path of coefficients obtained
during cross-validating across each fold and then across each Cs
after doing an OvR for the corresponding class as values.
If the 'multi_class' option is set to 'multinomial', then
the coefs_paths are the coefficients corresponding to each class.
Each dict value has shape ``(n_folds, len(Cs_), n_features)`` or
``(n_folds, len(Cs_), n_features + 1)`` depending on whether the
intercept is fit or not.
scores_ : dict
dict with classes as the keys, and the values as the
grid of scores obtained during cross-validating each fold, after doing
an OvR for the corresponding class. If the 'multi_class' option
given is 'multinomial' then the same scores are repeated across
all classes, since this is the multinomial class.
Each dict value has shape (n_folds, len(Cs))
C_ : array, shape (n_classes,) or (n_classes - 1,)
Array of C that maps to the best scores across every class. If refit is
set to False, then for each class, the best C is the average of the
C's that correspond to the best scores for each fold.
n_iter_ : array, shape (n_classes, n_folds, n_cs) or (1, n_folds, n_cs)
Actual number of iterations for all classes, folds and Cs.
In the binary or multinomial cases, the first dimension is equal to 1.
See also
--------
LogisticRegression
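    Examples
    --------
    A minimal, illustrative sketch (synthetic data; exact results will vary):

        >>> import numpy as np
        >>> from sklearn.linear_model import LogisticRegressionCV
        >>> X = np.random.RandomState(0).randn(100, 4)
        >>> y = (X[:, 0] + X[:, 1] > 0).astype(int)
        >>> clf = LogisticRegressionCV(Cs=5, cv=3, solver='lbfgs').fit(X, y)
        >>> best_C = clf.C_  # one selected C per (effective) class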
"""
def __init__(self, Cs=10, fit_intercept=True, cv=None, dual=False,
penalty='l2', scoring=None, solver='lbfgs', tol=1e-4,
max_iter=100, class_weight=None, n_jobs=1, verbose=0,
refit=True, intercept_scaling=1., multi_class='ovr',
random_state=None):
self.Cs = Cs
self.fit_intercept = fit_intercept
self.cv = cv
self.dual = dual
self.penalty = penalty
self.scoring = scoring
self.tol = tol
self.max_iter = max_iter
self.class_weight = class_weight
self.n_jobs = n_jobs
self.verbose = verbose
self.solver = solver
self.refit = refit
self.intercept_scaling = intercept_scaling
self.multi_class = multi_class
self.random_state = random_state
def fit(self, X, y, sample_weight=None):
"""Fit the model according to the given training data.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples,)
Target vector relative to X.
sample_weight : array-like, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Returns
-------
self : object
Returns self.
"""
_check_solver_option(self.solver, self.multi_class, self.penalty,
self.dual, sample_weight)
if not isinstance(self.max_iter, numbers.Number) or self.max_iter < 0:
raise ValueError("Maximum number of iteration must be positive;"
" got (max_iter=%r)" % self.max_iter)
if not isinstance(self.tol, numbers.Number) or self.tol < 0:
raise ValueError("Tolerance for stopping criteria must be "
"positive; got (tol=%r)" % self.tol)
X, y = check_X_y(X, y, accept_sparse='csr', dtype=np.float64,
order="C")
max_squared_sum = get_max_squared_sum(X) if self.solver == 'sag' \
else None
if y.ndim == 2 and y.shape[1] == 1:
warnings.warn(
"A column-vector y was passed when a 1d array was"
" expected. Please change the shape of y to "
"(n_samples, ), for example using ravel().",
DataConversionWarning)
y = np.ravel(y)
check_consistent_length(X, y)
# init cross-validation generator
cv = check_cv(self.cv, X, y, classifier=True)
folds = list(cv)
self._enc = LabelEncoder()
self._enc.fit(y)
labels = self.classes_ = np.unique(y)
n_classes = len(labels)
if n_classes < 2:
raise ValueError("This solver needs samples of at least 2 classes"
" in the data, but the data contains only one"
" class: %r" % self.classes_[0])
if n_classes == 2:
# OvR in case of binary problems is as good as fitting
# the higher label
n_classes = 1
labels = labels[1:]
# We need this hack to iterate only once over labels, in the case of
# multi_class = multinomial, without changing the value of the labels.
iter_labels = labels
if self.multi_class == 'multinomial':
iter_labels = [None]
if self.class_weight and not(isinstance(self.class_weight, dict) or
self.class_weight in
['balanced', 'auto']):
raise ValueError("class_weight provided should be a "
"dict or 'balanced'")
path_func = delayed(_log_reg_scoring_path)
# The SAG solver releases the GIL so it's more efficient to use
# threads for this solver.
backend = 'threading' if self.solver == 'sag' else 'multiprocessing'
fold_coefs_ = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,
backend=backend)(
path_func(X, y, train, test, pos_class=label, Cs=self.Cs,
fit_intercept=self.fit_intercept, penalty=self.penalty,
dual=self.dual, solver=self.solver, tol=self.tol,
max_iter=self.max_iter, verbose=self.verbose,
class_weight=self.class_weight, scoring=self.scoring,
multi_class=self.multi_class,
intercept_scaling=self.intercept_scaling,
random_state=self.random_state,
max_squared_sum=max_squared_sum,
sample_weight=sample_weight
)
for label in iter_labels
for train, test in folds)
if self.multi_class == 'multinomial':
multi_coefs_paths, Cs, multi_scores, n_iter_ = zip(*fold_coefs_)
multi_coefs_paths = np.asarray(multi_coefs_paths)
multi_scores = np.asarray(multi_scores)
# This is just to maintain API similarity between the ovr and
# multinomial option.
# Coefs_paths is now n_folds X len(Cs) X n_classes X n_features
# we need it to be n_classes X len(Cs) X n_folds X n_features
# to be similar to "ovr".
coefs_paths = np.rollaxis(multi_coefs_paths, 2, 0)
# Multinomial has a true score across all labels. Hence the
# shape is n_folds X len(Cs). We need to repeat this score
# across all labels for API similarity.
scores = np.tile(multi_scores, (n_classes, 1, 1))
self.Cs_ = Cs[0]
self.n_iter_ = np.reshape(n_iter_, (1, len(folds),
len(self.Cs_)))
else:
coefs_paths, Cs, scores, n_iter_ = zip(*fold_coefs_)
self.Cs_ = Cs[0]
coefs_paths = np.reshape(coefs_paths, (n_classes, len(folds),
len(self.Cs_), -1))
self.n_iter_ = np.reshape(n_iter_, (n_classes, len(folds),
len(self.Cs_)))
self.coefs_paths_ = dict(zip(labels, coefs_paths))
scores = np.reshape(scores, (n_classes, len(folds), -1))
self.scores_ = dict(zip(labels, scores))
self.C_ = list()
self.coef_ = np.empty((n_classes, X.shape[1]))
self.intercept_ = np.zeros(n_classes)
# hack to iterate only once for multinomial case.
if self.multi_class == 'multinomial':
scores = multi_scores
coefs_paths = multi_coefs_paths
for index, label in enumerate(iter_labels):
if self.multi_class == 'ovr':
scores = self.scores_[label]
coefs_paths = self.coefs_paths_[label]
if self.refit:
best_index = scores.sum(axis=0).argmax()
C_ = self.Cs_[best_index]
self.C_.append(C_)
if self.multi_class == 'multinomial':
coef_init = np.mean(coefs_paths[:, best_index, :, :],
axis=0)
else:
coef_init = np.mean(coefs_paths[:, best_index, :], axis=0)
w, _, _ = logistic_regression_path(
X, y, pos_class=label, Cs=[C_], solver=self.solver,
fit_intercept=self.fit_intercept, coef=coef_init,
max_iter=self.max_iter, tol=self.tol,
penalty=self.penalty, copy=False,
class_weight=self.class_weight,
multi_class=self.multi_class,
verbose=max(0, self.verbose - 1),
random_state=self.random_state,
check_input=False, max_squared_sum=max_squared_sum,
sample_weight=sample_weight)
w = w[0]
else:
# Take the best scores across every fold and the average of all
# coefficients corresponding to the best scores.
best_indices = np.argmax(scores, axis=1)
w = np.mean([coefs_paths[i][best_indices[i]]
for i in range(len(folds))], axis=0)
self.C_.append(np.mean(self.Cs_[best_indices]))
if self.multi_class == 'multinomial':
self.C_ = np.tile(self.C_, n_classes)
self.coef_ = w[:, :X.shape[1]]
if self.fit_intercept:
self.intercept_ = w[:, -1]
else:
self.coef_[index] = w[: X.shape[1]]
if self.fit_intercept:
self.intercept_[index] = w[-1]
self.C_ = np.asarray(self.C_)
return self
| bsd-3-clause |
PythonCharmers/bokeh | bokeh/charts/builder/donut_builder.py | 31 | 8206 | """This is the Bokeh charts interface. It gives you a high level API to build
complex plots in a simple way.
This is the Donut class, which lets you build your Donut charts just by passing
the arguments to the Chart class and calling the proper functions.
It also adds a new chained stacked method.
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2014, Continuum Analytics, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import absolute_import, division
from math import pi
import pandas as pd
from ..utils import cycle_colors, polar_to_cartesian
from .._builder import Builder, create_and_build
from ...models import ColumnDataSource, GlyphRenderer, Range1d
from ...models.glyphs import AnnularWedge, Text, Wedge
from ...properties import Any, Bool, Either, List
#-----------------------------------------------------------------------------
# Classes and functions
#-----------------------------------------------------------------------------
def Donut(values, cat=None, width=800, height=800, xgrid=False, ygrid=False, **kws):
""" Creates a Donut chart using :class:`DonutBuilder <bokeh.charts.builder.donut_builder.DonutBuilder>`
to render the geometry from values and cat.
Args:
values (iterable): iterable 2d representing the data series
values matrix.
cat (list or bool, optional): list of string representing the categories.
Defaults to None.
In addition to the parameters specific to this chart,
:ref:`userguide_charts_generic_arguments` are also accepted as keyword parameters.
Returns:
a new :class:`Chart <bokeh.charts.Chart>`
Examples:
.. bokeh-plot::
:source-position: above
from bokeh.charts import Donut, output_file, show
# dict, OrderedDict, lists, arrays and DataFrames are valid inputs
xyvalues = [[2., 5., 3.], [4., 1., 4.], [6., 4., 3.]]
donut = Donut(xyvalues, ['cpu1', 'cpu2', 'cpu3'])
output_file('donut.html')
show(donut)
"""
return create_and_build(
DonutBuilder, values, cat=cat, width=width, height=height,
xgrid=xgrid, ygrid=ygrid, **kws
)
class DonutBuilder(Builder):
"""This is the Donut class and it is in charge of plotting
Donut charts in an easy and intuitive way.
Essentially, it provides a way to ingest the data, make the proper
calculations and push the references into a source object.
We additionally make calculations for the donut slices and angles.
And finally add the needed glyphs (Wedges and AnnularWedges) taking
the references from the source.
"""
cat = Either(Bool, List(Any), help="""
List of string representing the categories. (Defaults to None.)
""")
def _process_data(self):
"""Take the chart data from self._values.
It calculates the chart properties accordingly (start/end angles).
Then build a dict containing references to all the calculated
points to be used by the Wedge glyph inside the ``_yield_renderers`` method.
"""
dd = dict(zip(self._values.keys(), self._values.values()))
self._df = df = pd.DataFrame(dd)
self._groups = df.index = self.cat
df.columns = self._values.keys()
# Get the sum per category
aggregated = df.T.sum()
# Get the total (sum of all categories)
self._total_units = total = aggregated.sum()
radians = lambda x: 2*pi*(x/total)
angles = aggregated.map(radians).cumsum()
end_angles = angles.tolist()
start_angles = [0] + end_angles[:-1]
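# Worked example (illustrative): if the aggregated totals per category were
# [1, 1, 2] (total = 4), radians() maps them to fractions of 2*pi, the
# cumulative sum gives end angles [pi/2, pi, 2*pi] and the shifted copy gives
# start angles [0, pi/2, pi], so each wedge spans an arc proportional to its
# share of the total.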
colors = cycle_colors(self.cat, self.palette)
self.set_and_get("", "colors", colors)
self.set_and_get("", "end", end_angles)
self.set_and_get("", "start", start_angles)
def _set_sources(self):
"""Push the Donut data into the ColumnDataSource and calculate
the proper ranges.
"""
self._source = ColumnDataSource(self._data)
self.x_range = Range1d(start=-2, end=2)
self.y_range = Range1d(start=-2, end=2)
def draw_central_wedge(self):
"""Draw the central part of the donut wedge from donut.source and
its calculated start and end angles.
"""
glyph = Wedge(
x=0, y=0, radius=1, start_angle="start", end_angle="end",
line_color="white", line_width=2, fill_color="colors"
)
yield GlyphRenderer(data_source=self._source, glyph=glyph)
def draw_central_descriptions(self):
"""Draw the descriptions to be placed on the central part of the
donut wedge
"""
text = ["%s" % cat for cat in self.cat]
x, y = polar_to_cartesian(0.7, self._data["start"], self._data["end"])
text_source = ColumnDataSource(dict(text=text, x=x, y=y))
glyph = Text(
x="x", y="y", text="text",
text_align="center", text_baseline="middle"
)
yield GlyphRenderer(data_source=text_source, glyph=glyph)
def draw_external_ring(self, colors=None):
"""Draw the external part of the donut wedge from donut.source
and its related descriptions
"""
if colors is None:
colors = cycle_colors(self.cat, self.palette)
first = True
for i, (cat, start_angle, end_angle) in enumerate(zip(
self.cat, self._data['start'], self._data['end'])):
details = self._df.ix[i]
radians = lambda x: 2*pi*(x/self._total_units)
angles = details.map(radians).cumsum() + start_angle
end = angles.tolist() + [end_angle]
start = [start_angle] + end[:-1]
base_color = colors[i]
#fill = [ base_color.lighten(i*0.05) for i in range(len(details) + 1) ]
fill = [base_color for i in range(len(details) + 1)]
text = [rowlabel for rowlabel in details.index]
x, y = polar_to_cartesian(1.25, start, end)
source = ColumnDataSource(dict(start=start, end=end, fill=fill))
glyph = AnnularWedge(
x=0, y=0, inner_radius=1, outer_radius=1.5,
start_angle="start", end_angle="end",
line_color="white", line_width=2,
fill_color="fill"
)
yield GlyphRenderer(data_source=source, glyph=glyph)
text_angle = [(start[i]+end[i])/2 for i in range(len(start))]
text_angle = [angle + pi if pi/2 < angle < 3*pi/2 else angle
for angle in text_angle]
if first and text:
text.insert(0, '')
offset = pi / 48
text_angle.insert(0, text_angle[0] - offset)
start.insert(0, start[0] - offset)
end.insert(0, end[0] - offset)
x, y = polar_to_cartesian(1.25, start, end)
first = False
data = dict(text=text, x=x, y=y, angle=text_angle)
text_source = ColumnDataSource(data)
glyph = Text(
x="x", y="y", text="text", angle="angle",
text_align="center", text_baseline="middle"
)
yield GlyphRenderer(data_source=text_source, glyph=glyph)
def _yield_renderers(self):
"""Use the AnnularWedge and Wedge glyphs to display the wedges.
Takes reference points from data loaded at the ColumnDataSource.
"""
# build the central round area of the donut
renderers = []
renderers += self.draw_central_wedge()
# write central descriptions
renderers += self.draw_central_descriptions()
# build external donut ring
renderers += self.draw_external_ring()
return renderers
| bsd-3-clause |
fastai/fastai | fastai/callback/captum.py | 1 | 5575 | # AUTOGENERATED! DO NOT EDIT! File to edit: nbs/73_callback.captum.ipynb (unless otherwise specified).
__all__ = ['json_clean', 'CaptumInterpretation']
# Cell
import tempfile
from ..basics import *
# Cell
from ipykernel import jsonutil
# Cell
# Dirty hack as json_clean doesn't support CategoryMap type
_json_clean=jsonutil.json_clean
def json_clean(o):
o = list(o.items) if isinstance(o,CategoryMap) else o
return _json_clean(o)
jsonutil.json_clean = json_clean
# Cell
from captum.attr import IntegratedGradients,NoiseTunnel,GradientShap,Occlusion
from captum.attr import visualization as viz
from matplotlib.colors import LinearSegmentedColormap
from captum.insights import AttributionVisualizer, Batch
from captum.insights.attr_vis.features import ImageFeature
# Cell
class CaptumInterpretation():
"Captum Interpretation for Resnet"
def __init__(self,learn,cmap_name='custom blue',colors=None,N=256,methods=('original_image','heat_map'),
signs=("all", "positive"),outlier_perc=1):
if colors is None: colors = [(0, '#ffffff'),(0.25, '#000000'),(1, '#000000')]
store_attr()
self.dls,self.model = learn.dls,self.learn.model
self.supported_metrics=['IG','NT','Occl']
def get_baseline_img(self, img_tensor,baseline_type):
baseline_img=None
if baseline_type=='zeros': baseline_img= img_tensor*0
if baseline_type=='uniform': baseline_img= torch.rand(img_tensor.shape)
if baseline_type=='gauss':
baseline_img= (torch.rand(img_tensor.shape).to(self.dls.device)+img_tensor)/2
return baseline_img.to(self.dls.device)
def visualize(self,inp,metric='IG',n_steps=1000,baseline_type='zeros',nt_type='smoothgrad', strides=(3,4,4), sliding_window_shapes=(3,15,15)):
if metric not in self.supported_metrics:
raise Exception(f"Metric {metric} is not supported. Currently {self.supported_metrics} are only supported")
tls = L([TfmdLists(inp, t) for t in L(ifnone(self.dls.tfms,[None]))])
inp_data=list(zip(*(tls[0],tls[1])))[0]
enc_data,dec_data=self._get_enc_dec_data(inp_data)
attributions=self._get_attributions(enc_data,metric,n_steps,nt_type,baseline_type,strides,sliding_window_shapes)
self._viz(attributions,dec_data,metric)
def _viz(self,attributions,dec_data,metric):
default_cmap = LinearSegmentedColormap.from_list(self.cmap_name,self.colors, N=self.N)
_ = viz.visualize_image_attr_multiple(np.transpose(attributions.squeeze().cpu().detach().numpy(), (1,2,0)),
np.transpose(dec_data[0].numpy(), (1,2,0)),
methods=self.methods,
cmap=default_cmap,
show_colorbar=True,
signs=self.signs,
outlier_perc=self.outlier_perc, titles=[f'Original Image - ({dec_data[1]})', metric])
def _get_enc_dec_data(self,inp_data):
dec_data=self.dls.after_item(inp_data)
enc_data=self.dls.after_batch(to_device(self.dls.before_batch(dec_data),self.dls.device))
return(enc_data,dec_data)
def _get_attributions(self,enc_data,metric,n_steps,nt_type,baseline_type,strides,sliding_window_shapes):
# Get Baseline
baseline=self.get_baseline_img(enc_data[0],baseline_type)
supported_metrics ={}
if metric == 'IG':
self._int_grads = self._int_grads if hasattr(self,'_int_grads') else IntegratedGradients(self.model)
return self._int_grads.attribute(enc_data[0],baseline, target=enc_data[1], n_steps=n_steps)
elif metric == 'NT':
self._int_grads = self._int_grads if hasattr(self,'_int_grads') else IntegratedGradients(self.model)
self._noise_tunnel= self._noise_tunnel if hasattr(self,'_noise_tunnel') else NoiseTunnel(self._int_grads)
return self._noise_tunnel.attribute(enc_data[0].to(self.dls.device), n_samples=1, nt_type=nt_type, target=enc_data[1])
elif metric == 'Occl':
self._occlusion = self._occlusion if hasattr(self,'_occlusion') else Occlusion(self.model)
return self._occlusion.attribute(enc_data[0].to(self.dls.device),
strides = strides,
target=enc_data[1],
sliding_window_shapes=sliding_window_shapes,
baselines=baseline)
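# A minimal usage sketch (illustrative; `learn` is assumed to be an already
# trained fastai vision Learner and `fname` an image path; neither is defined
# in this module):
#
#     interp = CaptumInterpretation(learn)
#     interp.visualize(fname)                  # Integrated Gradients (default)
#     interp.visualize(fname, metric='Occl')   # Occlusion-based attribution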
# Cell
@patch
def insights(x: CaptumInterpretation,inp_data,debug=True):
_baseline_func= lambda o: o*0
_get_vocab = lambda vocab: list(map(str,vocab)) if isinstance(vocab[0],bool) else vocab
dl = x.dls.test_dl(L(inp_data),with_labels=True, bs=4)
normalize_func= next((func for func in dl.after_batch if type(func)==Normalize),noop)
# captum v0.3 expects tensors without the batch dimension.
if nested_attr(normalize_func, 'mean.ndim', 4)==4: normalize_func.mean.squeeze_(0)
if nested_attr(normalize_func, 'std.ndim', 4)==4: normalize_func.std.squeeze_(0)
visualizer = AttributionVisualizer(
models=[x.model],
score_func=lambda o: torch.nn.functional.softmax(o, 1),
classes=_get_vocab(dl.vocab),
features=[ImageFeature("Image", baseline_transforms=[_baseline_func], input_transforms=[normalize_func])],
dataset=x._formatted_data_iter(dl,normalize_func))
visualizer.render(debug=debug) | apache-2.0 |
awanke/bokeh | examples/compat/mpl/polycollection.py | 34 | 1276 | from matplotlib.collections import PolyCollection
import matplotlib.pyplot as plt
import numpy as np
from bokeh import mpl
from bokeh.plotting import output_file, show
# Generate data. In this case, we'll make a bunch of center-points and generate
# vertices by subtracting random offsets from those center-points
numpoly, numverts = 100, 4
centers = 100 * (np.random.random((numpoly, 2)) - 0.5)
offsets = 10 * (np.random.random((numverts, numpoly, 2)) - 0.5)
verts = centers + offsets
verts = np.swapaxes(verts, 0, 1)
# In your case, "verts" might be something like:
# verts = zip(zip(lon1, lat1), zip(lon2, lat2), ...)
# If "data" in your case is a numpy array, there are cleaner ways to reorder
# things to suit.
facecolors = ['red', 'green', 'blue', 'cyan', 'yellow', 'magenta', 'black']
edgecolors = ['cyan', 'yellow', 'magenta', 'black', 'red', 'green', 'blue']
widths = [5, 10, 20, 10, 5]
# Make the collection and add it to the plot.
col = PolyCollection(verts, facecolor=facecolors, edgecolor=edgecolors,
linewidth=widths, linestyle='--', alpha=0.5)
ax = plt.axes()
ax.add_collection(col)
plt.xlim([-60, 60])
plt.ylim([-60, 60])
plt.title("MPL-PolyCollection support in Bokeh")
output_file("polycollection.html")
show(mpl.to_bokeh())
| bsd-3-clause |
r9y9/librosa | docs/examples/plot_presets.py | 3 | 3179 | # coding: utf-8
"""
=======
Presets
=======
This notebook demonstrates how to use the `presets` package to change the
default parameters for librosa.
"""
# Code source: Brian McFee
# License: ISC
##################################################
# We'll need numpy and matplotlib for this example
from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
# Import the Preset class
from presets import Preset
# To use presets, we'll make a dummy import of librosa
# and the display submodule here.
import librosa as _librosa
import librosa.display as _display
# The assignment here is to circumvent python's inability
# to rename submodule imports within the package
_librosa.display = _display
#########################################################################
# By default, librosa uses the following parameters across all functions:
# - `sr=22050` (sampling rate)
# - `hop_length=512` (number of samples between frames)
# - `n_fft=2048` (number of samples per frame in STFT-like analyses)
#
# You may want to change these values to suit your application, but
# doing so consistently in every function call can be somewhat cumbersome.
#
# Presets makes it easy to do this all at once by wrapping the module
# and all function calls, and overriding default arguments.
# First, we need to set up the preset-wrapped librosa import
librosa = Preset(_librosa)
# To change the default sampling rate, we can set the `sr` entry:
librosa['sr'] = 44100
# and similarly for hop_length and n_fft
librosa['hop_length'] = 1024
librosa['n_fft'] = 4096
# In general, when you set `librosa['X']` for any string `X`, anywhere within
# librosa where the parameter `X` occurs as a keyword-argument,
# its default value will be replaced by whatever value you provide.
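# For instance (illustrative), after the assignments above a call such as
# librosa.feature.melspectrogram(y=y) behaves as if sr=44100, hop_length=1024
# and n_fft=4096 had been passed explicitly.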
######################################################################
# Now we can load in a file and do some analysis with the new defaults
filename = 'audio/Karissa_Hobbs_-_09_-_Lets_Go_Fishin.mp3'
y, sr = librosa.load(filename, duration=5, offset=35)
# Generate a Mel spectrogram:
M = librosa.feature.melspectrogram(y=y)
# Of course, you can still override the new default manually, e.g.:
M_highres = librosa.feature.melspectrogram(y=y, hop_length=512)
# And plot the results
plt.figure(figsize=(6, 6))
ax = plt.subplot(3, 1, 1)
librosa.display.specshow(librosa.power_to_db(M, ref=np.max),
y_axis='mel', x_axis='time')
plt.title('44100/1024/4096')
plt.subplot(3, 1, 2, sharex=ax, sharey=ax)
librosa.display.specshow(librosa.power_to_db(M_highres, ref=np.max),
hop_length=512,
y_axis='mel', x_axis='time')
plt.title('44100/512/4096')
# We can repeat the whole process with different defaults, just by
# updating the parameter entries
librosa['sr'] = 11025
y2, sr2 = librosa.load(filename, duration=5, offset=35)
M2 = librosa.feature.melspectrogram(y=y2, sr=sr2)
plt.subplot(3, 1, 3, sharex=ax, sharey=ax)
librosa.display.specshow(librosa.power_to_db(M2, ref=np.max),
y_axis='mel', x_axis='time')
plt.title('11025/1024/4096')
plt.tight_layout()
plt.show()
| isc |
zmlabe/IceVarFigs | Scripts/Temperature/plot_ArcticAmplification_MovingLines.py | 1 | 6837 | """
Plot change in sea ice extent (NSIDC) and temperature (BEST) for annual means
from 1979 to 2019.
Author : Zachary M. Labe
Date : 20 August 2018
"""
### Import modules
import numpy as np
import datetime
import matplotlib.pyplot as plt
import matplotlib.animation as animation
import math
### Time
now = datetime.datetime.now()
currentmn = str(now.month)
currentdy = str(now.day)
currentyr = str(now.year)
currenttime = currentmn + '_' + currentdy + '_' + currentyr
currentdoy = now.timetuple().tm_yday
### Add directories
directorydata = './Data/'
directoryfigure = './Figures/'
### Load data files for Extent (e) and Temperature anomalies (t)
filee = 'NSIDC_AnnualSIE_2019_MeanMonth.txt'
filet = 'BEST_Arctic.txt'
### Years through 2019
years = np.arange(1979,2019+1,1)
### Read file
eq = np.genfromtxt(directorydata + filee,unpack=True)
tq = np.genfromtxt(directorydata + filet,delimiter=',',skip_header=1,
unpack=True,usecols=[1])
tq = tq[-41:]
print('\nCompleted: Read AA data!')
############################################################################
############################################################################
############################################################################
### Create animation
plt.rc('text',usetex=True)
plt.rc('font',**{'family':'sans-serif','sans-serif':['Avant Garde']})
plt.rc('savefig',facecolor='black')
plt.rc('axes',edgecolor='darkgrey')
plt.rc('xtick',color='darkgrey')
plt.rc('ytick',color='darkgrey')
plt.rc('axes',labelcolor='darkgrey')
plt.rc('axes',facecolor='black')
fig = plt.figure()
### Subplot for Extent
ax = plt.subplot(122)
### Adjust axes in time series plots
def adjust_spines(ax, spines):
for loc, spine in ax.spines.items():
if loc in spines:
spine.set_position(('outward', 5))
else:
spine.set_color('none')
if 'left' in spines:
ax.yaxis.set_ticks_position('left')
else:
ax.yaxis.set_ticks([])
if 'bottom' in spines:
ax.xaxis.set_ticks_position('bottom')
else:
ax.xaxis.set_ticks([])
plt.text(1979,10.03,r'\textbf{SEA-ICE EXTENT}',color='deepskyblue',alpha=0.5,ha='left',
fontsize=15,rotation=0,va='center',zorder=1)
ax.tick_params('both',length=5.5,width=2,which='major')
adjust_spines(ax, ['left','bottom'])
ax.spines['top'].set_color('none')
ax.spines['right'].set_color('none')
ax.spines['left'].set_linewidth(2)
ax.spines['bottom'].set_linewidth(2)
ant, = plt.plot(years,eq,linestyle='-',linewidth=3,
color='deepskyblue',zorder=2,clip_on=False)
plt.scatter(years[-1],eq[-1],s=30,color='gold',zorder=9,clip_on=False)
xlabels = map(str,np.arange(1979,2020,5))
plt.xticks(np.arange(1979,2020,5),xlabels,fontsize=7)
ylabels = map(str,np.arange(10,13,0.5))
plt.yticks(np.arange(10,13,0.5),ylabels,fontsize=7)
plt.ylim([10,12.5])
plt.xlim([1979,2019])
plt.text(2020.2,10.16,r'\textbf{2019}',fontsize=9,color='gold',ha='left')
plt.text(1979,9.5,r'\textbf{DATA:} NSIDC Sea Ice Index v3.0 (\textbf{ANNUAL}, Satellite)',
fontsize=5,rotation='horizontal',ha='left',color='darkgrey',alpha=1)
plt.text(1979,9.42,r'\textbf{SOURCE:} ftp://sidads.colorado.edu/DATASETS/NOAA/G02135',
fontsize=5,rotation='horizontal',ha='left',color='darkgrey',alpha=1)
plt.text(1979,9.34,r'\textbf{GRAPHIC:} Zachary Labe (@ZLabe)',
fontsize=5,rotation='horizontal',ha='left',color='darkgrey',alpha=1)
plt.text(1968,12.64,r'\textbf{[\textbf{$\times$10$^{6}$ km$^{2}$}]}',color='darkgrey',
fontsize=12,va='center',alpha=1)
plt.text(1970,13.1,r'\textbf{ARCTIC CLIMATE}',fontsize=32,color='w',
ha='center',va='center',alpha=1)
###########################################################################
###########################################################################
###########################################################################
### Subplot for Volume
ax = plt.subplot(121)
### Adjust axes in time series plots
def adjust_spines(ax, spines):
for loc, spine in ax.spines.items():
if loc in spines:
spine.set_position(('outward', 5))
else:
spine.set_color('none')
if 'left' in spines:
ax.yaxis.set_ticks_position('left')
else:
ax.yaxis.set_ticks([])
if 'bottom' in spines:
ax.xaxis.set_ticks_position('bottom')
else:
ax.xaxis.set_ticks([])
ax.tick_params('both',length=5,width=2,which='major')
adjust_spines(ax, ['left','bottom'])
ax.spines['top'].set_color('none')
ax.spines['right'].set_color('none')
ax.spines['left'].set_linewidth(2)
ax.spines['bottom'].set_linewidth(2)
plt.text(1979,-2.95,r'\textbf{AIR TEMPERATURE}',color='crimson',alpha=0.5,ha='left',
fontsize=15,rotation=0,va='center',zorder=1)
gre, = plt.plot(years,tq,linestyle='-',linewidth=3,
color='crimson',zorder=2,clip_on=False)
plt.scatter(years[-1],tq[-1],s=30,color='gold',zorder=9,clip_on=False)
xlabels = map(str,np.arange(1979,2020,5))
plt.xticks(np.arange(1979,2020,5),xlabels,fontsize=7)
ylabels = map(str,np.arange(-6,7,1))
plt.yticks(np.arange(-6,7,1),ylabels,fontsize=7)
plt.ylim([-3,3])
plt.xlim([1979,2019])
plt.text(1979,-4.22,r'\textbf{DATA:} Berkeley Earth Data using NOAA/ESRL [WRIT Tool; +67$\bf{^\circ}$N]',
fontsize=5,rotation='horizontal',ha='left',color='darkgrey',alpha=1)
plt.text(1979,-4.40,r'\textbf{SOURCE:} https://www.esrl.noaa.gov/psd/cgi-bin/data/testdap/timeseries.pl',
fontsize=5,rotation='horizontal',ha='left',color='darkgrey',alpha=1)
plt.text(1979,-4.58,r'\textbf{BASELINE:} Temperature \textbf{anomalies} computed from 1981-2010',
fontsize=5,rotation='horizontal',ha='left',color='darkgrey',alpha=1)
plt.text(2020.25,1.73,r'\textbf{2019}',fontsize=9,color='gold',ha='left')
plt.text(1974.2,3.35,r'\textbf{[$\bf{^\circ}$C]}',color='darkgrey',
fontsize=12,va='center',alpha=1)
fig.subplots_adjust(wspace=0.4)
fig.subplots_adjust(top=0.75)
fig.subplots_adjust(bottom=0.2)
### Create animation using matplotlib
def update(num,years,eq,tq,ant,gre,bar):
ant.set_data(years[:num+1],eq[:num+1])
ant.axes.axis([1979,2019,10,12.5])
gre.set_data(years[:num+1],tq[:num+1])
gre.axes.axis([1979,2019,-3,3])
return bar,
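# Illustrative note: FuncAnimation below calls update(frame_number, *fargs)
# once per frame; slicing the data to the first num+1 points makes the two
# curves appear to grow over time, and returning an artist tuple is what
# blit=True expects.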
ani = animation.FuncAnimation(fig,update,60,fargs=[years,
eq,tq,ant,gre,gre],interval=0.01,blit=True)
### Save figure
#plt.savefig(directoryfigure + 'ArcticAmplification_moving.png',dpi=300)
ani.save(directoryfigure + 'ArcticAmplification.gif',dpi=300,writer='imagemagick')
print('\nCompleted: Script done!')
| mit |
luzhijun/Optimization | cma-es/cluster/cluster1.py | 2 | 4179 | #!usr/bin/env python
#encoding: utf-8
__author__="luzhijun"
'''
cma restart test
'''
import cma
import time
import numpy as np
import matplotlib.pyplot as plt
import pickle
from multiprocessing import Pool
up=2
down=-2
def testFunc(X):
#time.sleep(0.1)
return cma.fcts.rosen(X)
rec=lambda v,p:map(lambda x:x/float(p),v)
#归一化
def autoNormal(X):
maxV=map(max,X)
nV=[X[i]/maxV[i] for i in range(len(maxV))]
return nV
def makeCluster(X,threshold):
X=autoNormal(X)
l=len(X)
i=0
remove=set()
clu=[]
for xi in range(l):
clu.append([xi])
while i<l:
if i in remove:
i+=1
continue
for j in range(i+1,l):
M=sum((X[i]-X[j])**2)
if M<threshold and j not in remove:
remove.add(j)
clu[i].append(j)
i+=1
for xi in remove:
clu.remove([xi])
return clu
def makeEvals(solutions,threshold):
'''
make Function values use makeCluster.
'''
clu=makeCluster(solutions,threshold)
evalKeys=[x[0] for x in clu]
evalValues=map(testFunc,[solutions[ei] for ei in evalKeys])
evals=[]
for i,ci in enumerate(clu):
for j in range(len(ci)):
evals.append(evalValues[i])
return evals
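# Illustrative sketch (hypothetical toy input, Python 2 syntax as in this
# script): near-duplicate candidates share a single objective evaluation.
#
#   X = np.array([[1.0, 2.0], [1.0, 2.0001], [5.0, 1.0]])
#   print makeCluster(X, 1e-4)   # expected to give roughly [[0, 1], [2]]
#   print makeEvals(X, 1e-4)     # three values, the first two identical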
result_list = []
def log_result(result):
# This is called whenever foo_pool(i) returns a result.
# result_list is modified only by the main process, not the pool workers.
result_list.append(result)
def cmaUser(mu=0.5,dim=100,sigma=0.3,popsize=120):
es = cma.CMAEvolutionStrategy(dim * [mu], sigma,{'popsize':popsize,'bounds':[down, up]})
sigmas=[]
t1=time.time()
while not es.stop() :
solutions = es.ask()
sigmas.append(es.sigma)
values=map(testFunc,solutions)
es.tell(solutions,values)
return sigmas,es.countiter,es.result()[1],es.result()[0],time.time()-t1,"normal"
def cmaUser1(dis,mu=0.5,dim=100,sigma=0.3,popsize=120):
es = cma.CMAEvolutionStrategy(dim * [mu], sigma,{'popsize':popsize,'bounds':[down, up]})
sigmas=[]
t1=time.time()
while not es.stop() :
solutions = es.ask()
sigmas.append(es.sigma)
if es.sigma<sigma*0.1:
values=makeEvals(solutions,dis)
else:
values=map(testFunc,solutions)
es.tell(solutions,values)
return sigmas,es.countiter,es.result()[1],es.result()[0],time.time()-t1,"cluster distance:%s"%dis
def main():
dis=[1e-4,1e-5,1e-6]
pool=Pool()
for disi in dis:
pool.apply_async(cmaUser1,args=(disi,),callback = log_result)
pool.apply_async(cmaUser,callback = log_result)
pool.close()
pool.join()
print "finished"
plt.figure(1)
plts=[]
length=len(result_list)
for i in range(length):
p1,=plt.plot(result_list[i][0])
plts.append(p1)
print('iter count:%s'%result_list[i][1])
print result_list[i][2]
print result_list[i][3]
print result_list[i][4]
print result_list[i][5]
print "------------"
plt.plot([0.015]*8000,'--')
plt.ylim(0,0.1)
plt.ylabel('sigma')
plt.xlabel('counter')
plt.title('dim=100,popsize=150')
plt.legend(plts,[result_list[i][5] for i in range(length)])
plt.savefig(u'fig.pdf')
def main1():
dis=[1e-3,1e-4,1e-5,1e-6]
pool=Pool()
for disi in dis:
pool.apply_async(cmaUser1,args=(disi,),callback = log_result)
pool.apply_async(cmaUser,callback = log_result)
pool.close()
pool.join()
print "finished"
with open('data.tl','w') as f:
pickle.dump(result_list,f,pickle.HIGHEST_PROTOCOL)
def testtime(dis,mu=0.5,dim=100,sigma=0.3,popsize=120):
es = cma.CMAEvolutionStrategy(dim * [mu], sigma,{'popsize':popsize,'bounds':[down, up]})
t1=time.time()
i=0
while i<3:
solutions = es.ask()
values=makeEvals(solutions,dis)
#values=map(testFunc,solutions)
es.tell(solutions,values)
i+=1
print (time.time()-t1)/3.0*100
if __name__ == '__main__':
#print cmaUser1(0.0001)
#main1()
#testtime(0.01)
testtime(0.001)
| apache-2.0 |
google-research/google-research | many_constraints/intersectional_fairness.py | 1 | 20803 | # coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Intersectional fairness with many constraint."""
import random
from absl import app
from absl import flags
import numpy as np
import pandas as pd
from sklearn import model_selection
import tensorflow.compat.v1 as tf
import tensorflow_constrained_optimization as tfco
flags.DEFINE_boolean("constrained", True, "Perform constrained optimization?")
flags.DEFINE_float("dual_scale", 0.01, "Dual scale for gamma-updates.")
flags.DEFINE_float("epsilon", 0.01, "Slack.")
flags.DEFINE_integer("loops", 100000, "No. of loops.")
flags.DEFINE_integer("num_layers", 2,
"No. of hidden layers for multiplier model.")
flags.DEFINE_integer("num_nodes", 100,
"No. of hidden nodes for multiplier model.")
FLAGS = flags.FLAGS
def load_data():
"""Loads and returns data."""
# List of column names in the dataset.
column_names = ["state", "county", "community", "communityname", "fold",
"population", "householdsize", "racepctblack", "racePctWhite",
"racePctAsian", "racePctHisp", "agePct12t21", "agePct12t29",
"agePct16t24", "agePct65up", "numbUrban", "pctUrban",
"medIncome", "pctWWage", "pctWFarmSelf", "pctWInvInc",
"pctWSocSec", "pctWPubAsst", "pctWRetire", "medFamInc",
"perCapInc", "whitePerCap", "blackPerCap", "indianPerCap",
"AsianPerCap", "OtherPerCap", "HispPerCap", "NumUnderPov",
"PctPopUnderPov", "PctLess9thGrade", "PctNotHSGrad",
"PctBSorMore", "PctUnemployed", "PctEmploy", "PctEmplManu",
"PctEmplProfServ", "PctOccupManu", "PctOccupMgmtProf",
"MalePctDivorce", "MalePctNevMarr", "FemalePctDiv",
"TotalPctDiv", "PersPerFam", "PctFam2Par", "PctKids2Par",
"PctYoungKids2Par", "PctTeen2Par", "PctWorkMomYoungKids",
"PctWorkMom", "NumIlleg", "PctIlleg", "NumImmig",
"PctImmigRecent", "PctImmigRec5", "PctImmigRec8",
"PctImmigRec10", "PctRecentImmig", "PctRecImmig5",
"PctRecImmig8", "PctRecImmig10", "PctSpeakEnglOnly",
"PctNotSpeakEnglWell", "PctLargHouseFam", "PctLargHouseOccup",
"PersPerOccupHous", "PersPerOwnOccHous", "PersPerRentOccHous",
"PctPersOwnOccup", "PctPersDenseHous", "PctHousLess3BR",
"MedNumBR", "HousVacant", "PctHousOccup", "PctHousOwnOcc",
"PctVacantBoarded", "PctVacMore6Mos", "MedYrHousBuilt",
"PctHousNoPhone", "PctWOFullPlumb", "OwnOccLowQuart",
"OwnOccMedVal", "OwnOccHiQuart", "RentLowQ", "RentMedian",
"RentHighQ", "MedRent", "MedRentPctHousInc",
"MedOwnCostPctInc", "MedOwnCostPctIncNoMtg", "NumInShelters",
"NumStreet", "PctForeignBorn", "PctBornSameState",
"PctSameHouse85", "PctSameCity85", "PctSameState85",
"LemasSwornFT", "LemasSwFTPerPop", "LemasSwFTFieldOps",
"LemasSwFTFieldPerPop", "LemasTotalReq", "LemasTotReqPerPop",
"PolicReqPerOffic", "PolicPerPop", "RacialMatchCommPol",
"PctPolicWhite", "PctPolicBlack", "PctPolicHisp",
"PctPolicAsian", "PctPolicMinor", "OfficAssgnDrugUnits",
"NumKindsDrugsSeiz", "PolicAveOTWorked", "LandArea",
"PopDens", "PctUsePubTrans", "PolicCars", "PolicOperBudg",
"LemasPctPolicOnPatr", "LemasGangUnitDeploy",
"LemasPctOfficDrugUn", "PolicBudgPerPop",
"ViolentCrimesPerPop"]
dataset_url = "http://archive.ics.uci.edu/ml/machine-learning-databases/communities/communities.data"
# Read dataset from the UCI web repository and assign column names.
data_df = pd.read_csv(dataset_url, sep=",", names=column_names,
na_values="?")
# Make sure there are no missing values in the "ViolentCrimesPerPop" column.
assert not data_df["ViolentCrimesPerPop"].isna().any()
# Binarize the "ViolentCrimesPerPop" column and obtain labels.
crime_rate_70_percentile = data_df["ViolentCrimesPerPop"].quantile(q=0.7)
labels_df = (data_df["ViolentCrimesPerPop"] >= crime_rate_70_percentile)
# Now that we have assigned binary labels,
# we drop the "ViolentCrimesPerPop" column from the data frame.
data_df.drop(columns="ViolentCrimesPerPop", inplace=True)
# Group features.
groups_df = pd.concat(
[data_df["racepctblack"], data_df["racePctAsian"],
data_df["racePctHisp"]], axis=1)
# Drop categorical features.
data_df.drop(
columns=["state", "county", "community", "communityname", "fold"],
inplace=True)
# Handle missing features.
feature_names = data_df.columns
for feature_name in feature_names:
missing_rows = data_df[feature_name].isna()
if missing_rows.any():
data_df[feature_name].fillna(0.0, inplace=True) # Fill NaN with 0.
missing_rows.rename(feature_name + "_is_missing", inplace=True)
# Append boolean "is_missing" feature.
data_df = data_df.join(missing_rows)
labels = labels_df.values.astype(np.float32)
groups = groups_df.values.astype(np.float32)
features = data_df.values.astype(np.float32)
# Set random seed so that the results are reproducible.
np.random.seed(121212)
# Train, vali and test indices.
train_indices, test_indices = model_selection.train_test_split(
range(features.shape[0]), test_size=0.25)
train_indices, vali_indices = model_selection.train_test_split(
train_indices, test_size=1./3.)
# Train features, labels and protected groups.
x_train = features[train_indices, :]
y_train = labels[train_indices]
z_train = groups[train_indices]
# Vali features, labels and protected groups.
x_vali = features[vali_indices, :]
y_vali = labels[vali_indices]
z_vali = groups[vali_indices]
# Test features, labels and protected groups.
x_test = features[test_indices, :]
y_test = labels[test_indices]
z_test = groups[test_indices]
return (x_train, y_train, z_train, x_vali, y_vali, z_vali, x_test, y_test,
z_test)
def error_rate(labels, predictions, groups=None):
# Returns the error rate for given labels and predictions.
if groups is not None:
if np.sum(groups) == 0.0:
return 0.0
predictions = predictions[groups]
labels = labels[groups]
signed_labels = labels - 0.5
return np.mean(signed_labels * predictions <= 0.0)
def group_membership_thresholds(
group_feature_train, group_feature_vali, group_feature_test, thresholds):
"""Returns the group membership vectors on train, test and vali sets."""
group_memberships_list_train_ = []
group_memberships_list_vali_ = []
group_memberships_list_test_ = []
group_thresholds_list = []
for t1 in thresholds[0]:
for t2 in thresholds[1]:
for t3 in thresholds[2]:
group_membership_train = (group_feature_train[:, 0] > t1) & (
group_feature_train[:, 1] > t2) & (group_feature_train[:, 2] > t3)
group_membership_vali = (group_feature_vali[:, 0] > t1) & (
group_feature_vali[:, 1] > t2) & (group_feature_vali[:, 2] > t3)
group_membership_test = (group_feature_test[:, 0] > t1) & (
group_feature_test[:, 1] > t2) & (group_feature_test[:, 2] > t3)
if (np.mean(group_membership_train) <= 0.01) or (
np.mean(group_membership_vali) <= 0.01) or (
np.mean(group_membership_test) <= 0.01):
# Only consider groups that are at least 1% in size.
continue
group_memberships_list_train_.append(group_membership_train)
group_memberships_list_vali_.append(group_membership_vali)
group_memberships_list_test_.append(group_membership_test)
group_thresholds_list.append([t1, t2, t3])
group_memberships_list_train_ = np.array(group_memberships_list_train_)
group_memberships_list_vali_ = np.array(group_memberships_list_vali_)
group_memberships_list_test_ = np.array(group_memberships_list_test_)
group_thresholds_list = np.array(group_thresholds_list)
return (group_memberships_list_train_, group_memberships_list_vali_,
group_memberships_list_test_, group_thresholds_list)
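# Illustrative sketch (hypothetical thresholds): with
#   thresholds = [[0.2, 0.5], [0.1], [0.1]]
# the candidate groups are the cross product of the per-feature cutoffs, e.g.
# (racepctblack > 0.2) & (racePctAsian > 0.1) & (racePctHisp > 0.1), and only
# combinations covering at least 1% of the train, vali and test splits are
# retained.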
def violation(
labels, predictions, epsilon, group_memberships_list):
# Returns violations across different group feature thresholds.
viol_list = []
overall_error = error_rate(labels, predictions)
for kk in range(group_memberships_list.shape[0]):
group_err = error_rate(
labels, predictions, group_memberships_list[kk, :].reshape(-1,))
viol_list += [group_err - overall_error - epsilon]
return np.max(viol_list), viol_list
def evaluate(
features, labels, model, epsilon, group_membership_list):
# Evaluates and prints stats.
predictions = model(features).numpy().reshape(-1,)
print("Error %.3f" % error_rate(labels, predictions))
_, viol_list = violation(labels, predictions, epsilon, group_membership_list)
print("99p Violation %.3f" % np.quantile(viol_list, 0.99))
print()
def create_model(dimension):
# Creates linear Keras model with no hidden layers.
layers = []
layers.append(tf.keras.Input(shape=(dimension,)))
layers.append(tf.keras.layers.Dense(1))
model = tf.keras.Sequential(layers)
return model
def create_multiplier_model(
feature_dependent_multiplier=True, dim=1, hidden_layers=None):
"""Creates Lagrange multipler model with specified hidden layers."""
if feature_dependent_multiplier:
layers = []
layers.append(tf.keras.Input(shape=dim))
for num_nodes in hidden_layers:
layers.append(tf.keras.layers.Dense(num_nodes, activation="relu"))
layers.append(tf.keras.layers.Dense(1, bias_initializer="ones"))
# Keras model.
multiplier_model = tf.keras.Sequential(layers)
multiplier_weights = multiplier_model.trainable_weights
else:
common_multiplier = tf.Variable(1.0, name="common_multiplier")
# Ignore feature input, and return common multiplier.
multiplier_model = lambda x: common_multiplier
multiplier_weights = [common_multiplier]
return multiplier_model, multiplier_weights
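# Illustrative usage sketch (hypothetical values): the returned model maps a
# group-threshold vector to a scalar multiplier; non-negativity is enforced
# with tf.abs at the call site.
#
#   mult_model, mult_vars = create_multiplier_model(
#       feature_dependent_multiplier=True, dim=3, hidden_layers=[100, 100])
#   raw = mult_model(tf.constant([[0.1, 0.2, 0.1]]))  # shape (1, 1)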
def train_unconstrained(
dataset, group_info, epsilon=0.01, loops=10000, skip_steps=400):
"""Train unconstrained classifier.
Args:
dataset: train, vali and test sets
group_info: group memberships on train, vali and test sets and thresholds
epsilon: constraint slack
loops: number of gradient steps
skip_steps: steps to skip before snapshotting metrics
"""
tf.set_random_seed(121212)
np.random.seed(212121)
random.seed(333333)
x_train, y_train, _, x_vali, y_vali, _, x_test, y_test, _ = dataset
(group_memberships_list_train, group_memberships_list_vali,
group_memberships_list_test, _) = group_info
model = create_model(x_train.shape[-1])
features_tensor = tf.constant(x_train)
labels_tensor = tf.constant(y_train)
predictions = lambda: model(features_tensor)
predictions_vali = lambda: model(x_vali)
predictions_test = lambda: model(x_test)
context = tfco.rate_context(predictions, labels=lambda: labels_tensor)
overall_error = tfco.error_rate(context, penalty_loss=tfco.HingeLoss())
problem = tfco.RateMinimizationProblem(overall_error)
loss_fn, update_ops_fn, _ = tfco.create_lagrangian_loss(problem)
optimizer = tf.keras.optimizers.Adagrad(0.1)
objectives_list = []
objectives_list_test = []
objectives_list_vali = []
violations_list = []
violations_list_test = []
violations_list_vali = []
model_weights = []
for ii in range(loops):
update_ops_fn()
optimizer.minimize(loss_fn, var_list=model.trainable_weights)
# Snapshot iterate once every skip_steps loops.
if ii % skip_steps == 0:
pred = np.reshape(predictions(), (-1,))
err = error_rate(y_train, pred)
max_viol, viol_list = violation(
y_train, pred, epsilon, group_memberships_list_train)
pred_test = np.reshape(predictions_test(), (-1,))
err_test = error_rate(y_test, pred_test)
_, viol_list_test = violation(
y_test, pred_test, epsilon, group_memberships_list_test)
pred_vali = np.reshape(predictions_vali(), (-1,))
err_vali = error_rate(y_vali, pred_vali)
max_viol_vali, viol_list_vali = violation(
y_vali, pred_vali, epsilon, group_memberships_list_vali)
objectives_list.append(err)
objectives_list_test.append(err_test)
objectives_list_vali.append(err_vali)
violations_list.append(viol_list)
violations_list_test.append(viol_list_test)
violations_list_vali.append(viol_list_vali)
model_weights.append(model.get_weights())
if ii % 1000 == 0:
print("Epoch %d | Error = %.3f | Viol = %.3f | Viol_vali = %.3f" %
(ii, err, max_viol, max_viol_vali), flush=True)
# Best candidate index.
best_ind = np.argmin(objectives_list)
model.set_weights(model_weights[best_ind])
print("Train:")
evaluate(x_train, y_train, model, epsilon, group_memberships_list_train)
print("\nVali:")
evaluate(x_vali, y_vali, model, epsilon, group_memberships_list_vali)
print("\nTest:")
evaluate(x_test, y_test, model, epsilon, group_memberships_list_test)
def train_constrained(
dataset, group_info, epsilon=0.01, learning_rate=0.1, dual_scale=5.0,
loops=10000, feature_dependent_multiplier=True, hidden_layers=None,
skip_steps=400):
"""Train constrained classifier wth Lagrangian model.
Args:
dataset: train, vali and test sets
group_info: group memberships on train, vali and test sets and thresholds
epsilon: constraint slack
learning_rate: learning rate for theta
dual_scale: learning rate for gamma = dual_scale * learning_rate
loops: number of gradient steps
feature_dependent_multiplier: should the multiplier model be feature
dependent. If False, a common multiplier is used for all constraints
hidden_layers: list of hidden layer nodes to be used for multiplier model
skip_steps: steps to skip before snapshotting metrics
"""
tf.set_random_seed(121212)
np.random.seed(212121)
random.seed(333333)
x_train, y_train, z_train, x_vali, y_vali, _, x_test, y_test, _ = dataset
(group_memberships_list_train,
group_memberships_list_vali,
group_memberships_list_test,
group_memberships_thresholds_train) = group_info
# Models and group thresholds tensor.
model = create_model(x_train.shape[-1])
multiplier_model, multiplier_weights = create_multiplier_model(
feature_dependent_multiplier=feature_dependent_multiplier,
dim=3,
hidden_layers=hidden_layers)
group_thresholds = tf.Variable(np.ones(3) * 0.1, dtype=tf.float32)
# Features, labels, predictions, multipliers.
features_tensor = tf.constant(x_train)
labels_tensor = tf.constant(y_train)
features_tensor_vali = tf.constant(x_vali)
predictions = lambda: model(features_tensor)
predictions_vali = lambda: model(features_tensor_vali)
predictions_test = lambda: model(x_test)
def multiplier_values():
return tf.abs(multiplier_model(tf.reshape(group_thresholds, shape=(1, -1))))
# Lagrangian loss function.
def lagrangian_loss():
# Separate out objective, constraints and proxy constraints.
objective = problem.objective()
constraints = problem.constraints()
proxy_constraints = problem.proxy_constraints()
# Set-up custom Lagrangian loss.
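    # tf.stop_gradient splits the update: the model weights only receive
    # gradients through the objective and the multiplier-weighted proxy
    # constraints, while the multiplier model only receives gradients
    # through the dual term built from the true constraints.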
primal = objective
multipliers = multiplier_values()
primal += tf.stop_gradient(multipliers) * proxy_constraints
dual = dual_scale * multipliers * tf.stop_gradient(constraints)
return primal - dual
# Objective.
context = tfco.rate_context(
predictions,
labels=lambda: labels_tensor)
overall_error = tfco.error_rate(context)
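  # overall_error doubles as the objective and as the reference rate in the
  # group constraint constructed below (group_error <= overall_error + epsilon).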
# Slice and subset group predictions and labels.
def group_membership():
return (z_train[:, 0] > group_thresholds[0]) & (
z_train[:, 1] > group_thresholds[1]) & (
z_train[:, 2] > group_thresholds[2])
def group_predictions():
pred = predictions()
groups = tf.reshape(group_membership(), (-1, 1))
return pred[groups]
def group_labels():
groups = tf.reshape(group_membership(), (-1,))
return labels_tensor[groups]
# Constraint.
group_context = tfco.rate_context(
group_predictions,
labels=group_labels)
group_error = tfco.error_rate(group_context)
constraints = [group_error <= overall_error + epsilon]
# Set up constrained optimization problem and optimizer.
problem = tfco.RateMinimizationProblem(overall_error, constraints)
optimizer = tf.keras.optimizers.Adagrad(learning_rate)
var_list = model.trainable_weights + multiplier_weights
objectives_list = []
objectives_list_test = []
objectives_list_vali = []
violations_list = []
violations_list_test = []
violations_list_vali = []
model_weights = []
# Training
for ii in range(loops):
# Sample a group membership at random.
random_index = np.random.randint(
group_memberships_thresholds_train.shape[0])
group_thresholds.assign(group_memberships_thresholds_train[random_index, :])
# Gradient op.
problem.update_ops()
optimizer.minimize(lagrangian_loss, var_list=var_list)
# Snapshot iterate once in 1000 loops.
if ii % skip_steps == 0:
pred = np.reshape(predictions(), (-1,))
err = error_rate(y_train, pred)
max_viol, viol_list = violation(
y_train, pred, epsilon, group_memberships_list_train)
pred_test = np.reshape(predictions_test(), (-1,))
err_test = error_rate(y_test, pred_test)
_, viol_list_test = violation(
y_test, pred_test, epsilon, group_memberships_list_test)
pred_vali = np.reshape(predictions_vali(), (-1,))
err_vali = error_rate(y_vali, pred_vali)
max_viol_vali, viol_list_vali = violation(
y_vali, pred_vali, epsilon, group_memberships_list_vali)
objectives_list.append(err)
objectives_list_test.append(err_test)
objectives_list_vali.append(err_vali)
violations_list.append(viol_list)
violations_list_test.append(viol_list_test)
violations_list_vali.append(viol_list_vali)
model_weights.append(model.get_weights())
if ii % 1000 == 0:
print("Epoch %d | Error = %.3f | Viol = %.3f | Viol_vali = %.3f" %
(ii, err, max_viol, max_viol_vali), flush=True)
# Best candidate index.
best_ind = tfco.find_best_candidate_index(
np.array(objectives_list), np.array(violations_list),
rank_objectives=False)
model.set_weights(model_weights[best_ind])
print("Train:")
evaluate(x_train, y_train, model, epsilon, group_memberships_list_train)
print("\nVali:")
evaluate(x_vali, y_vali, model, epsilon, group_memberships_list_vali)
print("\nTest:")
evaluate(x_test, y_test, model, epsilon, group_memberships_list_test)
def main(argv):
del argv
tf.compat.v1.enable_eager_execution()
# Load data.
dataset = load_data()
_, _, z_train, _, _, z_vali, _, _, z_test = dataset
# Group Thresholds for 3 Groups
group_threshold_range = []
for jj in range(3):
group_threshold_range.append([np.quantile(
z_train[:, jj], kk) for kk in np.arange(0.05, 1.0, 0.1)])
# Group memberships based on group thresholds.
group_info = group_membership_thresholds(
z_train, z_vali, z_test, group_threshold_range)
if FLAGS.constrained:
if FLAGS.num_layers < 0:
train_constrained(
dataset,
group_info,
feature_dependent_multiplier=False,
epsilon=FLAGS.epsilon,
dual_scale=FLAGS.dual_scale,
loops=FLAGS.loops)
else:
train_constrained(
dataset,
group_info,
feature_dependent_multiplier=True,
hidden_layers=[FLAGS.num_nodes] * FLAGS.num_layers,
epsilon=FLAGS.epsilon,
dual_scale=FLAGS.dual_scale,
loops=FLAGS.loops)
else:
train_unconstrained(
dataset, group_info, epsilon=FLAGS.epsilon, loops=FLAGS.loops)
if __name__ == "__main__":
app.run(main)
| apache-2.0 |
PatrickOReilly/scikit-learn | sklearn/tests/test_learning_curve.py | 59 | 10869 | # Author: Alexander Fabisch <[email protected]>
#
# License: BSD 3 clause
import sys
from sklearn.externals.six.moves import cStringIO as StringIO
import numpy as np
import warnings
from sklearn.base import BaseEstimator
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.datasets import make_classification
with warnings.catch_warnings():
warnings.simplefilter('ignore')
from sklearn.learning_curve import learning_curve, validation_curve
from sklearn.cross_validation import KFold
from sklearn.linear_model import PassiveAggressiveClassifier
class MockImprovingEstimator(BaseEstimator):
"""Dummy classifier to test the learning curve"""
def __init__(self, n_max_train_sizes):
self.n_max_train_sizes = n_max_train_sizes
self.train_sizes = 0
self.X_subset = None
def fit(self, X_subset, y_subset=None):
self.X_subset = X_subset
self.train_sizes = X_subset.shape[0]
return self
def predict(self, X):
raise NotImplementedError
def score(self, X=None, Y=None):
        # training score becomes worse (2 -> 1), test score gets better (0 -> 1)
if self._is_training_data(X):
return 2. - float(self.train_sizes) / self.n_max_train_sizes
else:
return float(self.train_sizes) / self.n_max_train_sizes
def _is_training_data(self, X):
return X is self.X_subset
class MockIncrementalImprovingEstimator(MockImprovingEstimator):
"""Dummy classifier that provides partial_fit"""
def __init__(self, n_max_train_sizes):
super(MockIncrementalImprovingEstimator,
self).__init__(n_max_train_sizes)
self.x = None
def _is_training_data(self, X):
return self.x in X
def partial_fit(self, X, y=None, **params):
self.train_sizes += X.shape[0]
self.x = X[0]
class MockEstimatorWithParameter(BaseEstimator):
"""Dummy classifier to test the validation curve"""
def __init__(self, param=0.5):
self.X_subset = None
self.param = param
def fit(self, X_subset, y_subset):
self.X_subset = X_subset
self.train_sizes = X_subset.shape[0]
return self
def predict(self, X):
raise NotImplementedError
def score(self, X=None, y=None):
return self.param if self._is_training_data(X) else 1 - self.param
def _is_training_data(self, X):
return X is self.X_subset
def test_learning_curve():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
with warnings.catch_warnings(record=True) as w:
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y, cv=3, train_sizes=np.linspace(0.1, 1.0, 10))
if len(w) > 0:
raise RuntimeError("Unexpected warning: %r" % w[0].message)
assert_equal(train_scores.shape, (10, 3))
assert_equal(test_scores.shape, (10, 3))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_learning_curve_unsupervised():
X, _ = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y=None, cv=3, train_sizes=np.linspace(0.1, 1.0, 10))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_learning_curve_verbose():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
train_sizes, train_scores, test_scores = \
learning_curve(estimator, X, y, cv=3, verbose=1)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
assert("[learning_curve]" in out)
def test_learning_curve_incremental_learning_not_possible():
X, y = make_classification(n_samples=2, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
# The mockup does not have partial_fit()
estimator = MockImprovingEstimator(1)
assert_raises(ValueError, learning_curve, estimator, X, y,
exploit_incremental_learning=True)
def test_learning_curve_incremental_learning():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockIncrementalImprovingEstimator(20)
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y, cv=3, exploit_incremental_learning=True,
train_sizes=np.linspace(0.1, 1.0, 10))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_learning_curve_incremental_learning_unsupervised():
X, _ = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockIncrementalImprovingEstimator(20)
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y=None, cv=3, exploit_incremental_learning=True,
train_sizes=np.linspace(0.1, 1.0, 10))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_learning_curve_batch_and_incremental_learning_are_equal():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
train_sizes = np.linspace(0.2, 1.0, 5)
estimator = PassiveAggressiveClassifier(n_iter=1, shuffle=False)
train_sizes_inc, train_scores_inc, test_scores_inc = \
learning_curve(
estimator, X, y, train_sizes=train_sizes,
cv=3, exploit_incremental_learning=True)
train_sizes_batch, train_scores_batch, test_scores_batch = \
learning_curve(
estimator, X, y, cv=3, train_sizes=train_sizes,
exploit_incremental_learning=False)
assert_array_equal(train_sizes_inc, train_sizes_batch)
assert_array_almost_equal(train_scores_inc.mean(axis=1),
train_scores_batch.mean(axis=1))
assert_array_almost_equal(test_scores_inc.mean(axis=1),
test_scores_batch.mean(axis=1))
def test_learning_curve_n_sample_range_out_of_bounds():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[0, 1])
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[0.0, 1.0])
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[0.1, 1.1])
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[0, 20])
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[1, 21])
def test_learning_curve_remove_duplicate_sample_sizes():
X, y = make_classification(n_samples=3, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(2)
train_sizes, _, _ = assert_warns(
RuntimeWarning, learning_curve, estimator, X, y, cv=3,
train_sizes=np.linspace(0.33, 1.0, 3))
assert_array_equal(train_sizes, [1, 2])
def test_learning_curve_with_boolean_indices():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
cv = KFold(n=30, n_folds=3)
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y, cv=cv, train_sizes=np.linspace(0.1, 1.0, 10))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_validation_curve():
X, y = make_classification(n_samples=2, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
param_range = np.linspace(0, 1, 10)
with warnings.catch_warnings(record=True) as w:
train_scores, test_scores = validation_curve(
MockEstimatorWithParameter(), X, y, param_name="param",
param_range=param_range, cv=2
)
if len(w) > 0:
raise RuntimeError("Unexpected warning: %r" % w[0].message)
assert_array_almost_equal(train_scores.mean(axis=1), param_range)
assert_array_almost_equal(test_scores.mean(axis=1), 1 - param_range)
| bsd-3-clause |
robcarver17/systematictradingexamples | plots_for_perhaps/equitysectorweights.py | 1 | 6074 | from scipy.optimize import minimize
from copy import copy
import random
import pandas as pd
import numpy as np
from datetime import datetime as dt
def create_dull_pd_matrix(dullvalue=0.0, dullname="A", startdate=pd.datetime(1970,1,1).date(), enddate=dt.now().date(), index=None):
"""
create a single valued pd matrix
"""
if index is None:
index=pd.date_range(startdate, enddate)
dullvalue=np.array([dullvalue]*len(index))
ans=pd.DataFrame(dullvalue, index, columns=[dullname])
return ans
def addem(weights):
## Used for constraints
return 1.0 - sum(weights)
def variance(weights, sigma):
## returns the variance (NOT standard deviation) given weights and sigma
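    ## e.g. weights=[0.5, 0.5] with sigma=[[0.04, 0.0], [0.0, 0.04]] gives 0.02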
return (np.matrix(weights)*sigma*np.matrix(weights).transpose())[0,0]
def neg_SR(weights, sigma, mus):
## Returns minus the Sharpe Ratio (as we're minimising)
"""
estreturn=250.0*((np.matrix(x)*mus)[0,0])
variance=(variance(x,sigma)**.5)*16.0
"""
estreturn=(np.matrix(weights)*mus)[0,0]
std_dev=(variance(weights,sigma)**.5)
return -estreturn/std_dev
def sigma_from_corr(std, corr):
sigma=std*corr*std
return sigma
def basic_opt(std,corr,mus):
number_assets=mus.shape[0]
sigma=sigma_from_corr(std, corr)
start_weights=[1.0/number_assets]*number_assets
## Constraints - positive weights, adding to 1.0
bounds=[(0.0,1.0)]*number_assets
cdict=[{'type':'eq', 'fun':addem}]
return minimize(neg_SR_riskfree, start_weights, (sigma, mus), method='SLSQP', bounds=bounds, constraints=cdict, tol=0.00001)
def neg_SR_riskfree(weights, sigma, mus, riskfree=0.005):
## Returns minus the Sharpe Ratio (as we're minimising)
"""
estreturn=250.0*((np.matrix(x)*mus)[0,0])
variance=(variance(x,sigma)**.5)*16.0
"""
estreturn=(np.matrix(weights)*mus)[0,0] - riskfree
std_dev=(variance(weights,sigma)**.5)
return -estreturn/std_dev
def equalise_vols(returns, default_vol):
"""
    Normalises returns so they have the in-sample vol of default_vol (annualised)
Assumes daily returns
"""
factors=(default_vol/16.0)/returns.std(axis=0)
facmat=create_dull_pd_matrix(dullvalue=factors, dullname=returns.columns, index=returns.index)
norm_returns=returns*facmat
norm_returns.columns=returns.columns
return norm_returns
def offdiag_matrix(offvalue, nlength):
identity=np.diag([1.0]*nlength)
for x in range(nlength):
for y in range(nlength):
if x!=y:
identity[x][y]=offvalue
return identity
def get_avg_corr(sigma):
new_sigma=copy(sigma)
np.fill_diagonal(new_sigma,np.nan)
return np.nanmean(new_sigma)
def read_ts_csv(fname, dindex="Date"):
data=pd.read_csv(fname)
dateindex=[dt.strptime(dx, "%d/%m/%y") for dx in list(data[dindex])]
data.index=dateindex
del(data[dindex])
return data
def markosolver(returns, equalisemeans=False, equalisevols=True, default_vol=0.2, default_SR=1.0):
"""
Returns the optimal portfolio for the dataframe returns
    If equalisemeans=True then assumes all assets have the same return; if False, uses the asset means
If equalisevols=True then normalises returns to have same standard deviation; the weights returned
will be 'risk weightings'
    Note if equalisemeans=True and equalisevols=True this effectively assumes all assets have the same sharpe ratio
"""
if equalisevols:
use_returns=equalise_vols(returns, default_vol)
else:
use_returns=returns
## Sigma matrix
sigma=use_returns.cov().values
## Expected mean returns
est_mus=[use_returns[asset_name].mean() for asset_name in use_returns.columns]
missingvals=[np.isnan(x) for x in est_mus]
if equalisemeans:
## Don't use the data - Set to the average Sharpe Ratio
mus=[default_vol*default_SR]*returns.shape[1]
else:
mus=est_mus
mus=np.array(mus, ndmin=2).transpose()
## Starting weights
number_assets=use_returns.shape[1]
start_weights=[1.0/number_assets]*number_assets
## Constraints - positive weights, adding to 1.0
bounds=[(0.0,1.0)]*number_assets
cdict=[{'type':'eq', 'fun':addem}]
ans=minimize(neg_SR, start_weights, (sigma, mus), method='SLSQP', bounds=bounds, constraints=cdict, tol=0.00001)
wts=ans['x']
return wts
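## Illustrative sketch only (not part of the original script): one way
## markosolver might be called on a toy two-asset daily-returns frame.
## The asset names and return parameters below are made up for demonstration.
def _example_markosolver_usage():
    np.random.seed(0)
    toy_returns = pd.DataFrame(
        np.random.normal(0.0005, 0.01, size=(500, 2)),
        columns=["equity", "bonds"])
    ## equalisemeans=True ignores the sample means (equal Sharpe assumption)
    return markosolver(toy_returns, equalisemeans=True, equalisevols=False)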
def bootstrap_portfolio(returns_to_bs, monte_carlo=500, monte_length=25, equalisemeans=False, equalisevols=True, default_vol=0.2, default_SR=1.0):
"""
    Given a dataframe of returns, returns_to_bs, performs a bootstrap optimisation
We run monte_carlo numbers of bootstraps
Each one contains monte_length days drawn randomly, with replacement
(so *not* block bootstrapping)
The other arguments are passed to the optimisation function markosolver
    Note - doesn't deal gracefully with missing data. Will end up downweighting stuff depending on how
    much data is missing in each bootstrap. You'll need to think about how to solve this problem.
"""
weightlist=[]
for unused_index in range(monte_carlo):
bs_idx=[int(random.uniform(0,1)*len(returns_to_bs)) for i in range(monte_length)]
returns=returns_to_bs.iloc[bs_idx,:]
weight=markosolver(returns, equalisemeans=equalisemeans, equalisevols=equalisevols, default_vol=default_vol, default_SR=default_SR)
weightlist.append(weight)
### We can take an average here; only because our weights always add up to 1. If that isn't true
    ### then you will need to do some kind of renormalisation
theweights_mean=list(np.mean(weightlist, axis=0))
return theweights_mean
rawdata=read_ts_csv("/home/rob/workspace/systematictradingexamples/plots_for_perhaps/sectorreturns.csv")
rawdata=rawdata/100.0
bootstrap_portfolio(rawdata, equalisemeans=True, equalisevols=True, default_vol=0.2, default_SR=1.0) | gpl-2.0 |
3manuek/scikit-learn | sklearn/cluster/setup.py | 263 | 1449 | # Author: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
import os
from os.path import join
import numpy
from sklearn._build_utils import get_blas_info
def configuration(parent_package='', top_path=None):
from numpy.distutils.misc_util import Configuration
cblas_libs, blas_info = get_blas_info()
libraries = []
if os.name == 'posix':
cblas_libs.append('m')
libraries.append('m')
config = Configuration('cluster', parent_package, top_path)
config.add_extension('_dbscan_inner',
sources=['_dbscan_inner.cpp'],
include_dirs=[numpy.get_include()],
language="c++")
config.add_extension('_hierarchical',
sources=['_hierarchical.cpp'],
language="c++",
include_dirs=[numpy.get_include()],
libraries=libraries)
config.add_extension(
'_k_means',
libraries=cblas_libs,
sources=['_k_means.c'],
include_dirs=[join('..', 'src', 'cblas'),
numpy.get_include(),
blas_info.pop('include_dirs', [])],
extra_compile_args=blas_info.pop('extra_compile_args', []),
**blas_info
)
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
| bsd-3-clause |
bfrickert/WFMU_Ken | py/GetComments.py | 2 | 1171 | import requests
from bs4 import BeautifulSoup
import pandas as pd
import re
import time
import datetime
import sys
def scrapeWFMUPlaylist(url):
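    # Parse the listener comments table from a WFMU playlist page into a DataFrame.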
response = requests.get(url)
soup = BeautifulSoup(response.text)
    # the table lookups happen inside the try block below so that pages
    # without a comments table are skipped instead of raising here
    values = []
try:
table = soup.find(id='comments-table')
rows = table.findAll('tr')
cell = table.findAll('td')
l = list()
for i in range(0, len(cell), 2):
txt = cell[i].findAll(text=True)
values.append(str.replace("".join(txt[6:len(txt)]).encode('ascii','ignore'),'\n',''))
except:
pass
df = pd.DataFrame(values[1:(len(values)-4)])
return df
df = pd.DataFrame()
url_df = pd.read_csv('data/{0}/playlists.tsv'.format(sys.argv[1]), sep='\t')
urls = [row[1] for index, row in url_df.iterrows()]
i = 0
for url in urls:
try:
i += 1
print str(i) + url
df = df.append(scrapeWFMUPlaylist(url))
except:
pass
df.to_csv('data/{0}/comments.txt'.format(sys.argv[1]), sep='\t')
| mit |
lepy/phuzzy | phuzzy/approx/model.py | 1 | 14732 | # -*- coding: utf-8 -*-
import collections
import logging
import numpy as np
import pandas as pd
from sklearn.model_selection import GridSearchCV
from sklearn.neighbors import KNeighborsRegressor
from sklearn.svm import SVR
import phuzzy
import phuzzy.contrib.pydoe as pydoe
class Expression(object):
"""Approximate an expression of fuzzy numbers
"""
def __init__(self, **kwargs):
"""DOE(kwargs)"""
self.name = kwargs.get("name", "DOE N.N.")
self._designvars = collections.OrderedDict()
self.doe_training = None
self.doe_prediction = None
        self.model = kwargs.get("model")
        # the (expensive) function to approximate; may also be assigned later
        self.function = kwargs.get("function")
        if "designvars" in kwargs:
            self.add_designvars(kwargs.get("designvars"))
        self.doe = DOE(designvars=list(self._designvars.values()), name="doe_prediction")
def __str__(self):
return "(Expression:'{o.name}', dv={d}".format(o=self,
d=self._designvars.keys(),
)
__repr__ = __str__
def generate_training_doe(self, name="train", n=10, method="lhs"):
"""generate train
:param name:
:param n:
:param method:
:return: doe
"""
self.doe_training = DOE(designvars=self.designvars.values(), name="doe_training")
self.doe_training.sample_doe(n=n, method=method)
def generate_prediction_doe(self, name="train", n=10, method="lhs"):
"""generate prediction doe
:param name:
:param n:
:param method:
:return: doe
"""
self.doe_prediction = DOE(designvars=self.designvars.values(), name="doe_prediction")
self.doe_prediction.sample_doe(n=n, method=method)
def predict(self, name=None):
"""predict function results"""
X = self.doe_prediction.samples[list(self.designvars.keys())]
y = self.model.predict(X)
self.results_prediction = pd.DataFrame({"res": y, "alpha": self.doe_prediction.samples.alpha})
print(1, self.results_training.head())
print(2, self.results_prediction.head())
df_res = pd.concat([self.results_training, self.results_prediction[["res", "alpha"]]])
if name is None:
name = "z"
z = phuzzy.FuzzyNumber.from_results(df_res, name=name)
z.convert_df(alpha_levels=11)
return z
@property
def designvars(self):
"""returns all design variables of doe
:return: dict of designvars
"""
return self._designvars
def add_designvar(self, designvar):
"""add design variable to doe
:param designvar: design variable
:return: None
"""
self._designvars[designvar.name] = designvar
def add_designvars(self, designvars):
"""add design variables to doe
:param designvars: list of design variables
:return: None
"""
for designvar in designvars:
self._designvars[designvar.name] = designvar
def update_training_results(self, df):
"""update training results for each data set of DoE sampling
:param df:
:return: None
"""
# TODO: implement real update ()
self.results_training = df
def eval(self):
"""evaluate (expensive) function
        Runs self.function on the training DOE samples and stores the results.
        :return: None
"""
eval_args = []
for dv in self.designvars.values():
eval_args.append(self.doe_training.samples[dv.name])
# calculate results for each row/data set in sampling
# TODO: add scheduler for long running (persistant) jobs
f_approx = self.function(*eval_args)
df_res = pd.DataFrame({"alpha": self.doe_training.samples.alpha,
"res": f_approx})
self.update_training_results(df_res)
def fit_model(self, model=None):
"""
:return:
"""
X = self.doe_training.samples[list(self.designvars.keys())].values
y = self.results_training.res.values
if model is None:
model = "svr"
models = {"svr": self._get_svr,
"knn": self._get_knn}
get_model = models.get(model, "svr")
get_model(X, y)
def _get_svr(self, X, y):
svr = GridSearchCV(SVR(kernel='rbf', gamma=.1), cv=5,
param_grid={"C": [1e0, 1e1, 1e2, 1e3, ],
"gamma": np.logspace(-2, 2, num=5)})
train_size = int(len(X) * .75)
logging.debug("train_size %s" % train_size)
svr.fit(X[:train_size], y[:train_size])
self.model = svr
def _get_knn(self, X, y):
n_neighbors = 5
weights = "distance"
knn = KNeighborsRegressor(n_neighbors, weights=weights).fit(X, y)
self.model = knn
def get_fuzzynumber_from_results(self, name=None):
"""
:return: FuzzyNumber
"""
fuzzynumber = phuzzy.approx.FuzzyNumber.from_results(self.results_training)
fuzzynumber.df_res = self.results_training.copy()
fuzzynumber.samples = self.doe_training.samples.copy()
if name is not None:
fuzzynumber.name = name
return fuzzynumber
class DOE(object):
"""Design of Experiment"""
MESHGRID = "meshgrid"
HALTON = "halton"
LHS = "lhs"
BOXBEHNKEN = "bb"
CCDESIGN = "cc"
def __init__(self, **kwargs):
"""DOE(kwargs)"""
self.name = kwargs.get("name", "DOE N.N.")
self._samples = pd.DataFrame()
self._designvars = collections.OrderedDict()
if "designvars" in kwargs:
self.add_designvars(kwargs.get("designvars"))
def __str__(self):
return "(DOE:'{o.name}', dv={l}, {n} samples)".format(o=self,
l=self._designvars.keys(),
n=len(self.samples))
__repr__ = __str__
def _get_samples(self):
"""returns all design points
:return: sample dataframe
"""
return self._samples
def _set_samples(self, value):
self._samples = value
samples = property(fget=_get_samples, fset=_set_samples, doc="dataframe of samples")
@property
def designvars(self):
"""returns all design variables of doe
:return: dict of designvars
"""
return self._designvars
def add_designvar(self, designvar):
"""add design variable to doe
:param designvar: design variable
:return: None
"""
self._designvars[designvar.name] = designvar
def add_designvars(self, designvars):
"""add design variables to doe
:param designvars: list of design variables
:return: None
"""
for designvar in designvars:
self._designvars[designvar.name] = designvar
def sample_doe(self, **kwargs):
"""generates samples for doe
:param method: 'meshgrid', 'lhs', 'bb', 'cc'
:return: samples
"""
methods = {self.MESHGRID: self.sample_meshgrid,
self.LHS: self.sample_lhs,
# self.HALTON : self.sample_halton,
self.BOXBEHNKEN: self.sample_bbdesign,
self.CCDESIGN: self.sample_ccdesign,
}
methodname = kwargs.get("method", self.MESHGRID)
method = methods.get(methodname)
self.samples = method(**kwargs)
return self.samples
def sample_meshgrid(self, **kwargs):
"""return all combinations (np.meshgrid)
:param kwargs:
:return: doe
"""
if len(self.designvars) == 1:
            designvar = list(self.designvars.values())[0]
doe = pd.DataFrame.from_dict({designvar.name: designvar.samples})
doe['alpha'] = 0.
else:
X = [designvar._disretize_range(n=0) for designvar in self.designvars.values()]
Y = np.meshgrid(*X)
d = {}
for i, designvar in enumerate(self.designvars.values()):
d[designvar.name] = Y[i].ravel()
doe = pd.DataFrame.from_dict(d)
# doe['alpha'] = 0
for i, designvar in enumerate(self.designvars.values()):
alpha = designvar.get_alpha_from_value(doe.iloc[:, i])
doe["_alpha_%d" % i] = alpha
# print("alpha", designvar, alpha)
alpha_cols = [x for x in doe.columns if x.startswith("_alpha_")]
doe["_alpha"] = doe[alpha_cols].min(axis=1)
return doe
# def sample_halton(self, **kwargs):
# sample = kwargs.get("n", 10)
# pass
def sample_bbdesign(self, **kwargs):
"""Box-Behnken Sampling
:param n: number of sample points
:return: doe
"""
dim = len(self.designvars)
if dim < 3:
logging.error("Box-Behnken requires at least 3 dimensions!")
raise Exception("Box-Behnken requires at least 3 dimensions!")
doe = pd.DataFrame(columns=[x.name for x in self.designvars.values()])
print(doe.columns, dim)
doe.loc[0] = np.zeros(dim)
print(pydoe.bbdesign(3))
doelhs = pd.DataFrame(pydoe.bbdesign(dim), columns=[x.name for x in self.designvars.values()])
doe = pd.concat([doe, doelhs], ignore_index=True)
doe['_alpha'] = 0
for i, designvar in enumerate(self.designvars.values()):
doe.iloc[:, i] = doe.iloc[:, i] * (designvar.max() - designvar.min()) + designvar.min()
return doe
def sample_ccdesign(self, **kwargs):
"""Central composite design
:param n: number of sample points
:return: doe
"""
dim = len(self.designvars)
dv0 = list(self.designvars.values())[0]
# doe = pd.DataFrame([[x.ppf(.5) for x in self.designvars.values()]], columns=[x.name for x in self.designvars.values()])
doe = pd.DataFrame(columns=[x.name for x in self.designvars.values()])
doe_cc_raw = pd.DataFrame(pydoe.ccdesign(dim, face='ccf'), columns=[x.name for x in self.designvars.values()])
doe_cc_raw['_alpha'] = 0
samples = []
# for alphalevel in [0, len(dv0.df)-1]: # [0, -1, len(dv0.df)//2]:
for alphalevel in [0, len(dv0.df) // 2, len(dv0.df) - 1]: # [0, -1, len(dv0.df)//2]:
# for alphalevel in [0, len(dv0.df)//3, 2*len(dv0.df)//3, -1]: # [0, -1, len(dv0.df)//2]:
doe_cc = doe_cc_raw.copy()
for i, designvar in enumerate(self.designvars.values()):
rmin = designvar.df.iloc[alphalevel].l
rmax = designvar.df.iloc[alphalevel].r
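                # map the [-1, 1] ccdesign coordinates onto [rmin, rmax] for this alpha level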
doe_cc.iloc[:, i] = (doe_cc.iloc[:, i] + 1.) / 2. * (rmax - rmin) + rmin
alpha = designvar.df.iloc[alphalevel].alpha
doe_cc.iloc[:, dim] = alpha
samples.append(doe_cc)
doe = pd.concat([doe] + samples, ignore_index=True)
doe.drop_duplicates(inplace=True)
doe.reset_index(inplace=True)
return doe
def sample_lhs(self, **kwargs):
"""Latin Hypercube Sampling
:param n: number of sample points
:return: doe
"""
dim = len(self.designvars)
n_samples = kwargs.get("n", 10)
doe = pd.DataFrame(columns=[x.name for x in self.designvars.values()])
doe.loc[0] = np.zeros(len(self.designvars))
doelhs = pd.DataFrame(pydoe.lhs(dim, n_samples - 1), columns=[x.name for x in self.designvars.values()])
doe = pd.concat([doe, doelhs], ignore_index=True)
for i, designvar in enumerate(self.designvars.values()):
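            # scale the unit-hypercube LHS samples onto the support of each design variable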
doe.iloc[:, i] = doe.iloc[:, i] * (designvar.max() - designvar.min()) + designvar.min()
for i, designvar in enumerate(self.designvars.values()):
alpha = designvar.get_alpha_from_value(doe.iloc[:, i])
doe["_alpha_%d" % i] = alpha
# print("alpha", designvar, alpha)
alpha_cols = [x for x in doe.columns if x.startswith("_alpha_")]
doe["_alpha"] = doe[alpha_cols].min(axis=1)
# doe['alpha'] = 0
print("_sample doe", doe.shape)
return doe
def sample_lhs2(self, **kwargs):
"""Latin Hypercube Sampling
:param n: number of sample points
:return: doe
"""
dim = len(self.designvars)
n_samples = kwargs.get("n", 100)
doe = pd.DataFrame(columns=[x.name for x in self.designvars.values()])
doelhs = pd.DataFrame(pydoe.lhs(dim, n_samples), columns=[x.name for x in self.designvars.values()])
print("doelhs", doelhs.shape)
# doe = pd.concat([doe, doelhs], ignore_index=True)
indices = list(self.designvars.values())[0].df.index
does = []
# doe.loc[0] = np.zeros(len(self.designvars))
for index in indices:
# print(index)
doe = doelhs.copy()
for i, designvar in enumerate(self.designvars.values()):
designvarmin = designvar.df.loc[index, "l"]
designvarmax = designvar.df.loc[index, "r"]
doe.iloc[:, i] = doe.iloc[:, i] * (designvarmax - designvarmin) + designvarmin
alpha = designvar.df.loc[index, "alpha"]
# if i==0:
# print("_"*80)
# print("alpha", alpha, designvar.name, designvarmin, designvarmax)
# print(doe.iloc[:, i])
doe.loc[:, "_alpha"] = alpha
does.append(doe.copy())
# print(does[0].iloc[:, 0])
doe = pd.concat(does, ignore_index=True)
# for i, designvar in enumerate(self.designvars.values()):
# alpha = designvar.get_alpha_from_value(doe.iloc[:, i])
# doe["_alpha_%d" % i] = alpha
# # print("alpha", designvar, alpha)
# alpha_cols = [x for x in doe.columns if x.startswith("_alpha_")]
# doe["_alpha"] = doe[alpha_cols].min(axis=1)
# doe['alpha'] = 0
# print("_sample doe", doe.tail())
return doe[[x.name for x in self.designvars.values()]+["_alpha"]]
def gen_lhs_samples(self, **kwargs):
"""Latin Hypercube Sampling
:param n: number of sample points
:return: doe
"""
dim = len(self.designvars)
n_samples = kwargs.get("n", 10)
doe = pd.DataFrame(columns=[x.name for x in self.designvars.values()])
doe.loc[0] = np.zeros(len(self.designvars))
doelhs = pd.DataFrame(pydoe.lhs(dim, n_samples), columns=[x.name for x in self.designvars.values()])
return doelhs
| mit |
adammenges/statsmodels | statsmodels/sandbox/examples/try_smoothers.py | 39 | 2655 | # -*- coding: utf-8 -*-
"""
Created on Tue Nov 01 15:17:52 2011
Author: Mike
Author: Josef
mainly script for checking Kernel Regression
"""
import numpy as np
if __name__ == "__main__":
#from statsmodels.sandbox.nonparametric import smoothers as s
from statsmodels.sandbox.nonparametric import smoothers, kernels
import matplotlib.pyplot as plt
#from numpy import sin, array, random
import time
np.random.seed(500)
nobs = 250
sig_fac = 0.5
#x = np.random.normal(size=nobs)
x = np.random.uniform(-2, 2, size=nobs)
#y = np.array([np.sin(i*5)/i + 2*i + (3+i)*np.random.normal() for i in x])
y = np.sin(x*5)/x + 2*x + sig_fac * (3+x)*np.random.normal(size=nobs)
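    # noisy test signal: sin(5x)/x + 2x plus heteroscedastic noise scaled by (3 + x)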
K = kernels.Biweight(0.25)
K2 = kernels.CustomKernel(lambda x: (1 - x*x)**2, 0.25, domain = [-1.0,
1.0])
KS = smoothers.KernelSmoother(x, y, K)
KS2 = smoothers.KernelSmoother(x, y, K2)
KSx = np.arange(-3, 3, 0.1)
start = time.time()
KSy = KS.conf(KSx)
KVar = KS.std(KSx)
print(time.time() - start) # This should be significantly quicker...
start = time.time() #
KS2y = KS2.conf(KSx) #
K2Var = KS2.std(KSx) #
print(time.time() - start) # ...than this.
KSConfIntx, KSConfInty = KS.conf(15)
print("Norm const should be 0.9375")
print(K2.norm_const)
print("L2 Norms Should Match:")
print(K.L2Norm)
print(K2.L2Norm)
print("Fit values should match:")
#print zip(KSy, KS2y)
print(KSy[28])
print(KS2y[28])
print("Var values should match:")
#print zip(KVar, K2Var)
print(KVar[39])
print(K2Var[39])
fig = plt.figure()
ax = fig.add_subplot(221)
ax.plot(x, y, "+")
ax.plot(KSx, KSy, "-o")
#ax.set_ylim(-20, 30)
ax2 = fig.add_subplot(222)
ax2.plot(KSx, KVar, "-o")
ax3 = fig.add_subplot(223)
ax3.plot(x, y, "+")
ax3.plot(KSx, KS2y, "-o")
#ax3.set_ylim(-20, 30)
ax4 = fig.add_subplot(224)
ax4.plot(KSx, K2Var, "-o")
fig2 = plt.figure()
ax5 = fig2.add_subplot(111)
ax5.plot(x, y, "+")
ax5.plot(KSConfIntx, KSConfInty, "-o")
import statsmodels.nonparametric.smoothers_lowess as lo
ys = lo.lowess(y, x)
ax5.plot(ys[:,0], ys[:,1], 'b-')
ys2 = lo.lowess(y, x, frac=0.25)
ax5.plot(ys2[:,0], ys2[:,1], 'b--', lw=2)
#need to sort for matplolib plot ?
xind = np.argsort(x)
pmod = smoothers.PolySmoother(5, x[xind])
pmod.fit(y[xind])
yp = pmod(x[xind])
ax5.plot(x[xind], yp, 'k-')
ax5.set_title('Kernel regression, lowess - blue, polysmooth - black')
#plt.show()
| bsd-3-clause |
hlin117/statsmodels | statsmodels/tools/tests/test_data.py | 36 | 1758 | import pandas
import numpy as np
from statsmodels.tools import data
def test_missing_data_pandas():
"""
Fixes GH: #144
"""
X = np.random.random((10,5))
X[1,2] = np.nan
df = pandas.DataFrame(X)
vals, cnames, rnames = data.interpret_data(df)
np.testing.assert_equal(rnames.tolist(), [0,2,3,4,5,6,7,8,9])
def test_structarray():
X = np.random.random((9,)).view([('var1', 'f8'),
('var2', 'f8'),
('var3', 'f8')])
vals, cnames, rnames = data.interpret_data(X)
np.testing.assert_equal(cnames, X.dtype.names)
np.testing.assert_equal(vals, X.view((float,3)))
np.testing.assert_equal(rnames, None)
def test_recarray():
X = np.random.random((9,)).view([('var1', 'f8'),
('var2', 'f8'),
('var3', 'f8')])
vals, cnames, rnames = data.interpret_data(X.view(np.recarray))
np.testing.assert_equal(cnames, X.dtype.names)
np.testing.assert_equal(vals, X.view((float,3)))
np.testing.assert_equal(rnames, None)
def test_dataframe():
X = np.random.random((10,5))
df = pandas.DataFrame(X)
vals, cnames, rnames = data.interpret_data(df)
np.testing.assert_equal(vals, df.values)
np.testing.assert_equal(rnames.tolist(), df.index.tolist())
np.testing.assert_equal(cnames, df.columns.tolist())
def test_patsy_577():
X = np.random.random((10, 2))
df = pandas.DataFrame(X, columns=["var1", "var2"])
from patsy import dmatrix
endog = dmatrix("var1 - 1", df)
np.testing.assert_(data._is_using_patsy(endog, None))
exog = dmatrix("var2 - 1", df)
np.testing.assert_(data._is_using_patsy(endog, exog))
| bsd-3-clause |
ebolyen/qiime2 | qiime2/core/testing/transformer.py | 1 | 4262 | # ----------------------------------------------------------------------------
# Copyright (c) 2016-2017, QIIME 2 development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
import collections
from qiime2 import Metadata
import pandas as pd
from .format import (
FourIntsDirectoryFormat,
MappingDirectoryFormat,
IntSequenceFormat,
IntSequenceFormatV2,
SingleIntFormat,
MappingFormat,
UnimportableFormat
)
from .plugin import dummy_plugin
@dummy_plugin.register_transformer
def _2(data: int) -> SingleIntFormat:
ff = SingleIntFormat()
with ff.open() as fh:
fh.write('%d\n' % data)
return ff
@dummy_plugin.register_transformer
def _5(ff: SingleIntFormat) -> int:
with ff.open() as fh:
return int(fh.read())
@dummy_plugin.register_transformer
def _7(data: list) -> IntSequenceFormat:
ff = IntSequenceFormat()
with ff.open() as fh:
for int_ in data:
fh.write('%d\n' % int_)
return ff
@dummy_plugin.register_transformer
def _77(data: list) -> IntSequenceFormatV2:
ff = IntSequenceFormatV2()
with ff.open() as fh:
fh.write('VERSION 2\n')
for int_ in data:
fh.write('%d\n' % int_)
return ff
@dummy_plugin.register_transformer
def _9(ff: IntSequenceFormat) -> list:
with ff.open() as fh:
return list(map(int, fh.readlines()))
@dummy_plugin.register_transformer
def _99(ff: IntSequenceFormatV2) -> list:
with ff.open() as fh:
fh.readline() # skip header
return list(map(int, fh.readlines()))
@dummy_plugin.register_transformer
def _10(ff: IntSequenceFormat) -> collections.Counter:
with ff.open() as fh:
return collections.Counter(map(int, fh.readlines()))
@dummy_plugin.register_transformer
def _1010(ff: IntSequenceFormatV2) -> collections.Counter:
with ff.open() as fh:
fh.readline() # skip header
return collections.Counter(map(int, fh.readlines()))
@dummy_plugin.register_transformer
def _1000(ff: IntSequenceFormat) -> IntSequenceFormatV2:
new_ff = IntSequenceFormatV2()
with new_ff.open() as new_fh, ff.open() as fh:
new_fh.write("VERSION 2\n")
for line in fh:
new_fh.write(line)
return new_ff
@dummy_plugin.register_transformer
def _11(data: dict) -> MappingDirectoryFormat:
df = MappingDirectoryFormat()
df.mapping.write_data(data, dict)
return df
@dummy_plugin.register_transformer
def _12(data: dict) -> MappingFormat:
ff = MappingFormat()
with ff.open() as fh:
for key, value in data.items():
fh.write('%s\t%s\n' % (key, value))
return ff
@dummy_plugin.register_transformer
def _13(df: MappingDirectoryFormat) -> dict:
# If this had been a `SingleFileDirectoryFormat` then this entire
# transformer would have been redundant (the framework could infer it).
return df.mapping.view(dict)
@dummy_plugin.register_transformer
def _14(ff: MappingFormat) -> dict:
data = {}
with ff.open() as fh:
for line in fh:
key, value = line.rstrip('\n').split('\t')
if key in data:
raise ValueError(
"mapping.txt file must have unique keys. Key %r was "
"observed more than once." % key)
data[key] = value
return data
@dummy_plugin.register_transformer
def _15(df: MappingDirectoryFormat) -> Metadata:
d = df.mapping.view(dict)
return Metadata(pd.DataFrame(d, index=["0"]))
@dummy_plugin.register_transformer
def _3(df: FourIntsDirectoryFormat) -> list:
# Note: most uses of `iter_views` will need to look at the first element
# of the series of tuples provided by iter_views
return [x for _, x in df.single_ints.iter_views(int)]
@dummy_plugin.register_transformer
def _1(data: list) -> FourIntsDirectoryFormat:
df = FourIntsDirectoryFormat()
for i, int_ in enumerate(data, 1):
df.single_ints.write_data(int_, int, num=i)
return df
@dummy_plugin.register_transformer
def _4(ff: UnimportableFormat) -> int:
return 1
| bsd-3-clause |
pypot/scikit-learn | sklearn/utils/fixes.py | 133 | 12882 | """Compatibility fixes for older version of python, numpy and scipy
If you add content to this file, please give the version of the package
at which the fixe is no longer needed.
"""
# Authors: Emmanuelle Gouillart <[email protected]>
# Gael Varoquaux <[email protected]>
# Fabian Pedregosa <[email protected]>
# Lars Buitinck
#
# License: BSD 3 clause
import inspect
import warnings
import sys
import functools
import os
import errno
import numpy as np
import scipy.sparse as sp
import scipy
def _parse_version(version_string):
version = []
for x in version_string.split('.'):
try:
version.append(int(x))
except ValueError:
# x may be of the form dev-1ea1592
version.append(x)
return tuple(version)
np_version = _parse_version(np.__version__)
sp_version = _parse_version(scipy.__version__)
try:
from scipy.special import expit # SciPy >= 0.10
with np.errstate(invalid='ignore', over='ignore'):
if np.isnan(expit(1000)): # SciPy < 0.14
raise ImportError("no stable expit in scipy.special")
except ImportError:
def expit(x, out=None):
"""Logistic sigmoid function, ``1 / (1 + exp(-x))``.
See sklearn.utils.extmath.log_logistic for the log of this function.
"""
if out is None:
out = np.empty(np.atleast_1d(x).shape, dtype=np.float64)
out[:] = x
# 1 / (1 + exp(-x)) = (1 + tanh(x / 2)) / 2
# This way of computing the logistic is both fast and stable.
out *= .5
np.tanh(out, out)
out += 1
out *= .5
return out.reshape(np.shape(x))
# little dance to see if np.copy has an 'order' keyword argument
if 'order' in inspect.getargspec(np.copy)[0]:
def safe_copy(X):
# Copy, but keep the order
return np.copy(X, order='K')
else:
# Before an 'order' argument was introduced, numpy wouldn't muck with
# the ordering
safe_copy = np.copy
try:
if (not np.allclose(np.divide(.4, 1, casting="unsafe"),
np.divide(.4, 1, casting="unsafe", dtype=np.float))
or not np.allclose(np.divide(.4, 1), .4)):
raise TypeError('Divide not working with dtype: '
'https://github.com/numpy/numpy/issues/3484')
divide = np.divide
except TypeError:
# Compat for old versions of np.divide that do not provide support for
# the dtype args
def divide(x1, x2, out=None, dtype=None):
out_orig = out
if out is None:
out = np.asarray(x1, dtype=dtype)
if out is x1:
out = x1.copy()
else:
if out is not x1:
out[:] = x1
if dtype is not None and out.dtype != dtype:
out = out.astype(dtype)
out /= x2
if out_orig is None and np.isscalar(x1):
out = np.asscalar(out)
return out
try:
np.array(5).astype(float, copy=False)
except TypeError:
# Compat where astype accepted no copy argument
def astype(array, dtype, copy=True):
if not copy and array.dtype == dtype:
return array
return array.astype(dtype)
else:
astype = np.ndarray.astype
try:
with warnings.catch_warnings(record=True):
# Don't raise the numpy deprecation warnings that appear in
# 1.9, but avoid Python bug due to simplefilter('ignore')
warnings.simplefilter('always')
sp.csr_matrix([1.0, 2.0, 3.0]).max(axis=0)
except (TypeError, AttributeError):
    # in scipy < 0.14.0, sparse matrix min/max doesn't accept an `axis` argument
# the following code is taken from the scipy 0.14 codebase
def _minor_reduce(X, ufunc):
major_index = np.flatnonzero(np.diff(X.indptr))
if X.data.size == 0 and major_index.size == 0:
# Numpy < 1.8.0 don't handle empty arrays in reduceat
value = np.zeros_like(X.data)
else:
value = ufunc.reduceat(X.data, X.indptr[major_index])
return major_index, value
def _min_or_max_axis(X, axis, min_or_max):
N = X.shape[axis]
if N == 0:
raise ValueError("zero-size array to reduction operation")
M = X.shape[1 - axis]
mat = X.tocsc() if axis == 0 else X.tocsr()
mat.sum_duplicates()
major_index, value = _minor_reduce(mat, min_or_max)
not_full = np.diff(mat.indptr)[major_index] < N
value[not_full] = min_or_max(value[not_full], 0)
mask = value != 0
major_index = np.compress(mask, major_index)
value = np.compress(mask, value)
from scipy.sparse import coo_matrix
if axis == 0:
res = coo_matrix((value, (np.zeros(len(value)), major_index)),
dtype=X.dtype, shape=(1, M))
else:
res = coo_matrix((value, (major_index, np.zeros(len(value)))),
dtype=X.dtype, shape=(M, 1))
return res.A.ravel()
def _sparse_min_or_max(X, axis, min_or_max):
if axis is None:
if 0 in X.shape:
raise ValueError("zero-size array to reduction operation")
zero = X.dtype.type(0)
if X.nnz == 0:
return zero
m = min_or_max.reduce(X.data.ravel())
if X.nnz != np.product(X.shape):
m = min_or_max(zero, m)
return m
if axis < 0:
axis += 2
if (axis == 0) or (axis == 1):
return _min_or_max_axis(X, axis, min_or_max)
else:
raise ValueError("invalid axis, use 0 for rows, or 1 for columns")
def sparse_min_max(X, axis):
return (_sparse_min_or_max(X, axis, np.minimum),
_sparse_min_or_max(X, axis, np.maximum))
else:
def sparse_min_max(X, axis):
return (X.min(axis=axis).toarray().ravel(),
X.max(axis=axis).toarray().ravel())
try:
from numpy import argpartition
except ImportError:
# numpy.argpartition was introduced in v 1.8.0
def argpartition(a, kth, axis=-1, kind='introselect', order=None):
return np.argsort(a, axis=axis, order=order)
try:
from itertools import combinations_with_replacement
except ImportError:
# Backport of itertools.combinations_with_replacement for Python 2.6,
# from Python 3.4 documentation (http://tinyurl.com/comb-w-r), copyright
# Python Software Foundation (https://docs.python.org/3/license.html)
def combinations_with_replacement(iterable, r):
# combinations_with_replacement('ABC', 2) --> AA AB AC BB BC CC
pool = tuple(iterable)
n = len(pool)
if not n and r:
return
indices = [0] * r
yield tuple(pool[i] for i in indices)
while True:
for i in reversed(range(r)):
if indices[i] != n - 1:
break
else:
return
indices[i:] = [indices[i] + 1] * (r - i)
yield tuple(pool[i] for i in indices)
try:
from numpy import isclose
except ImportError:
def isclose(a, b, rtol=1.e-5, atol=1.e-8, equal_nan=False):
"""
Returns a boolean array where two arrays are element-wise equal within
a tolerance.
This function was added to numpy v1.7.0, and the version you are
running has been backported from numpy v1.8.1. See its documentation
for more details.
"""
def within_tol(x, y, atol, rtol):
with np.errstate(invalid='ignore'):
result = np.less_equal(abs(x - y), atol + rtol * abs(y))
if np.isscalar(a) and np.isscalar(b):
result = bool(result)
return result
x = np.array(a, copy=False, subok=True, ndmin=1)
y = np.array(b, copy=False, subok=True, ndmin=1)
xfin = np.isfinite(x)
yfin = np.isfinite(y)
if all(xfin) and all(yfin):
return within_tol(x, y, atol, rtol)
else:
finite = xfin & yfin
cond = np.zeros_like(finite, subok=True)
# Since we're using boolean indexing, x & y must be the same shape.
# Ideally, we'd just do x, y = broadcast_arrays(x, y). It's in
# lib.stride_tricks, though, so we can't import it here.
x = x * np.ones_like(cond)
y = y * np.ones_like(cond)
# Avoid subtraction with infinite/nan values...
cond[finite] = within_tol(x[finite], y[finite], atol, rtol)
# Check for equality of infinite values...
cond[~finite] = (x[~finite] == y[~finite])
if equal_nan:
# Make NaN == NaN
cond[np.isnan(x) & np.isnan(y)] = True
return cond
if np_version < (1, 7):
# Prior to 1.7.0, np.frombuffer wouldn't work for empty first arg.
def frombuffer_empty(buf, dtype):
if len(buf) == 0:
return np.empty(0, dtype=dtype)
else:
return np.frombuffer(buf, dtype=dtype)
else:
frombuffer_empty = np.frombuffer
if np_version < (1, 8):
def in1d(ar1, ar2, assume_unique=False, invert=False):
# Backport of numpy function in1d 1.8.1 to support numpy 1.6.2
# Ravel both arrays, behavior for the first array could be different
ar1 = np.asarray(ar1).ravel()
ar2 = np.asarray(ar2).ravel()
# This code is significantly faster when the condition is satisfied.
if len(ar2) < 10 * len(ar1) ** 0.145:
if invert:
mask = np.ones(len(ar1), dtype=np.bool)
for a in ar2:
mask &= (ar1 != a)
else:
mask = np.zeros(len(ar1), dtype=np.bool)
for a in ar2:
mask |= (ar1 == a)
return mask
# Otherwise use sorting
if not assume_unique:
ar1, rev_idx = np.unique(ar1, return_inverse=True)
ar2 = np.unique(ar2)
ar = np.concatenate((ar1, ar2))
# We need this to be a stable sort, so always use 'mergesort'
# here. The values from the first array should always come before
# the values from the second array.
order = ar.argsort(kind='mergesort')
sar = ar[order]
if invert:
bool_ar = (sar[1:] != sar[:-1])
else:
bool_ar = (sar[1:] == sar[:-1])
flag = np.concatenate((bool_ar, [invert]))
indx = order.argsort(kind='mergesort')[:len(ar1)]
if assume_unique:
return flag[indx]
else:
return flag[indx][rev_idx]
else:
from numpy import in1d
if sp_version < (0, 15):
# Backport fix for scikit-learn/scikit-learn#2986 / scipy/scipy#4142
from ._scipy_sparse_lsqr_backport import lsqr as sparse_lsqr
else:
from scipy.sparse.linalg import lsqr as sparse_lsqr
if sys.version_info < (2, 7, 0):
# partial cannot be pickled in Python 2.6
# http://bugs.python.org/issue1398
class partial(object):
def __init__(self, func, *args, **keywords):
functools.update_wrapper(self, func)
self.func = func
self.args = args
self.keywords = keywords
def __call__(self, *args, **keywords):
args = self.args + args
kwargs = self.keywords.copy()
kwargs.update(keywords)
return self.func(*args, **kwargs)
else:
from functools import partial
if np_version < (1, 6, 2):
# Allow bincount to accept empty arrays
# https://github.com/numpy/numpy/commit/40f0844846a9d7665616b142407a3d74cb65a040
def bincount(x, weights=None, minlength=None):
if len(x) > 0:
return np.bincount(x, weights, minlength)
else:
if minlength is None:
minlength = 0
minlength = np.asscalar(np.asarray(minlength, dtype=np.intp))
return np.zeros(minlength, dtype=np.intp)
else:
from numpy import bincount
if 'exist_ok' in inspect.getargspec(os.makedirs).args:
makedirs = os.makedirs
else:
def makedirs(name, mode=0o777, exist_ok=False):
"""makedirs(name [, mode=0o777][, exist_ok=False])
Super-mkdir; create a leaf directory and all intermediate ones. Works
like mkdir, except that any intermediate path segment (not just the
rightmost) will be created if it does not exist. If the target
directory already exists, raise an OSError if exist_ok is False.
Otherwise no exception is raised. This is recursive.
"""
try:
os.makedirs(name, mode=mode)
except OSError as e:
if (not exist_ok or e.errno != errno.EEXIST
or not os.path.isdir(name)):
raise
| bsd-3-clause |
tetra5/radiance | logic/threads.py | 1 | 1875 | from PyQt4 import QtCore
class ImportingThread(QtCore.QThread):
"""
    Imports modules in a thread. The heaviest imports should go here, to make
    sure that importing them at runtime won't cause the UI to hang.
    Unfortunately, pyinstaller does not recognize such imports, so the same
    modules should also be fake-imported manually just to be included in the
    final distribution.
See radiance.pyinstaller_workaround()
"""
def __init__(self, parent=None):
QtCore.QThread.__init__(self, parent)
"""
List of modules to be loaded.
Each element should contain one of following:
('module', ['sub1', 'sub2']) # from module import sub1, sub2
'module' # import module
'module.sub1' # import module.sub1
Each module should be also duplicated at radiance.py, for
pyinstaller to work correctly.
"""
self.modules = [
# 'scipy',
'scipy.interpolate',
# 'matplotlib',
'sqlalchemy',
# 'numpy',
]
def run(self):
# Actual import.
for m in self.modules:
try:
modname, fromlist = m
except ValueError:
modname, fromlist = m, []
try:
m = __import__(modname, globals(), locals(), fromlist, -1)
except ImportError, e:
self.emit(QtCore.SIGNAL('terminated'),
'Threaded import: failed to import "%s": %s' % (modname, e))
else:
self.emit(QtCore.SIGNAL('set_message'), modname)
self.emit(QtCore.SIGNAL('finished()'))
| mit |
shanest/quantifier-rnn-learning | analysis.py | 1 | 12386 | """
Copyright (C) 2017 Shane Steinert-Threlkeld
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>
"""
from __future__ import division
from __future__ import print_function
from builtins import range
import itertools as it
import numpy as np
import scipy.stats as stats
from matplotlib import pyplot as plt
import util
COLORS = ['blue', 'red']
def experiment_analysis(path, quants, trials=list(range(30)), plots=True,
threshold=0.95, filename=None, size=None):
"""Prints statistical tests and makes plots for experiment one.
Args:
path: where the trials in CSV are
plots: whether to make plots or not
"""
# read the data in
data = util.read_trials_from_csv(path, trials)
# FILTER OUT TRIALS WHERE RNN DID NOT LEARN
remove_bad_trials(data, quants, threshold=threshold)
# get convergence points per quantifier
convergence_points = get_convergence_points(data, quants, threshold)
if plots:
# make plots
# make_boxplots(convergence_points, quants)
# make_barplots(convergence_points, quants)
make_plot(data, quants, ylim=(0.8, 1), threshold=threshold,
filename=filename, size=size)
print(stats.ttest_rel(convergence_points[quants[0]],
convergence_points[quants[1]]))
final_n = 50
final_means = [[forward_means(data[trial][quant + '_accuracy'].values,
window_size=final_n)[-final_n]
for quant in quants] for trial in data]
print('final means: {} - {}'.format(quants[0], quants[1]))
print(stats.ttest_rel(
[means[0] for means in final_means],
[means[1] for means in final_means]))
def experiment_one_a_analysis(filename=None, size=None):
experiment_analysis('data/exp1a',
['at_least_4', 'at_least_6_or_at_most_2'],
filename=filename, size=size)
def experiment_one_b_analysis(filename=None, size=None):
experiment_analysis('data/exp1b',
['at_most_3', 'at_least_6_or_at_most_2'],
filename=filename, size=size)
def experiment_two_a_analysis(filename=None, size=None):
experiment_analysis('data/exp2a',
['at_least_3', 'first_3'],
filename=filename, size=size)
def experiment_two_b_analysis(filename=None, size=None):
experiment_analysis('data/exp2b',
['at_least_3', 'last_3'],
filename=filename, size=size,
threshold=0.93)
def experiment_three_a_analysis(filename=None, size=None):
experiment_analysis('data/exp3a',
['not_all', 'not_only'],
filename=filename, size=size)
def experiment_three_b_analysis(filename=None, size=None):
experiment_analysis('data/exp3b',
['most', 'M'],
filename=filename, size=size)
def remove_bad_trials(data, quants, threshold=0.97):
"""Remove 'bad' trials from a data set. A trial is bad if it's not
the case that each quantifier's accuracy converged to a threshold.
The bad trials are deleted from data, but nothing is returned.
"""
bad_trials = set([])
for quant in quants:
accuracies = [data[key][quant + '_accuracy'].values for key in data]
forward_accs = [forward_means(accs) for accs in accuracies]
threshold_pos = [first_above_threshold(accs, threshold)
for accs in forward_accs]
        # a trial is bad if the forward mean never hit the threshold
bad_trials |= set([idx for idx, thresh in enumerate(threshold_pos)
if thresh is None])
print('Number of bad trials: {}'.format(len(bad_trials)))
for trial in bad_trials:
del data[trial]
def get_convergence_points(data, quants, threshold):
"""Get convergence points by quantifier for the data.
Args:
data: a dictionary, intended to be made by util.read_trials_from_csv
quants: list of quantifier names
Returns:
a dictionary, with keys the quantifier names, and values the list of
the step at which accuracy on that quantifier converged on each trial.
"""
convergence_points = {q: [] for q in quants}
for trial in list(data.keys()):
for quant in quants:
convergence_points[quant].append(
data[trial]['global_step'][
convergence_point(
data[trial][quant + '_accuracy'].values,
threshold)])
return convergence_points
def diff(ls1, ls2):
"""List difference function.
Args:
ls1: first list
ls2: second list
Returns:
pointwise difference ls1 - ls2
"""
assert len(ls1) == len(ls2)
return [ls1[i] - ls2[i] for i in range(len(ls1))]
def forward_means(arr, window_size=250):
"""Get the forward means of a list. The forward mean at index i is
the sum of all the elements from i until i+window_size, divided
by the number of such elements. If there are not window_size elements
after index i, the forward mean is the mean of all elements from i
until the end of the list.
Args:
arr: the list to get means of
window_size: the size of the forward window for the mean
Returns:
a list, of same length as arr, with the forward means
"""
return [(sum(arr[idx:min(idx+window_size, len(arr))])
/ min(window_size, len(arr)-idx))
for idx in range(len(arr))]
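# Worked example for forward_means (values checked by hand):
#     forward_means([1, 2, 3, 4], window_size=2)  ->  [1.5, 2.5, 3.5, 4.0]
# The last entry averages only the single remaining element, since fewer than
# window_size elements follow it.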
def first_above_threshold(arr, threshold):
"""Return the point at which a list value is above a threshold.
Args:
arr: the list
threshold: the threshold
Returns:
        the first i such that both arr[i] and its forward mean exceed
        threshold, or None if no such index exists
"""
means = forward_means(arr)
for idx in range(len(arr)):
if arr[idx] > threshold and means[idx] > threshold:
return idx
return None
def convergence_point(arr, threshold=0.95):
"""Get the point at which a list converges above a threshold.
Args:
arr: the list
threshold: the threshold
Returns:
        the first i at which both arr[i] and its forward mean exceed threshold
"""
return first_above_threshold(arr, threshold)
def get_max_steps(data):
"""Gets the longest `global_step` column from a data set.
Args:
data: a dictionary, whose values are pandas.DataFrame, which have a
column named `global_step`
Returns:
the values for the longest `global_step` column in data
"""
max_val = None
max_len = 0
for key in list(data.keys()):
new_len = len(data[key]['global_step'].values)
if new_len > max_len:
max_len = new_len
max_val = data[key]['global_step'].values
return max_val
def make_plot(data, quants, ylim=None, threshold=0.95, filename=None, size=None):
"""Makes a line plot of the accuracy of trials by quantifier, color coded,
and with the medians also plotted.
Args:
data: the data
quants: list of quantifier names
        ylim: y-axis boundaries
        threshold: accuracy threshold, drawn as a dashed reference line
        filename: if given, save the figure to this path instead of showing it
        size: figure size in inches, passed to plt.figure(figsize=...);
            defaults to (18, 12)
    """
assert len(quants) <= len(COLORS)
plt.figure(figsize=size or (18, 12))
trials_by_quant = [[] for _ in range(len(quants))]
for trial in list(data.keys()):
steps = data[trial]['global_step'].values
for idx in range(len(quants)):
trials_by_quant[idx].append(smooth_data(
data[trial][quants[idx] + '_accuracy'].values))
plt.plot(steps, trials_by_quant[idx][-1],
COLORS[idx], alpha=0.3)
# plot median lines
medians_by_quant = [get_median_diff_lengths(trials_by_quant[idx])
for idx in range(len(trials_by_quant))]
# get x-axis of longest trial
longest_x = get_max_steps(data)
for idx in range(len(quants)):
plt.plot(longest_x,
smooth_data(medians_by_quant[idx]),
COLORS[idx],
label='Q{}: {}'.format(idx, quants[idx].replace('_', ' ')),
linewidth=2)
max_x = max([len(ls) for ls in medians_by_quant])
plt.plot(longest_x, [threshold for _ in range(max_x)],
linestyle='dashed', color='green')
if ylim:
plt.ylim(ylim)
plt.xlabel('training steps')
plt.ylabel('test set accuracy')
plt.legend(loc=4, fontsize=12)
if filename:
plt.savefig(filename, bbox_inches='tight')
else:
plt.show()
def get_median_diff_lengths(trials):
"""Get the point-wise median of a list of lists of possibly
different lengths.
Args:
trials: a list of lists, corresponding to trials
Returns:
a list, of the same length as the longest list in trials,
where the list at index i contains the median of all of the
lists in trials that are at least i long
"""
max_len = np.max([len(trial) for trial in trials])
# pad trials with NaN values to length of longest trial
trials = np.asarray(
[np.pad(trial, (0, max_len - len(trial)),
'constant', constant_values=np.nan)
for trial in trials])
return np.nanmedian(trials, axis=0)
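# Worked example for get_median_diff_lengths: with trials of unequal length
# the shorter ones are padded with NaN before taking the point-wise median,
#     get_median_diff_lengths([[1, 3, 5], [2, 4]])  ->  [1.5, 3.5, 5.0]
# (as a numpy array), where the final value comes from the only trial that is
# long enough.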
def make_boxplots(convergence_points, quants):
"""Makes box plots of some data.
Args:
convergence_points: dictionary of quantifier convergence points
quants: names of quantifiers
"""
plt.boxplot([convergence_points[quant] for quant in quants])
plt.xticks(list(range(1, len(quants)+1)), quants)
plt.show()
def make_barplots(convergence_points, quants):
"""Makes bar plots, with confidence intervals, of some data.
Args:
convergence_points: dictionary of quantifier convergence points
quants: names of quantifiers
"""
pairs = list(it.combinations(quants, 2))
assert len(pairs) <= len(COLORS)
diffs = {pair: diff(convergence_points[pair[0]],
convergence_points[pair[1]])
for pair in pairs}
means = {pair: np.mean(diffs[pair]) for pair in pairs}
stds = {pair: np.std(diffs[pair]) for pair in pairs}
intervals = {pair: stats.norm.interval(
0.95, loc=means[pair],
scale=stds[pair]/np.sqrt(len(diffs[pair])))
for pair in pairs}
# plotting info
index = np.arange(len(pairs))
bar_width = 0.75
# reshape intervals to be fed to pyplot
yerrs = [[means[pair] - intervals[pair][0] for pair in pairs],
[intervals[pair][1] - means[pair] for pair in pairs]]
plt.bar(index, [means[pair] for pair in pairs], bar_width, yerr=yerrs,
color=[COLORS[idx] for idx in range(len(pairs))],
ecolor='black', align='center')
plt.xticks(index, pairs)
plt.show()
def smooth_data(data, smooth_weight=0.9):
"""Smooths out a series of data which might otherwise be choppy.
Args:
data: a line to smooth out
smooth_weight: between 0 and 1, for 0 being no change and
1 a flat line. Higher values are smoother curves.
Returns:
a list of the same length as data, containing the smooth version.
"""
prev = data[0]
smoothed = []
for point in data:
smoothed.append(prev*smooth_weight + point*(1-smooth_weight))
prev = smoothed[-1]
return smoothed
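# Worked example: smooth_data is an exponential moving average, e.g.
#     smooth_data([0, 1, 1], smooth_weight=0.9)  ->  [0.0, 0.1, 0.19]
# Each point moves only (1 - smooth_weight) of the way toward the new value,
# which is what flattens the otherwise choppy accuracy curves.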
if __name__ == '__main__':
experiment_one_a_analysis(filename='data/exp1a_acc_small.png', size=(6, 4))
experiment_one_b_analysis(filename='data/exp1b_acc_small.png', size=(6, 4))
experiment_two_a_analysis(filename='data/exp2a_acc_small.png', size=(6, 4))
experiment_two_b_analysis(filename='data/exp2b_acc_small.png', size=(6, 4))
| gpl-3.0 |
dsm054/pandas | pandas/tests/tslibs/test_conversion.py | 7 | 2377 | # -*- coding: utf-8 -*-
import numpy as np
import pytest
import pandas.util.testing as tm
from pandas import date_range
from pandas._libs.tslib import iNaT
from pandas._libs.tslibs import conversion, timezones
def compare_utc_to_local(tz_didx, utc_didx):
f = lambda x: conversion.tz_convert_single(x, 'UTC', tz_didx.tz)
result = conversion.tz_convert(tz_didx.asi8, 'UTC', tz_didx.tz)
result_single = np.vectorize(f)(tz_didx.asi8)
tm.assert_numpy_array_equal(result, result_single)
def compare_local_to_utc(tz_didx, utc_didx):
f = lambda x: conversion.tz_convert_single(x, tz_didx.tz, 'UTC')
result = conversion.tz_convert(utc_didx.asi8, tz_didx.tz, 'UTC')
result_single = np.vectorize(f)(utc_didx.asi8)
tm.assert_numpy_array_equal(result, result_single)
class TestTZConvert(object):
@pytest.mark.parametrize('tz', ['UTC', 'Asia/Tokyo',
'US/Eastern', 'Europe/Moscow'])
def test_tz_convert_single_matches_tz_convert_hourly(self, tz):
# US: 2014-03-09 - 2014-11-11
# MOSCOW: 2014-10-26 / 2014-12-31
tz_didx = date_range('2014-03-01', '2015-01-10', freq='H', tz=tz)
utc_didx = date_range('2014-03-01', '2015-01-10', freq='H')
compare_utc_to_local(tz_didx, utc_didx)
# local tz to UTC can be differ in hourly (or higher) freqs because
# of DST
compare_local_to_utc(tz_didx, utc_didx)
@pytest.mark.parametrize('tz', ['UTC', 'Asia/Tokyo',
'US/Eastern', 'Europe/Moscow'])
@pytest.mark.parametrize('freq', ['D', 'A'])
def test_tz_convert_single_matches_tz_convert(self, tz, freq):
tz_didx = date_range('2000-01-01', '2020-01-01', freq=freq, tz=tz)
utc_didx = date_range('2000-01-01', '2020-01-01', freq=freq)
compare_utc_to_local(tz_didx, utc_didx)
compare_local_to_utc(tz_didx, utc_didx)
@pytest.mark.parametrize('arr', [
pytest.param(np.array([], dtype=np.int64), id='empty'),
pytest.param(np.array([iNaT], dtype=np.int64), id='all_nat')])
def test_tz_convert_corner(self, arr):
result = conversion.tz_convert(arr,
timezones.maybe_get_tz('US/Eastern'),
timezones.maybe_get_tz('Asia/Tokyo'))
tm.assert_numpy_array_equal(result, arr)
| bsd-3-clause |
jdfekete/progressivis | examples/test_mcscatterplot.py | 1 | 2384 | """
Test loading of nyc_taxis with dynamic queries.
"""
import time
import pandas as pd
import copy
from progressivis.core import Scheduler, Every
from progressivis.table import Table
from progressivis.vis import MCScatterPlot
from progressivis.io import CSVLoader
#from progressivis.datasets import get_dataset
from progressivis.table.constant import Constant
import asyncio as aio
def _filter(df):
pklon = df['pickup_longitude']
pklat = df['pickup_latitude']
dolon = df['dropoff_longitude']
dolat = df['dropoff_latitude']
return df[(pklon > -74.08) & (pklon < -73.5) & (pklat > 40.55) & (pklat < 41.00) &
(dolon > -74.08) & (dolon < -73.5) & (dolat > 40.55) & (dolat < 41.00)]
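# Note: _filter keeps only trips whose pickup and dropoff coordinates fall
# inside a rough New York City bounding box (longitude -74.08..-73.5,
# latitude 40.55..41.00).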
def _print_len(x):
if x is not None:
print(len(x))
#log_level() #package='progressivis.stats.histogram2d')
try:
s = scheduler
except NameError:
s = Scheduler()
#PREFIX= 'https://storage.googleapis.com/tlc-trip-data/2015/'
#SUFFIX= ''
PREFIX = '../nyc-taxi/'
SUFFIX = '.bz2'
"""
URLS = [
PREFIX+'yellow_tripdata_2015-01.csv'+SUFFIX,
PREFIX+'yellow_tripdata_2015-02.csv'+SUFFIX,
PREFIX+'yellow_tripdata_2015-03.csv'+SUFFIX,
PREFIX+'yellow_tripdata_2015-04.csv'+SUFFIX,
PREFIX+'yellow_tripdata_2015-05.csv'+SUFFIX,
PREFIX+'yellow_tripdata_2015-06.csv'+SUFFIX,
]
"""
URLS = [f"https://s3.amazonaws.com/nyc-tlc/trip+data/yellow_tripdata_2015-0{n}.csv" for n in range(1,7)]
FILENAMES = pd.DataFrame({'filename': URLS})
#import pdb;pdb.set_trace()
CST = Constant(Table('filenames', data=FILENAMES), scheduler=s)
CSV = CSVLoader(index_col=False, skipinitialspace=True,
usecols=['pickup_longitude', 'pickup_latitude',
'dropoff_longitude', 'dropoff_latitude'],
filter_=_filter, scheduler=s) # TODO: reimplement filter in read_csv.py
CSV.input.filenames = CST.output.result
PR = Every(scheduler=s)
PR.input.df = CSV.output.result
MULTICLASS = MCScatterPlot(scheduler=s, classes=[
('pickup', 'pickup_longitude', 'pickup_latitude'),
('dropoff', 'dropoff_longitude', 'dropoff_latitude')], approximate=True)
MULTICLASS.create_dependent_modules(CSV, 'result')
async def coro(s):
await aio.sleep(2)
print("awake after 2 sec.")
s.to_json()
if __name__ == '__main__':
aio.run(s.start(coros=[coro(s), aio.sleep(3600)]))
print(len(CSV.table()))
| bsd-2-clause |
ephes/scikit-learn | sklearn/datasets/__init__.py | 176 | 3671 | """
The :mod:`sklearn.datasets` module includes utilities to load datasets,
including methods to load and fetch popular reference datasets. It also
features some artificial data generators.
"""
from .base import load_diabetes
from .base import load_digits
from .base import load_files
from .base import load_iris
from .base import load_linnerud
from .base import load_boston
from .base import get_data_home
from .base import clear_data_home
from .base import load_sample_images
from .base import load_sample_image
from .covtype import fetch_covtype
from .mlcomp import load_mlcomp
from .lfw import load_lfw_pairs
from .lfw import load_lfw_people
from .lfw import fetch_lfw_pairs
from .lfw import fetch_lfw_people
from .twenty_newsgroups import fetch_20newsgroups
from .twenty_newsgroups import fetch_20newsgroups_vectorized
from .mldata import fetch_mldata, mldata_filename
from .samples_generator import make_classification
from .samples_generator import make_multilabel_classification
from .samples_generator import make_hastie_10_2
from .samples_generator import make_regression
from .samples_generator import make_blobs
from .samples_generator import make_moons
from .samples_generator import make_circles
from .samples_generator import make_friedman1
from .samples_generator import make_friedman2
from .samples_generator import make_friedman3
from .samples_generator import make_low_rank_matrix
from .samples_generator import make_sparse_coded_signal
from .samples_generator import make_sparse_uncorrelated
from .samples_generator import make_spd_matrix
from .samples_generator import make_swiss_roll
from .samples_generator import make_s_curve
from .samples_generator import make_sparse_spd_matrix
from .samples_generator import make_gaussian_quantiles
from .samples_generator import make_biclusters
from .samples_generator import make_checkerboard
from .svmlight_format import load_svmlight_file
from .svmlight_format import load_svmlight_files
from .svmlight_format import dump_svmlight_file
from .olivetti_faces import fetch_olivetti_faces
from .species_distributions import fetch_species_distributions
from .california_housing import fetch_california_housing
from .rcv1 import fetch_rcv1
__all__ = ['clear_data_home',
'dump_svmlight_file',
'fetch_20newsgroups',
'fetch_20newsgroups_vectorized',
'fetch_lfw_pairs',
'fetch_lfw_people',
'fetch_mldata',
'fetch_olivetti_faces',
'fetch_species_distributions',
'fetch_california_housing',
'fetch_covtype',
'fetch_rcv1',
'get_data_home',
'load_boston',
'load_diabetes',
'load_digits',
'load_files',
'load_iris',
'load_lfw_pairs',
'load_lfw_people',
'load_linnerud',
'load_mlcomp',
'load_sample_image',
'load_sample_images',
'load_svmlight_file',
'load_svmlight_files',
'make_biclusters',
'make_blobs',
'make_circles',
'make_classification',
'make_checkerboard',
'make_friedman1',
'make_friedman2',
'make_friedman3',
'make_gaussian_quantiles',
'make_hastie_10_2',
'make_low_rank_matrix',
'make_moons',
'make_multilabel_classification',
'make_regression',
'make_s_curve',
'make_sparse_coded_signal',
'make_sparse_spd_matrix',
'make_sparse_uncorrelated',
'make_spd_matrix',
'make_swiss_roll',
'mldata_filename']
| bsd-3-clause |
jayflo/scikit-learn | sklearn/tests/test_qda.py | 155 | 3481 | import numpy as np
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import ignore_warnings
from sklearn import qda
# Data is just 6 separable points in the plane
X = np.array([[0, 0], [-2, -2], [-2, -1], [-1, -1], [-1, -2],
[1, 3], [1, 2], [2, 1], [2, 2]])
y = np.array([1, 1, 1, 1, 1, 2, 2, 2, 2])
y3 = np.array([1, 2, 3, 2, 3, 1, 2, 3, 1])
# Degenerate data with 1 feature (still should be separable)
X1 = np.array([[-3, ], [-2, ], [-1, ], [-1, ], [0, ], [1, ], [1, ],
[2, ], [3, ]])
# Data that has zero variance in one dimension and needs regularization
X2 = np.array([[-3, 0], [-2, 0], [-1, 0], [-1, 0], [0, 0], [1, 0], [1, 0],
[2, 0], [3, 0]])
# One element class
y4 = np.array([1, 1, 1, 1, 1, 1, 1, 1, 2])
# Data with less samples in a class than n_features
X5 = np.c_[np.arange(8), np.zeros((8,3))]
y5 = np.array([0, 0, 0, 0, 0, 1, 1, 1])
def test_qda():
# QDA classification.
# This checks that QDA implements fit and predict and returns
# correct values for a simple toy dataset.
clf = qda.QDA()
y_pred = clf.fit(X, y).predict(X)
assert_array_equal(y_pred, y)
# Assure that it works with 1D data
y_pred1 = clf.fit(X1, y).predict(X1)
assert_array_equal(y_pred1, y)
# Test probas estimates
y_proba_pred1 = clf.predict_proba(X1)
assert_array_equal((y_proba_pred1[:, 1] > 0.5) + 1, y)
y_log_proba_pred1 = clf.predict_log_proba(X1)
assert_array_almost_equal(np.exp(y_log_proba_pred1), y_proba_pred1, 8)
y_pred3 = clf.fit(X, y3).predict(X)
# QDA shouldn't be able to separate those
assert_true(np.any(y_pred3 != y3))
# Classes should have at least 2 elements
assert_raises(ValueError, clf.fit, X, y4)
def test_qda_priors():
clf = qda.QDA()
y_pred = clf.fit(X, y).predict(X)
n_pos = np.sum(y_pred == 2)
neg = 1e-10
clf = qda.QDA(priors=np.array([neg, 1 - neg]))
y_pred = clf.fit(X, y).predict(X)
n_pos2 = np.sum(y_pred == 2)
assert_greater(n_pos2, n_pos)
def test_qda_store_covariances():
# The default is to not set the covariances_ attribute
clf = qda.QDA().fit(X, y)
assert_true(not hasattr(clf, 'covariances_'))
# Test the actual attribute:
clf = qda.QDA().fit(X, y, store_covariances=True)
assert_true(hasattr(clf, 'covariances_'))
assert_array_almost_equal(
clf.covariances_[0],
np.array([[0.7, 0.45], [0.45, 0.7]])
)
assert_array_almost_equal(
clf.covariances_[1],
np.array([[0.33333333, -0.33333333], [-0.33333333, 0.66666667]])
)
def test_qda_regularization():
# the default is reg_param=0. and will cause issues
# when there is a constant variable
clf = qda.QDA()
with ignore_warnings():
y_pred = clf.fit(X2, y).predict(X2)
assert_true(np.any(y_pred != y))
# adding a little regularization fixes the problem
clf = qda.QDA(reg_param=0.01)
with ignore_warnings():
clf.fit(X2, y)
y_pred = clf.predict(X2)
assert_array_equal(y_pred, y)
# Case n_samples_in_a_class < n_features
clf = qda.QDA(reg_param=0.1)
with ignore_warnings():
clf.fit(X5, y5)
y_pred5 = clf.predict(X5)
assert_array_equal(y_pred5, y5)
| bsd-3-clause |
MechCoder/scikit-learn | sklearn/feature_selection/tests/test_from_model.py | 5 | 7177 | import numpy as np
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import skip_if_32bit
from sklearn import datasets
from sklearn.linear_model import LogisticRegression, SGDClassifier, Lasso
from sklearn.svm import LinearSVC
from sklearn.feature_selection import SelectFromModel
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import PassiveAggressiveClassifier
iris = datasets.load_iris()
data, y = iris.data, iris.target
rng = np.random.RandomState(0)
def test_invalid_input():
clf = SGDClassifier(alpha=0.1, n_iter=10, shuffle=True, random_state=None)
for threshold in ["gobbledigook", ".5 * gobbledigook"]:
model = SelectFromModel(clf, threshold=threshold)
model.fit(data, y)
assert_raises(ValueError, model.transform, data)
def test_input_estimator_unchanged():
"""
Test that SelectFromModel fits on a clone of the estimator.
"""
est = RandomForestClassifier()
transformer = SelectFromModel(estimator=est)
transformer.fit(data, y)
assert_true(transformer.estimator is est)
@skip_if_32bit
def test_feature_importances():
X, y = datasets.make_classification(
n_samples=1000, n_features=10, n_informative=3, n_redundant=0,
n_repeated=0, shuffle=False, random_state=0)
est = RandomForestClassifier(n_estimators=50, random_state=0)
for threshold, func in zip(["mean", "median"], [np.mean, np.median]):
transformer = SelectFromModel(estimator=est, threshold=threshold)
transformer.fit(X, y)
assert_true(hasattr(transformer.estimator_, 'feature_importances_'))
X_new = transformer.transform(X)
assert_less(X_new.shape[1], X.shape[1])
importances = transformer.estimator_.feature_importances_
feature_mask = np.abs(importances) > func(importances)
assert_array_almost_equal(X_new, X[:, feature_mask])
# Check with sample weights
sample_weight = np.ones(y.shape)
sample_weight[y == 1] *= 100
est = RandomForestClassifier(n_estimators=50, random_state=0)
transformer = SelectFromModel(estimator=est)
transformer.fit(X, y, sample_weight=sample_weight)
importances = transformer.estimator_.feature_importances_
transformer.fit(X, y, sample_weight=3 * sample_weight)
importances_bis = transformer.estimator_.feature_importances_
assert_almost_equal(importances, importances_bis)
# For the Lasso and related models, the threshold defaults to 1e-5
transformer = SelectFromModel(estimator=Lasso(alpha=0.1))
transformer.fit(X, y)
X_new = transformer.transform(X)
mask = np.abs(transformer.estimator_.coef_) > 1e-5
assert_array_equal(X_new, X[:, mask])
@skip_if_32bit
def test_feature_importances_2d_coef():
X, y = datasets.make_classification(
n_samples=1000, n_features=10, n_informative=3, n_redundant=0,
n_repeated=0, shuffle=False, random_state=0, n_classes=4)
est = LogisticRegression()
for threshold, func in zip(["mean", "median"], [np.mean, np.median]):
for order in [1, 2, np.inf]:
# Fit SelectFromModel a multi-class problem
transformer = SelectFromModel(estimator=LogisticRegression(),
threshold=threshold,
norm_order=order)
transformer.fit(X, y)
assert_true(hasattr(transformer.estimator_, 'coef_'))
X_new = transformer.transform(X)
assert_less(X_new.shape[1], X.shape[1])
# Manually check that the norm is correctly performed
est.fit(X, y)
importances = np.linalg.norm(est.coef_, axis=0, ord=order)
feature_mask = importances > func(importances)
assert_array_equal(X_new, X[:, feature_mask])
def test_partial_fit():
est = PassiveAggressiveClassifier(random_state=0, shuffle=False)
transformer = SelectFromModel(estimator=est)
transformer.partial_fit(data, y,
classes=np.unique(y))
old_model = transformer.estimator_
transformer.partial_fit(data, y,
classes=np.unique(y))
new_model = transformer.estimator_
assert_true(old_model is new_model)
X_transform = transformer.transform(data)
transformer.fit(np.vstack((data, data)), np.concatenate((y, y)))
assert_array_equal(X_transform, transformer.transform(data))
# check that if est doesn't have partial_fit, neither does SelectFromModel
transformer = SelectFromModel(estimator=RandomForestClassifier())
assert_false(hasattr(transformer, "partial_fit"))
def test_calling_fit_reinitializes():
est = LinearSVC(random_state=0)
transformer = SelectFromModel(estimator=est)
transformer.fit(data, y)
transformer.set_params(estimator__C=100)
transformer.fit(data, y)
assert_equal(transformer.estimator_.C, 100)
def test_prefit():
"""
Test all possible combinations of the prefit parameter.
"""
# Passing a prefit parameter with the selected model
# and fitting a unfit model with prefit=False should give same results.
clf = SGDClassifier(alpha=0.1, n_iter=10, shuffle=True, random_state=0)
model = SelectFromModel(clf)
model.fit(data, y)
X_transform = model.transform(data)
clf.fit(data, y)
model = SelectFromModel(clf, prefit=True)
assert_array_equal(model.transform(data), X_transform)
# Check that the model is rewritten if prefit=False and a fitted model is
# passed
model = SelectFromModel(clf, prefit=False)
model.fit(data, y)
assert_array_equal(model.transform(data), X_transform)
# Check that prefit=True and calling fit raises a ValueError
model = SelectFromModel(clf, prefit=True)
assert_raises(ValueError, model.fit, data, y)
def test_threshold_string():
est = RandomForestClassifier(n_estimators=50, random_state=0)
model = SelectFromModel(est, threshold="0.5*mean")
model.fit(data, y)
X_transform = model.transform(data)
# Calculate the threshold from the estimator directly.
est.fit(data, y)
threshold = 0.5 * np.mean(est.feature_importances_)
mask = est.feature_importances_ > threshold
assert_array_equal(X_transform, data[:, mask])
def test_threshold_without_refitting():
"""Test that the threshold can be set without refitting the model."""
clf = SGDClassifier(alpha=0.1, n_iter=10, shuffle=True, random_state=0)
model = SelectFromModel(clf, threshold="0.1 * mean")
model.fit(data, y)
X_transform = model.transform(data)
# Set a higher threshold to filter out more features.
model.threshold = "1.0 * mean"
assert_greater(X_transform.shape[1], model.transform(data).shape[1])
| bsd-3-clause |
ephes/scikit-learn | sklearn/linear_model/tests/test_logistic.py | 105 | 26588 | import numpy as np
import scipy.sparse as sp
from scipy import linalg, optimize, sparse
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import raises
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_raise_message
from sklearn.utils import ConvergenceWarning
from sklearn.linear_model.logistic import (
LogisticRegression,
logistic_regression_path, LogisticRegressionCV,
_logistic_loss_and_grad, _logistic_grad_hess,
_multinomial_grad_hess, _logistic_loss,
)
from sklearn.cross_validation import StratifiedKFold
from sklearn.datasets import load_iris, make_classification
X = [[-1, 0], [0, 1], [1, 1]]
X_sp = sp.csr_matrix(X)
Y1 = [0, 1, 1]
Y2 = [2, 1, 0]
iris = load_iris()
def check_predictions(clf, X, y):
"""Check that the model is able to fit the classification data"""
n_samples = len(y)
classes = np.unique(y)
n_classes = classes.shape[0]
predicted = clf.fit(X, y).predict(X)
assert_array_equal(clf.classes_, classes)
assert_equal(predicted.shape, (n_samples,))
assert_array_equal(predicted, y)
probabilities = clf.predict_proba(X)
assert_equal(probabilities.shape, (n_samples, n_classes))
assert_array_almost_equal(probabilities.sum(axis=1), np.ones(n_samples))
assert_array_equal(probabilities.argmax(axis=1), y)
def test_predict_2_classes():
# Simple sanity check on a 2 classes dataset
# Make sure it predicts the correct result on simple datasets.
check_predictions(LogisticRegression(random_state=0), X, Y1)
check_predictions(LogisticRegression(random_state=0), X_sp, Y1)
check_predictions(LogisticRegression(C=100, random_state=0), X, Y1)
check_predictions(LogisticRegression(C=100, random_state=0), X_sp, Y1)
check_predictions(LogisticRegression(fit_intercept=False,
random_state=0), X, Y1)
check_predictions(LogisticRegression(fit_intercept=False,
random_state=0), X_sp, Y1)
def test_error():
# Test for appropriate exception on errors
msg = "Penalty term must be positive"
assert_raise_message(ValueError, msg,
LogisticRegression(C=-1).fit, X, Y1)
assert_raise_message(ValueError, msg,
LogisticRegression(C="test").fit, X, Y1)
for LR in [LogisticRegression, LogisticRegressionCV]:
msg = "Tolerance for stopping criteria must be positive"
assert_raise_message(ValueError, msg, LR(tol=-1).fit, X, Y1)
assert_raise_message(ValueError, msg, LR(tol="test").fit, X, Y1)
msg = "Maximum number of iteration must be positive"
assert_raise_message(ValueError, msg, LR(max_iter=-1).fit, X, Y1)
assert_raise_message(ValueError, msg, LR(max_iter="test").fit, X, Y1)
def test_predict_3_classes():
check_predictions(LogisticRegression(C=10), X, Y2)
check_predictions(LogisticRegression(C=10), X_sp, Y2)
def test_predict_iris():
# Test logistic regression with the iris dataset
n_samples, n_features = iris.data.shape
target = iris.target_names[iris.target]
# Test that both multinomial and OvR solvers handle
# multiclass data correctly and give good accuracy
# score (>0.95) for the training data.
for clf in [LogisticRegression(C=len(iris.data)),
LogisticRegression(C=len(iris.data), solver='lbfgs',
multi_class='multinomial'),
LogisticRegression(C=len(iris.data), solver='newton-cg',
multi_class='multinomial')]:
clf.fit(iris.data, target)
assert_array_equal(np.unique(target), clf.classes_)
pred = clf.predict(iris.data)
assert_greater(np.mean(pred == target), .95)
probabilities = clf.predict_proba(iris.data)
assert_array_almost_equal(probabilities.sum(axis=1),
np.ones(n_samples))
pred = iris.target_names[probabilities.argmax(axis=1)]
assert_greater(np.mean(pred == target), .95)
def test_multinomial_validation():
for solver in ['lbfgs', 'newton-cg']:
lr = LogisticRegression(C=-1, solver=solver, multi_class='multinomial')
assert_raises(ValueError, lr.fit, [[0, 1], [1, 0]], [0, 1])
def test_check_solver_option():
X, y = iris.data, iris.target
for LR in [LogisticRegression, LogisticRegressionCV]:
msg = ("Logistic Regression supports only liblinear, newton-cg and"
" lbfgs solvers, got wrong_name")
lr = LR(solver="wrong_name")
assert_raise_message(ValueError, msg, lr.fit, X, y)
msg = "multi_class should be either multinomial or ovr, got wrong_name"
lr = LR(solver='newton-cg', multi_class="wrong_name")
assert_raise_message(ValueError, msg, lr.fit, X, y)
# all solver except 'newton-cg' and 'lfbgs'
for solver in ['liblinear']:
msg = ("Solver %s does not support a multinomial backend." %
solver)
lr = LR(solver=solver, multi_class='multinomial')
assert_raise_message(ValueError, msg, lr.fit, X, y)
# all solvers except 'liblinear'
for solver in ['newton-cg', 'lbfgs']:
msg = ("Solver %s supports only l2 penalties, got l1 penalty." %
solver)
lr = LR(solver=solver, penalty='l1')
assert_raise_message(ValueError, msg, lr.fit, X, y)
msg = ("Solver %s supports only dual=False, got dual=True" %
solver)
lr = LR(solver=solver, dual=True)
assert_raise_message(ValueError, msg, lr.fit, X, y)
def test_multinomial_binary():
# Test multinomial LR on a binary problem.
target = (iris.target > 0).astype(np.intp)
target = np.array(["setosa", "not-setosa"])[target]
for solver in ['lbfgs', 'newton-cg']:
clf = LogisticRegression(solver=solver, multi_class='multinomial')
clf.fit(iris.data, target)
assert_equal(clf.coef_.shape, (1, iris.data.shape[1]))
assert_equal(clf.intercept_.shape, (1,))
assert_array_equal(clf.predict(iris.data), target)
mlr = LogisticRegression(solver=solver, multi_class='multinomial',
fit_intercept=False)
mlr.fit(iris.data, target)
pred = clf.classes_[np.argmax(clf.predict_log_proba(iris.data),
axis=1)]
assert_greater(np.mean(pred == target), .9)
def test_sparsify():
# Test sparsify and densify members.
n_samples, n_features = iris.data.shape
target = iris.target_names[iris.target]
clf = LogisticRegression(random_state=0).fit(iris.data, target)
pred_d_d = clf.decision_function(iris.data)
clf.sparsify()
assert_true(sp.issparse(clf.coef_))
pred_s_d = clf.decision_function(iris.data)
sp_data = sp.coo_matrix(iris.data)
pred_s_s = clf.decision_function(sp_data)
clf.densify()
pred_d_s = clf.decision_function(sp_data)
assert_array_almost_equal(pred_d_d, pred_s_d)
assert_array_almost_equal(pred_d_d, pred_s_s)
assert_array_almost_equal(pred_d_d, pred_d_s)
def test_inconsistent_input():
# Test that an exception is raised on inconsistent input
rng = np.random.RandomState(0)
X_ = rng.random_sample((5, 10))
y_ = np.ones(X_.shape[0])
y_[0] = 0
clf = LogisticRegression(random_state=0)
# Wrong dimensions for training data
y_wrong = y_[:-1]
assert_raises(ValueError, clf.fit, X, y_wrong)
# Wrong dimensions for test data
assert_raises(ValueError, clf.fit(X_, y_).predict,
rng.random_sample((3, 12)))
def test_write_parameters():
# Test that we can write to coef_ and intercept_
clf = LogisticRegression(random_state=0)
clf.fit(X, Y1)
clf.coef_[:] = 0
clf.intercept_[:] = 0
assert_array_almost_equal(clf.decision_function(X), 0)
@raises(ValueError)
def test_nan():
# Test proper NaN handling.
# Regression test for Issue #252: fit used to go into an infinite loop.
Xnan = np.array(X, dtype=np.float64)
Xnan[0, 1] = np.nan
LogisticRegression(random_state=0).fit(Xnan, Y1)
def test_consistency_path():
# Test that the path algorithm is consistent
rng = np.random.RandomState(0)
X = np.concatenate((rng.randn(100, 2) + [1, 1], rng.randn(100, 2)))
y = [1] * 100 + [-1] * 100
Cs = np.logspace(0, 4, 10)
f = ignore_warnings
# can't test with fit_intercept=True since LIBLINEAR
# penalizes the intercept
for method in ('lbfgs', 'newton-cg', 'liblinear'):
coefs, Cs = f(logistic_regression_path)(
X, y, Cs=Cs, fit_intercept=False, tol=1e-16, solver=method)
for i, C in enumerate(Cs):
lr = LogisticRegression(C=C, fit_intercept=False, tol=1e-16)
lr.fit(X, y)
lr_coef = lr.coef_.ravel()
assert_array_almost_equal(lr_coef, coefs[i], decimal=4)
# test for fit_intercept=True
for method in ('lbfgs', 'newton-cg', 'liblinear'):
Cs = [1e3]
coefs, Cs = f(logistic_regression_path)(
X, y, Cs=Cs, fit_intercept=True, tol=1e-4, solver=method)
lr = LogisticRegression(C=Cs[0], fit_intercept=True, tol=1e-4,
intercept_scaling=10000)
lr.fit(X, y)
lr_coef = np.concatenate([lr.coef_.ravel(), lr.intercept_])
assert_array_almost_equal(lr_coef, coefs[0], decimal=4)
def test_liblinear_dual_random_state():
# random_state is relevant for liblinear solver only if dual=True
X, y = make_classification(n_samples=20)
lr1 = LogisticRegression(random_state=0, dual=True, max_iter=1, tol=1e-15)
lr1.fit(X, y)
lr2 = LogisticRegression(random_state=0, dual=True, max_iter=1, tol=1e-15)
lr2.fit(X, y)
lr3 = LogisticRegression(random_state=8, dual=True, max_iter=1, tol=1e-15)
lr3.fit(X, y)
# same result for same random state
assert_array_almost_equal(lr1.coef_, lr2.coef_)
# different results for different random states
msg = "Arrays are not almost equal to 6 decimals"
assert_raise_message(AssertionError, msg,
assert_array_almost_equal, lr1.coef_, lr3.coef_)
def test_logistic_loss_and_grad():
X_ref, y = make_classification(n_samples=20)
n_features = X_ref.shape[1]
X_sp = X_ref.copy()
X_sp[X_sp < .1] = 0
X_sp = sp.csr_matrix(X_sp)
for X in (X_ref, X_sp):
w = np.zeros(n_features)
# First check that our derivation of the grad is correct
loss, grad = _logistic_loss_and_grad(w, X, y, alpha=1.)
approx_grad = optimize.approx_fprime(
w, lambda w: _logistic_loss_and_grad(w, X, y, alpha=1.)[0], 1e-3
)
assert_array_almost_equal(grad, approx_grad, decimal=2)
# Second check that our intercept implementation is good
w = np.zeros(n_features + 1)
loss_interp, grad_interp = _logistic_loss_and_grad(
w, X, y, alpha=1.
)
assert_array_almost_equal(loss, loss_interp)
approx_grad = optimize.approx_fprime(
w, lambda w: _logistic_loss_and_grad(w, X, y, alpha=1.)[0], 1e-3
)
assert_array_almost_equal(grad_interp, approx_grad, decimal=2)
def test_logistic_grad_hess():
rng = np.random.RandomState(0)
n_samples, n_features = 50, 5
X_ref = rng.randn(n_samples, n_features)
y = np.sign(X_ref.dot(5 * rng.randn(n_features)))
X_ref -= X_ref.mean()
X_ref /= X_ref.std()
X_sp = X_ref.copy()
X_sp[X_sp < .1] = 0
X_sp = sp.csr_matrix(X_sp)
for X in (X_ref, X_sp):
w = .1 * np.ones(n_features)
# First check that _logistic_grad_hess is consistent
# with _logistic_loss_and_grad
loss, grad = _logistic_loss_and_grad(w, X, y, alpha=1.)
grad_2, hess = _logistic_grad_hess(w, X, y, alpha=1.)
assert_array_almost_equal(grad, grad_2)
# Now check our hessian along the second direction of the grad
vector = np.zeros_like(grad)
vector[1] = 1
hess_col = hess(vector)
# Computation of the Hessian is particularly fragile to numerical
# errors when doing simple finite differences. Here we compute the
# grad along a path in the direction of the vector and then use a
# least-square regression to estimate the slope
e = 1e-3
d_x = np.linspace(-e, e, 30)
d_grad = np.array([
_logistic_loss_and_grad(w + t * vector, X, y, alpha=1.)[1]
for t in d_x
])
d_grad -= d_grad.mean(axis=0)
approx_hess_col = linalg.lstsq(d_x[:, np.newaxis], d_grad)[0].ravel()
assert_array_almost_equal(approx_hess_col, hess_col, decimal=3)
# Second check that our intercept implementation is good
w = np.zeros(n_features + 1)
loss_interp, grad_interp = _logistic_loss_and_grad(w, X, y, alpha=1.)
loss_interp_2 = _logistic_loss(w, X, y, alpha=1.)
grad_interp_2, hess = _logistic_grad_hess(w, X, y, alpha=1.)
assert_array_almost_equal(loss_interp, loss_interp_2)
assert_array_almost_equal(grad_interp, grad_interp_2)
def test_logistic_cv():
# test for LogisticRegressionCV object
n_samples, n_features = 50, 5
rng = np.random.RandomState(0)
X_ref = rng.randn(n_samples, n_features)
y = np.sign(X_ref.dot(5 * rng.randn(n_features)))
X_ref -= X_ref.mean()
X_ref /= X_ref.std()
lr_cv = LogisticRegressionCV(Cs=[1.], fit_intercept=False,
solver='liblinear')
lr_cv.fit(X_ref, y)
lr = LogisticRegression(C=1., fit_intercept=False)
lr.fit(X_ref, y)
assert_array_almost_equal(lr.coef_, lr_cv.coef_)
assert_array_equal(lr_cv.coef_.shape, (1, n_features))
assert_array_equal(lr_cv.classes_, [-1, 1])
assert_equal(len(lr_cv.classes_), 2)
coefs_paths = np.asarray(list(lr_cv.coefs_paths_.values()))
assert_array_equal(coefs_paths.shape, (1, 3, 1, n_features))
assert_array_equal(lr_cv.Cs_.shape, (1, ))
scores = np.asarray(list(lr_cv.scores_.values()))
assert_array_equal(scores.shape, (1, 3, 1))
def test_logistic_cv_sparse():
X, y = make_classification(n_samples=50, n_features=5,
random_state=0)
X[X < 1.0] = 0.0
csr = sp.csr_matrix(X)
clf = LogisticRegressionCV(fit_intercept=True)
clf.fit(X, y)
clfs = LogisticRegressionCV(fit_intercept=True)
clfs.fit(csr, y)
assert_array_almost_equal(clfs.coef_, clf.coef_)
assert_array_almost_equal(clfs.intercept_, clf.intercept_)
assert_equal(clfs.C_, clf.C_)
def test_intercept_logistic_helper():
n_samples, n_features = 10, 5
X, y = make_classification(n_samples=n_samples, n_features=n_features,
random_state=0)
# Fit intercept case.
alpha = 1.
w = np.ones(n_features + 1)
grad_interp, hess_interp = _logistic_grad_hess(w, X, y, alpha)
loss_interp = _logistic_loss(w, X, y, alpha)
# Do not fit intercept. This can be considered equivalent to adding
# a feature vector of ones, i.e column of one vectors.
X_ = np.hstack((X, np.ones(10)[:, np.newaxis]))
grad, hess = _logistic_grad_hess(w, X_, y, alpha)
loss = _logistic_loss(w, X_, y, alpha)
# In the fit_intercept=False case, the feature vector of ones is
# penalized. This should be taken care of.
assert_almost_equal(loss_interp + 0.5 * (w[-1] ** 2), loss)
# Check gradient.
assert_array_almost_equal(grad_interp[:n_features], grad[:n_features])
assert_almost_equal(grad_interp[-1] + alpha * w[-1], grad[-1])
rng = np.random.RandomState(0)
grad = rng.rand(n_features + 1)
hess_interp = hess_interp(grad)
hess = hess(grad)
assert_array_almost_equal(hess_interp[:n_features], hess[:n_features])
assert_almost_equal(hess_interp[-1] + alpha * grad[-1], hess[-1])
def test_ovr_multinomial_iris():
# Test that OvR and multinomial are correct using the iris dataset.
train, target = iris.data, iris.target
n_samples, n_features = train.shape
# Use pre-defined fold as folds generated for different y
cv = StratifiedKFold(target, 3)
clf = LogisticRegressionCV(cv=cv)
clf.fit(train, target)
clf1 = LogisticRegressionCV(cv=cv)
target_copy = target.copy()
target_copy[target_copy == 0] = 1
clf1.fit(train, target_copy)
assert_array_almost_equal(clf.scores_[2], clf1.scores_[2])
assert_array_almost_equal(clf.intercept_[2:], clf1.intercept_)
assert_array_almost_equal(clf.coef_[2][np.newaxis, :], clf1.coef_)
# Test the shape of various attributes.
assert_equal(clf.coef_.shape, (3, n_features))
assert_array_equal(clf.classes_, [0, 1, 2])
coefs_paths = np.asarray(list(clf.coefs_paths_.values()))
assert_array_almost_equal(coefs_paths.shape, (3, 3, 10, n_features + 1))
assert_equal(clf.Cs_.shape, (10, ))
scores = np.asarray(list(clf.scores_.values()))
assert_equal(scores.shape, (3, 3, 10))
# Test that for the iris data multinomial gives a better accuracy than OvR
for solver in ['lbfgs', 'newton-cg']:
clf_multi = LogisticRegressionCV(
solver=solver, multi_class='multinomial', max_iter=15
)
clf_multi.fit(train, target)
multi_score = clf_multi.score(train, target)
ovr_score = clf.score(train, target)
assert_greater(multi_score, ovr_score)
# Test attributes of LogisticRegressionCV
assert_equal(clf.coef_.shape, clf_multi.coef_.shape)
assert_array_equal(clf_multi.classes_, [0, 1, 2])
coefs_paths = np.asarray(list(clf_multi.coefs_paths_.values()))
assert_array_almost_equal(coefs_paths.shape, (3, 3, 10,
n_features + 1))
assert_equal(clf_multi.Cs_.shape, (10, ))
scores = np.asarray(list(clf_multi.scores_.values()))
assert_equal(scores.shape, (3, 3, 10))
def test_logistic_regression_solvers():
X, y = make_classification(n_features=10, n_informative=5, random_state=0)
clf_n = LogisticRegression(solver='newton-cg', fit_intercept=False)
clf_n.fit(X, y)
clf_lbf = LogisticRegression(solver='lbfgs', fit_intercept=False)
clf_lbf.fit(X, y)
clf_lib = LogisticRegression(fit_intercept=False)
clf_lib.fit(X, y)
assert_array_almost_equal(clf_n.coef_, clf_lib.coef_, decimal=3)
assert_array_almost_equal(clf_lib.coef_, clf_lbf.coef_, decimal=3)
assert_array_almost_equal(clf_n.coef_, clf_lbf.coef_, decimal=3)
def test_logistic_regression_solvers_multiclass():
X, y = make_classification(n_samples=20, n_features=20, n_informative=10,
n_classes=3, random_state=0)
clf_n = LogisticRegression(solver='newton-cg', fit_intercept=False)
clf_n.fit(X, y)
clf_lbf = LogisticRegression(solver='lbfgs', fit_intercept=False)
clf_lbf.fit(X, y)
clf_lib = LogisticRegression(fit_intercept=False)
clf_lib.fit(X, y)
assert_array_almost_equal(clf_n.coef_, clf_lib.coef_, decimal=4)
assert_array_almost_equal(clf_lib.coef_, clf_lbf.coef_, decimal=4)
assert_array_almost_equal(clf_n.coef_, clf_lbf.coef_, decimal=4)
def test_logistic_regressioncv_class_weights():
X, y = make_classification(n_samples=20, n_features=20, n_informative=10,
n_classes=3, random_state=0)
# Test the liblinear fails when class_weight of type dict is
# provided, when it is multiclass. However it can handle
# binary problems.
clf_lib = LogisticRegressionCV(class_weight={0: 0.1, 1: 0.2},
solver='liblinear')
assert_raises(ValueError, clf_lib.fit, X, y)
y_ = y.copy()
y_[y == 2] = 1
clf_lib.fit(X, y_)
assert_array_equal(clf_lib.classes_, [0, 1])
# Test for class_weight=balanced
X, y = make_classification(n_samples=20, n_features=20, n_informative=10,
random_state=0)
clf_lbf = LogisticRegressionCV(solver='lbfgs', fit_intercept=False,
class_weight='balanced')
clf_lbf.fit(X, y)
clf_lib = LogisticRegressionCV(solver='liblinear', fit_intercept=False,
class_weight='balanced')
clf_lib.fit(X, y)
assert_array_almost_equal(clf_lib.coef_, clf_lbf.coef_, decimal=4)
def test_logistic_regression_convergence_warnings():
# Test that warnings are raised if model does not converge
X, y = make_classification(n_samples=20, n_features=20)
clf_lib = LogisticRegression(solver='liblinear', max_iter=2, verbose=1)
assert_warns(ConvergenceWarning, clf_lib.fit, X, y)
assert_equal(clf_lib.n_iter_, 2)
def test_logistic_regression_multinomial():
# Tests for the multinomial option in logistic regression
# Some basic attributes of Logistic Regression
n_samples, n_features, n_classes = 50, 20, 3
X, y = make_classification(n_samples=n_samples,
n_features=n_features,
n_informative=10,
n_classes=n_classes, random_state=0)
clf_int = LogisticRegression(solver='lbfgs', multi_class='multinomial')
clf_int.fit(X, y)
assert_array_equal(clf_int.coef_.shape, (n_classes, n_features))
clf_wint = LogisticRegression(solver='lbfgs', multi_class='multinomial',
fit_intercept=False)
clf_wint.fit(X, y)
assert_array_equal(clf_wint.coef_.shape, (n_classes, n_features))
# Similar tests for newton-cg solver option
clf_ncg_int = LogisticRegression(solver='newton-cg',
multi_class='multinomial')
clf_ncg_int.fit(X, y)
assert_array_equal(clf_ncg_int.coef_.shape, (n_classes, n_features))
clf_ncg_wint = LogisticRegression(solver='newton-cg', fit_intercept=False,
multi_class='multinomial')
clf_ncg_wint.fit(X, y)
assert_array_equal(clf_ncg_wint.coef_.shape, (n_classes, n_features))
# Compare solutions between lbfgs and newton-cg
assert_almost_equal(clf_int.coef_, clf_ncg_int.coef_, decimal=3)
assert_almost_equal(clf_wint.coef_, clf_ncg_wint.coef_, decimal=3)
assert_almost_equal(clf_int.intercept_, clf_ncg_int.intercept_, decimal=3)
# Test that the path give almost the same results. However since in this
# case we take the average of the coefs after fitting across all the
# folds, it need not be exactly the same.
for solver in ['lbfgs', 'newton-cg']:
clf_path = LogisticRegressionCV(solver=solver,
multi_class='multinomial', Cs=[1.])
clf_path.fit(X, y)
assert_array_almost_equal(clf_path.coef_, clf_int.coef_, decimal=3)
assert_almost_equal(clf_path.intercept_, clf_int.intercept_, decimal=3)
def test_multinomial_grad_hess():
rng = np.random.RandomState(0)
n_samples, n_features, n_classes = 100, 5, 3
X = rng.randn(n_samples, n_features)
w = rng.rand(n_classes, n_features)
Y = np.zeros((n_samples, n_classes))
ind = np.argmax(np.dot(X, w.T), axis=1)
Y[range(0, n_samples), ind] = 1
w = w.ravel()
sample_weights = np.ones(X.shape[0])
grad, hessp = _multinomial_grad_hess(w, X, Y, alpha=1.,
sample_weight=sample_weights)
# extract first column of hessian matrix
vec = np.zeros(n_features * n_classes)
vec[0] = 1
hess_col = hessp(vec)
# Estimate hessian using least squares as done in
# test_logistic_grad_hess
e = 1e-3
d_x = np.linspace(-e, e, 30)
d_grad = np.array([
_multinomial_grad_hess(w + t * vec, X, Y, alpha=1.,
sample_weight=sample_weights)[0]
for t in d_x
])
d_grad -= d_grad.mean(axis=0)
approx_hess_col = linalg.lstsq(d_x[:, np.newaxis], d_grad)[0].ravel()
assert_array_almost_equal(hess_col, approx_hess_col)
def test_liblinear_decision_function_zero():
# Test negative prediction when decision_function values are zero.
# Liblinear predicts the positive class when decision_function values
# are zero. This is a test to verify that we do not do the same.
# See Issue: https://github.com/scikit-learn/scikit-learn/issues/3600
# and the PR https://github.com/scikit-learn/scikit-learn/pull/3623
X, y = make_classification(n_samples=5, n_features=5)
clf = LogisticRegression(fit_intercept=False)
clf.fit(X, y)
# Dummy data such that the decision function becomes zero.
X = np.zeros((5, 5))
assert_array_equal(clf.predict(X), np.zeros(5))
def test_liblinear_logregcv_sparse():
# Test LogRegCV with solver='liblinear' works for sparse matrices
X, y = make_classification(n_samples=10, n_features=5)
clf = LogisticRegressionCV(solver='liblinear')
clf.fit(sparse.csr_matrix(X), y)
def test_logreg_intercept_scaling():
# Test that the right error message is thrown when intercept_scaling <= 0
for i in [-1, 0]:
clf = LogisticRegression(intercept_scaling=i)
msg = ('Intercept scaling is %r but needs to be greater than 0.'
' To disable fitting an intercept,'
' set fit_intercept=False.' % clf.intercept_scaling)
assert_raise_message(ValueError, msg, clf.fit, X, Y1)
def test_logreg_intercept_scaling_zero():
# Test that intercept_scaling is ignored when fit_intercept is False
clf = LogisticRegression(fit_intercept=False)
clf.fit(X, Y1)
assert_equal(clf.intercept_, 0.)
def test_logreg_cv_penalty():
# Test that the correct penalty is passed to the final fit.
X, y = make_classification(n_samples=50, n_features=20, random_state=0)
lr_cv = LogisticRegressionCV(penalty="l1", Cs=[1.0], solver='liblinear')
lr_cv.fit(X, y)
lr = LogisticRegression(penalty="l1", C=1.0, solver='liblinear')
lr.fit(X, y)
assert_equal(np.count_nonzero(lr_cv.coef_), np.count_nonzero(lr.coef_))
| bsd-3-clause |
openfisca/openfisca-france-indirect-taxation | openfisca_france_indirect_taxation/examples/all_consumptions/plot_logement_decile.py | 4 | 2329 | # -*- coding: utf-8 -*-
"""
Created on Wed Apr 29 11:00:08 2015
@author: Etienne
"""
# OpenFisca -- A versatile microsimulation software
# By: OpenFisca Team <[email protected]>
#
# Copyright (C) 2011, 2012, 2013, 2014 OpenFisca Team
# https://github.com/openfisca
#
# This file is part of OpenFisca.
#
# OpenFisca is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# OpenFisca is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import division
from pandas import concat
from openfisca_france_indirect_taxation.examples.utils_example import simulate, df_weighted_average_grouped, \
graph_builder_line_percent
if __name__ == '__main__':
import logging
log = logging.getLogger(__name__)
import sys
logging.basicConfig(level = logging.INFO, stream = sys.stdout)
list_coicop12 = []
for coicop12_index in range(1, 13):
list_coicop12.append('coicop12_{}'.format(coicop12_index))
simulated_variables = [
'pondmen',
'niveau_vie_decile',
'somme_coicop12',
]
simulated_variables += list_coicop12
p = dict()
df_to_graph = None
for year in [2000, 2005, 2011]:
simulation_data_frame = simulate(simulated_variables = simulated_variables, year = year)
aggregates_data_frame = df_weighted_average_grouped(dataframe = simulation_data_frame,
groupe = 'niveau_vie_decile', varlist = simulated_variables)
aggregates_data_frame[year] = aggregates_data_frame['coicop12_4'] / aggregates_data_frame['somme_coicop12']
appendable = aggregates_data_frame[year]
if df_to_graph is not None:
df_to_graph = concat([df_to_graph, appendable], axis = 1)
else:
df_to_graph = appendable
graph_builder_line_percent(df_to_graph, 1, 1)
| agpl-3.0 |
dashee87/cluster-flag | setup.py | 1 | 1138 | from setuptools import setup
import clusterflag
setup(name='cluster-flag',
version=clusterflag.__version__,
url='https://github.com/dashee87/cluster-flag',
author='David Sheehan',
author_email='[email protected]',
description='Generate country flags with numpy and pandas',
keywords='country flag numpy pandas',
classifiers=[
'Development Status :: 3 - Alpha',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Operating System :: OS Independent',
'Intended Audience :: Science/Research',
'Intended Audience :: Developers',
'Intended Audience :: Education',
'Topic :: Scientific/Engineering :: Visualization'
],
license='MIT',
packages=['clusterflag'],
install_requires=[
'pandas>=0.17.1',
'numpy>=1.11.0'],
zip_safe=False) | mit |
dionyziz/rupture | etc/theory/experiments/rupture_performance/plot.py | 4 | 2506 | import matplotlib.pyplot as plt
from collections import OrderedDict
'''
# Divide&conquer adaptive (keeping only the last 2 known chars) on ruptureit, try 1
seconds = OrderedDict([
('aes128cbc', [0, 11, 8, 5, 6, 6, 11]), # 47
('aes128gcm', [0, 6, 8, 6, 5, 6, 7]), # 38
('aes256cbc', [0, 7, 7, 5, 6, 6, 9]), # 40
('aes256gcm', [0, 10, 8, 6, 8, 9, 7]) # 48
])
title = 'Rupture divide&conquer against block ciphers'
filename = 'div_1.png'
'''
'''
# Divide&conquer adaptive (keeping only the last 2 known chars) on ruptureit, try 2
seconds = OrderedDict([
('aes128cbc', [0, 10, 11, 4, 6, 10, 17]), # 58
('aes128gcm', [0, 7, 6, 5, 6, 7, 6]), # 37
('aes256cbc', [0, 8, 7, 20, 7, 14, 14]), # 70
('aes256gcm', [0, 8, 6, 6, 5, 7, 7]), # 39
])
title = 'Rupture divide&conquer against block ciphers'
filename = 'div_2.png'
'''
'''
# Divide&conquer adaptive (keeping only the last 2 known chars) on ruptureit, try 3
seconds = OrderedDict([
('aes128cbc', [0, 9, 7, 5, 8, 7, 8]), # 44
('aes128gcm', [0, 7, 9, 5, 6, 6, 7]), # 40
('aes256cbc', [0, 15, 7, 8, 8, 8, 14]), # 60
('aes256gcm', [0, 10, 6, 6, 5, 7, 6]), # 40
])
title = 'Rupture divide&conquer against block ciphers'
filename = 'div_3.png'
'''
# Serial adaptive (keeping only the last 2 known chars) on ruptureit
seconds = OrderedDict([
('aes128cbc', [0, 18, 16, 17, 17, 18, 17, 18, 18, 18, 17, 16, 20, 18, 33, 37, 17, 16, 16, 15, 16, 17, 19, 51]), # 465
('aes128gcm', [0, 19, 20, 19, 18, 17, 20, 19, 17, 16, 19, 16, 17, 17, 17, 19, 17, 17, 19, 18, 22, 17, 17, 20]), # 417
('aes256cbc', [0, 22, 18, 21, 19, 18, 37, 18, 19, 20, 19, 17, 19, 36, 18, 16, 18, 19, 18, 34, 18, 18, 18, 19]), # 479
('aes256gcm', [0, 18, 18, 21, 18, 21, 20, 18, 20, 22, 20, 18, 19, 16, 17, 18, 15, 15, 18, 17, 17, 16, 16, 18]) # 416
])
title = 'Rupture serial against block ciphers'
filename = 'rupture_serial_performance.png'
letters = [i for i in range(len(seconds['aes128cbc']))]
aggregated_seconds = OrderedDict()
for ciph, timings in seconds.items():
aggregated_seconds[ciph] = []
prev = 0
for t in timings:
aggregated_seconds[ciph].append(prev+t)
prev += t
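# The loop above accumulates a running total, so aggregated_seconds[ciph] is
# the cumulative time at which each successive character was recovered
# (numpy.cumsum(timings) would produce the same values).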
font = {
'size': 12
}
plt.rc('font', **font)
plt.title(title, y=1.01)
plt.ylabel('Decrypted characters')
plt.xlabel('Time (sec)')
for i in aggregated_seconds:
plt.plot(aggregated_seconds[i], letters)
plt.legend([i for i in aggregated_seconds])
plt.ylim(ymin=0)
plt.xlim(xmin=0)
plt.savefig(filename)
| mit |
bigdataelephants/scikit-learn | sklearn/feature_selection/tests/test_base.py | 170 | 3666 | import numpy as np
from scipy import sparse as sp
from nose.tools import assert_raises, assert_equal
from numpy.testing import assert_array_equal
from sklearn.base import BaseEstimator
from sklearn.feature_selection.base import SelectorMixin
from sklearn.utils import check_array
class StepSelector(SelectorMixin, BaseEstimator):
"""Retain every `step` features (beginning with 0)"""
def __init__(self, step=2):
self.step = step
def fit(self, X, y=None):
X = check_array(X, 'csc')
self.n_input_feats = X.shape[1]
return self
def _get_support_mask(self):
mask = np.zeros(self.n_input_feats, dtype=bool)
mask[::self.step] = True
return mask
support = [True, False] * 5
support_inds = [0, 2, 4, 6, 8]
X = np.arange(20).reshape(2, 10)
Xt = np.arange(0, 20, 2).reshape(2, 5)
Xinv = X.copy()
Xinv[:, 1::2] = 0
y = [0, 1]
feature_names = list('ABCDEFGHIJ')
feature_names_t = feature_names[::2]
feature_names_inv = np.array(feature_names)
feature_names_inv[1::2] = ''
def test_transform_dense():
sel = StepSelector()
Xt_actual = sel.fit(X, y).transform(X)
Xt_actual2 = StepSelector().fit_transform(X, y)
assert_array_equal(Xt, Xt_actual)
assert_array_equal(Xt, Xt_actual2)
# Check dtype matches
assert_equal(np.int32, sel.transform(X.astype(np.int32)).dtype)
assert_equal(np.float32, sel.transform(X.astype(np.float32)).dtype)
# Check 1d list and other dtype:
names_t_actual = sel.transform(feature_names)
assert_array_equal(feature_names_t, names_t_actual.ravel())
# Check wrong shape raises error
assert_raises(ValueError, sel.transform, np.array([[1], [2]]))
def test_transform_sparse():
sparse = sp.csc_matrix
sel = StepSelector()
Xt_actual = sel.fit(sparse(X)).transform(sparse(X))
Xt_actual2 = sel.fit_transform(sparse(X))
assert_array_equal(Xt, Xt_actual.toarray())
assert_array_equal(Xt, Xt_actual2.toarray())
# Check dtype matches
assert_equal(np.int32, sel.transform(sparse(X).astype(np.int32)).dtype)
assert_equal(np.float32, sel.transform(sparse(X).astype(np.float32)).dtype)
# Check wrong shape raises error
assert_raises(ValueError, sel.transform, np.array([[1], [2]]))
def test_inverse_transform_dense():
sel = StepSelector()
Xinv_actual = sel.fit(X, y).inverse_transform(Xt)
assert_array_equal(Xinv, Xinv_actual)
# Check dtype matches
assert_equal(np.int32,
sel.inverse_transform(Xt.astype(np.int32)).dtype)
assert_equal(np.float32,
sel.inverse_transform(Xt.astype(np.float32)).dtype)
# Check 1d list and other dtype:
names_inv_actual = sel.inverse_transform(feature_names_t)
assert_array_equal(feature_names_inv, names_inv_actual.ravel())
# Check wrong shape raises error
assert_raises(ValueError, sel.inverse_transform, np.array([[1], [2]]))
def test_inverse_transform_sparse():
sparse = sp.csc_matrix
sel = StepSelector()
Xinv_actual = sel.fit(sparse(X)).inverse_transform(sparse(Xt))
assert_array_equal(Xinv, Xinv_actual.toarray())
# Check dtype matches
assert_equal(np.int32,
sel.inverse_transform(sparse(Xt).astype(np.int32)).dtype)
assert_equal(np.float32,
sel.inverse_transform(sparse(Xt).astype(np.float32)).dtype)
# Check wrong shape raises error
assert_raises(ValueError, sel.inverse_transform, np.array([[1], [2]]))
def test_get_support():
sel = StepSelector()
sel.fit(X, y)
assert_array_equal(support, sel.get_support())
assert_array_equal(support_inds, sel.get_support(indices=True))
| bsd-3-clause |
broxtronix/thunder | python/thunder/extraction/block/methods/nmf.py | 8 | 3873 | from thunder.extraction.block.base import BlockMethod, BlockAlgorithm
from thunder.extraction.source import Source
class BlockNMF(BlockMethod):
def __init__(self, **kwargs):
algorithm = NMFBlockAlgorithm(**kwargs)
super(self.__class__, self).__init__(algorithm, **kwargs)
class NMFBlockAlgorithm(BlockAlgorithm):
"""
Find sources using non-negative matrix factorization on blocks.
NMF on each block provides a candidate set of basis functions.
These are then converted into regions using simple morphological operators:
labeling connected components, and removing all that fail to meet
min and max size thresholds.
Parameters
----------
maxIter : int, optional, default = 10
Maximum number of iterations
    componentsPerBlock : int, optional, default = 3
        Number of components to find per block
    percentile : int, optional, default = 75
        Percentile used to threshold each basis function into a candidate region
    minArea : int, optional, default = 50
        Minimum region size kept after connected-component labeling
    maxArea : int or "block", optional, default = "block"
        Maximum region size; "block" means half the number of pixels in the block
    medFilter : int, optional, default = 2
        Size of the median filter applied to candidate regions (None or 0 to skip)
    overlap : float, optional, default = 0.4
        Fractional overlap above which candidate sources are merged (None or 0 to skip)
    """
def __init__(self, maxIter=10, componentsPerBlock=3, percentile=75,
minArea=50, maxArea="block", medFilter=2, overlap=0.4, **extra):
self.maxIter = maxIter
self.componentsPerBlock = componentsPerBlock
self.percentile = percentile
self.minArea = minArea
self.maxArea = maxArea
self.medFilter = medFilter if medFilter is not None and medFilter > 0 else None
self.overlap = overlap if overlap is not None and overlap > 0 else None
def extract(self, block):
from numpy import clip, inf, percentile, asarray, where, size, prod, unique
from scipy.ndimage import median_filter
from sklearn.decomposition import NMF
from skimage.measure import label
from skimage.morphology import remove_small_objects
# get dimensions
n = self.componentsPerBlock
dims = block.shape[1:]
# handle maximum size
if self.maxArea == "block":
maxArea = prod(dims) / 2
else:
maxArea = self.maxArea
# reshape to be t x all spatial dimensions
data = block.reshape(block.shape[0], -1)
# build and apply NMF model to block
model = NMF(n, max_iter=self.maxIter)
model.fit(clip(data, 0, inf))
# reconstruct sources as spatial objects in one array
comps = model.components_.reshape((n,) + dims)
# convert from basis functions into shape
# by median filtering (optional), applying a threshold,
# finding connected components and removing small objects
combined = []
for c in comps:
tmp = c > percentile(c, self.percentile)
regions = remove_small_objects(label(tmp), min_size=self.minArea)
ids = unique(regions)
ids = ids[ids > 0]
for ii in ids:
r = regions == ii
if self.medFilter is not None:
r = median_filter(r, self.medFilter)
coords = asarray(where(r)).T
if (size(coords) > 0) and (size(coords) < maxArea):
combined.append(Source(coords))
# merge overlapping sources
if self.overlap is not None:
# iterate over source pairs and find a pair to merge
def merge(sources):
for i1, s1 in enumerate(sources):
for i2, s2 in enumerate(sources[i1+1:]):
if s1.overlap(s2) > self.overlap:
return i1, i1 + 1 + i2
return None
# merge pairs until none left to merge
pair = merge(combined)
testing = True
while testing:
if pair is None:
testing = False
else:
combined[pair[0]].merge(combined[pair[1]])
del combined[pair[1]]
pair = merge(combined)
return combined
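# --- Illustrative sketch (added; not part of the original module) ---
# A minimal, standalone example of the basis-function-to-region step that the
# NMFBlockAlgorithm docstring describes: threshold a component at a percentile,
# label connected components, and drop regions below a size cutoff. The
# function name and default values are assumptions made for illustration only.
def _example_component_to_regions(component, pct=75, min_area=5):
    from numpy import percentile, unique, asarray, where
    from skimage.measure import label
    from skimage.morphology import remove_small_objects
    mask = component > percentile(component, pct)
    regions = remove_small_objects(label(mask), min_size=min_area)
    ids = unique(regions)
    # one coordinate list per surviving connected component
    return [asarray(where(regions == i)).T for i in ids if i > 0]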
| apache-2.0 |
Averroes/statsmodels | statsmodels/examples/ex_multivar_kde.py | 34 | 1504 |
from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
from mpl_toolkits.mplot3d import axes3d
import statsmodels.api as sm
"""
This example illustrates the nonparametric estimation of a
bivariate bi-modal distribution that is a mixture of two normal
distributions.
author: George Panterov
"""
if __name__ == '__main__':
np.random.seed(123456)
# generate the data
nobs = 500
BW = 'cv_ml'
mu1 = [3, 4]
mu2 = [6, 1]
cov1 = np.asarray([[1, 0.7], [0.7, 1]])
cov2 = np.asarray([[1, -0.7], [-0.7, 1]])
ix = np.random.uniform(size=nobs) > 0.5
V = np.random.multivariate_normal(mu1, cov1, size=nobs)
V[ix, :] = np.random.multivariate_normal(mu2, cov2, size=nobs)[ix, :]
x = V[:, 0]
y = V[:, 1]
dens = sm.nonparametric.KDEMultivariate(data=[x, y], var_type='cc', bw=BW,
defaults=sm.nonparametric.EstimatorSettings(efficient=True))
supportx = np.linspace(min(x), max(x), 60)
supporty = np.linspace(min(y), max(y), 60)
X, Y = np.meshgrid(supportx, supporty)
edat = np.column_stack([X.ravel(), Y.ravel()])
Z = dens.pdf(edat).reshape(X.shape)
# plot
fig = plt.figure(1)
ax = fig.gca(projection='3d')
surf = ax.plot_surface(X, Y, Z, rstride=1, cstride=1, cmap=cm.jet,
linewidth=0, antialiased=False)
fig.colorbar(surf, shrink=0.5, aspect=5)
plt.figure(2)
plt.imshow(Z)
plt.show()
| bsd-3-clause |
narendrameena/featuerSelectionAssignment | crossValidation.py | 1 | 3370 | import numpy as np
from sklearn import cross_validation
from sklearn import svm
from sklearn.svm import LinearSVC
from sklearn.datasets import load_svmlight_file
from sklearn.pipeline import make_pipeline
from sklearn.feature_selection import SelectFromModel
from sklearn.feature_selection import RFE
from sklearn.cross_validation import StratifiedKFold
from sklearn.feature_selection import RFECV
import matplotlib.pyplot as plt
#data
data = load_svmlight_file("leu")
#X_1 = data[0].todense().tolist() # samples 72 features above 7192
#y_1 = map(int,data[1]) # classes 2
print(data[0].shape)
print(data[1].shape)
# value of C
c = 1.9 #SVM soft-margin constant
print("SVM soft-margin constant %d", c)
#test_size= 0.2 # selecting number of samples
#X_train, X_test, y_train, y_test = cross_validation.train_test_split(data[0],data[1], test_size=0.2, random_state=1) # use it for subsampling
'''
print(X_train.shape)
print(y_train.shape)
print(X_test.shape)
print(y_test.shape)
'''
#L1 SVM
clf = LinearSVC(penalty='l1', dual=False, C=c)
scores = cross_validation.cross_val_score(clf, data[0], data[1], cv=10)
print(scores)
print("L1 SVM \n Accuracy: %0.2f (+/- %0.2f)" % (scores.mean(), scores.std() * 2))
#L2 SVM trained on all the features
clf = LinearSVC(penalty='l2',dual=False,C=c)
scores = cross_validation.cross_val_score(clf, data[0], data[1], cv=10)
print(scores)
print("L2 SVM trained on all the features \n Accuracy: %0.2f (+/- %0.2f)" % (scores.mean(), scores.std() * 2))
#L2 SVM trained on the features selected by the L1 SVM
clf = LinearSVC(penalty='l1', dual=False,C=c).fit(data[0], data[1])
model = SelectFromModel(clf, prefit=True)
X = model.transform(data[0])
print(X.shape)
clf = LinearSVC(penalty='l2',dual=False,C =c)
scores = cross_validation.cross_val_score(clf, X, data[1], cv=10)
print(scores)
print("L2 SVM trained on the features selected by the L1 SVM. \n Accuracy: %0.2f (+/- %0.2f)" % (scores.mean(), scores.std() * 2))
#L2 SVM that uses the class RFECV, which automatically selects the number of features
clf = LinearSVC(penalty='l2',dual=False, C= c)
# The "accuracy" scoring is proportional to the number of correct
# classifications
print(data[1].shape)
rfecv = RFECV(estimator=clf, step=1, cv=StratifiedKFold(data[1], 2),scoring='accuracy')
rfecv.fit(data[0], data[1])
#scores = cross_validation.cross_val_score(rfecv, data[0], data[1], cv=10)
print("Optimal number of features : %d" % rfecv.n_features_)
scores = rfecv.grid_scores_
print(scores)
print("L2 SVM that use the class RFECV which automatically selects the number of features. \n Accuracy: %0.2f (+/- %0.2f)" % (scores.mean(), scores.std() * 2))
'''
# Plot number of features VS. cross-validation scores
plt.figure()
plt.xlabel("Number of features selected")
plt.ylabel("Cross validation score (nb of correct classifications)")
plt.plot(range(1, len(rfecv.grid_scores_) + 1), rfecv.grid_scores_)
plt.show()
'''
#L2 SVM that uses RFE (with an L2-SVM) to select relevant features
clf = LinearSVC(penalty='l2',dual=False, C =c)
rfe = RFE(estimator=clf, n_features_to_select=10, step=1)
rfe.fit(data[0], data[1])
scores = cross_validation.cross_val_score(rfe.estimator_ , data[0], data[1], cv=10)
print(scores)
print("L2 SVM that uses RFE (with an L2-SVM) to select relevant features. \n Accuracy: %0.2f (+/- %0.2f)" % (scores.mean(), scores.std() * 2))
| cc0-1.0 |
xu-hong/AirbnbScrape | DataCleanAirbnb.py | 3 | 3629 | import pandas as pd
import numpy as np
import datetime as dt
import sexmachine.detector as gender
# Read data as a pandas dataframe
def DataClean(data):
#convert datetime to membership length
data['MemberDateNew'] = pd.DataFrame(data['MemberDate']).applymap(DeleteSpace).MemberDate
data['MemberLength'] = dt.datetime.strptime('2014-11-25', '%Y-%m-%d') - data.MemberDateNew.apply(lambda x: dt.datetime.strptime(x, '%Y-%m-%d'))
data.loc[:, 'MemberLength'] = data['MemberLength'].apply(TimeDelta)
    # Detect host gender from host first names
data['HostGender'] = gender(data)
#parse short description
data['SD_PropType'] = data.ShortDesc.apply(getPropType)
data['SD_NumReviews'] = data.ShortDesc.apply(getNumReviews)
data['SD_Neighborhood'] = data.ShortDesc.apply(getNeighborhood)
    # Save the cleaned data
data.to_csv('Final_v2.csv')
#return the cleaned variables
return data[['MemberLength', 'HostGender','SD_PropType','SD_NumReviews', 'SD_Neighborhood']]
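# Illustrative usage of DataClean (the input file name below is hypothetical):
#
#     listings = pd.read_csv('airbnb_listings.csv')
#     cleaned_cols = DataClean(listings)   # also writes Final_v2.csv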
# Create a new variable 'MemberDateNew' indicating the date when the host became a member
# The 1st day of the month is used for all hosts
def DeleteSpace(x):
try:
return str(dt.datetime.strptime(' ' .join(x.split(' ')[-2:]), '%B %Y').date())
except:
return '9999-01-01' # for errorneous values return 9999-01-01
# Create a new variable 'Memberlength' indicating days of membership of the host
def TimeDelta(x):
days = x.astype('timedelta64[D]')
return days / np.timedelta64(1, 'D')
# Create a list of host genders and attach the list to the dataset
def gender(dataframe):
gender_list = []
import sexmachine.detector as gender
d = gender.Detector()
#read in host names
host_name = dataframe["HostName"]
    #loop over every host name and infer the gender
for hostname in host_name:
if "&" in hostname or "And" in hostname or "/" in hostname:
gender = "couple"
else :
first_name = hostname.split(" ")[0]
gender = d.get_gender(first_name).encode('utf8')
gender_list.append(gender)
return gender_list
def parseShortDesc(x, index):
"""
This is a helper function that parses the ShortDesc field in the raw data into a
list of three distinct values
"""
parsedVals = []
finalVals = []
    #The data had two different types of "delimiters" in the Short Desc field; we try one, and if that doesn't
#work, we try the other one.
vals = x.split('\xcc\xe2\xe5\xe1')
if len(vals) < 2:
vals = x.split('\x95\xc0_\x95\xc0_')
#Clean Up non-essential words in the results set such as spaces, newlines and the word "reviews"
for val in vals:
temp = val.strip().replace(' reviews', '').replace(' review', '')
parsedVals.append(temp)
#Determine if there are missing values in the list and fill in the blanks
if len(parsedVals) == 3:
finalVals = parsedVals[:]
if len(parsedVals) == 2:
finalVals = [parsedVals[0], 0, parsedVals[1]]
if len(parsedVals) < 2:
finalVals = ['Unknown', 0, 'Unknown']
return finalVals[index]
def getPropType(x):
"""
Gets the property type from the
ShortDesc field
"""
return parseShortDesc(x, 0)
def getNumReviews(x):
"""
Gets the number of reviews from the
ShortDesc field
"""
return parseShortDesc(x,1)
def getNeighborhood(x):
"""
Gets the neighborhood from the
ShortDesc field
"""
return parseShortDesc(x, 2)
| mit |
bzero/networkx | examples/graph/napoleon_russian_campaign.py | 44 | 3216 | #!/usr/bin/env python
"""
Minard's data from Napoleon's 1812-1813 Russian Campaign.
http://www.math.yorku.ca/SCS/Gallery/minard/minard.txt
"""
__author__ = """Aric Hagberg ([email protected])"""
# Copyright (C) 2006 by
# Aric Hagberg <[email protected]>
# Dan Schult <[email protected]>
# Pieter Swart <[email protected]>
# All rights reserved.
# BSD license.
import string
import networkx as nx
def minard_graph():
data1="""\
24.0,54.9,340000,A,1
24.5,55.0,340000,A,1
25.5,54.5,340000,A,1
26.0,54.7,320000,A,1
27.0,54.8,300000,A,1
28.0,54.9,280000,A,1
28.5,55.0,240000,A,1
29.0,55.1,210000,A,1
30.0,55.2,180000,A,1
30.3,55.3,175000,A,1
32.0,54.8,145000,A,1
33.2,54.9,140000,A,1
34.4,55.5,127100,A,1
35.5,55.4,100000,A,1
36.0,55.5,100000,A,1
37.6,55.8,100000,A,1
37.7,55.7,100000,R,1
37.5,55.7,98000,R,1
37.0,55.0,97000,R,1
36.8,55.0,96000,R,1
35.4,55.3,87000,R,1
34.3,55.2,55000,R,1
33.3,54.8,37000,R,1
32.0,54.6,24000,R,1
30.4,54.4,20000,R,1
29.2,54.3,20000,R,1
28.5,54.2,20000,R,1
28.3,54.3,20000,R,1
27.5,54.5,20000,R,1
26.8,54.3,12000,R,1
26.4,54.4,14000,R,1
25.0,54.4,8000,R,1
24.4,54.4,4000,R,1
24.2,54.4,4000,R,1
24.1,54.4,4000,R,1"""
data2="""\
24.0,55.1,60000,A,2
24.5,55.2,60000,A,2
25.5,54.7,60000,A,2
26.6,55.7,40000,A,2
27.4,55.6,33000,A,2
28.7,55.5,33000,R,2
29.2,54.2,30000,R,2
28.5,54.1,30000,R,2
28.3,54.2,28000,R,2"""
data3="""\
24.0,55.2,22000,A,3
24.5,55.3,22000,A,3
24.6,55.8,6000,A,3
24.6,55.8,6000,R,3
24.2,54.4,6000,R,3
24.1,54.4,6000,R,3"""
cities="""\
24.0,55.0,Kowno
25.3,54.7,Wilna
26.4,54.4,Smorgoni
26.8,54.3,Moiodexno
27.7,55.2,Gloubokoe
27.6,53.9,Minsk
28.5,54.3,Studienska
28.7,55.5,Polotzk
29.2,54.4,Bobr
30.2,55.3,Witebsk
30.4,54.5,Orscha
30.4,53.9,Mohilow
32.0,54.8,Smolensk
33.2,54.9,Dorogobouge
34.3,55.2,Wixma
34.4,55.5,Chjat
36.0,55.5,Mojaisk
37.6,55.8,Moscou
36.6,55.3,Tarantino
36.5,55.0,Malo-Jarosewii"""
c={}
for line in cities.split('\n'):
x,y,name=line.split(',')
c[name]=(float(x),float(y))
g=[]
for data in [data1,data2,data3]:
G=nx.Graph()
i=0
G.pos={} # location
G.pop={} # size
last=None
for line in data.split('\n'):
x,y,p,r,n=line.split(',')
G.pos[i]=(float(x),float(y))
G.pop[i]=int(p)
if last is None:
last=i
else:
G.add_edge(i,last,{r:int(n)})
last=i
i=i+1
g.append(G)
return g,c
if __name__ == "__main__":
(g,city)=minard_graph()
try:
import matplotlib.pyplot as plt
plt.figure(1,figsize=(11,5))
plt.clf()
colors=['b','g','r']
for G in g:
c=colors.pop(0)
node_size=[int(G.pop[n]/300.0) for n in G]
nx.draw_networkx_edges(G,G.pos,edge_color=c,width=4,alpha=0.5)
nx.draw_networkx_nodes(G,G.pos,node_size=node_size,node_color=c,alpha=0.5)
nx.draw_networkx_nodes(G,G.pos,node_size=5,node_color='k')
for c in city:
x,y=city[c]
plt.text(x,y+0.1,c)
plt.savefig("napoleon_russian_campaign.png")
except ImportError:
pass
| bsd-3-clause |
wangyum/tensorflow | tensorflow/contrib/learn/python/learn/grid_search_test.py | 137 | 2035 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Grid search tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import random
from tensorflow.contrib.learn.python import learn
from tensorflow.python.platform import test
HAS_SKLEARN = os.environ.get('TENSORFLOW_SKLEARN', False)
if HAS_SKLEARN:
try:
# pylint: disable=g-import-not-at-top
from sklearn import datasets
from sklearn.grid_search import GridSearchCV
from sklearn.metrics import accuracy_score
except ImportError:
HAS_SKLEARN = False
class GridSearchTest(test.TestCase):
"""Grid search tests."""
def testIrisDNN(self):
if HAS_SKLEARN:
random.seed(42)
iris = datasets.load_iris()
feature_columns = learn.infer_real_valued_columns_from_input(iris.data)
classifier = learn.DNNClassifier(
feature_columns=feature_columns,
hidden_units=[10, 20, 10],
n_classes=3)
grid_search = GridSearchCV(
classifier, {'hidden_units': [[5, 5], [10, 10]]},
scoring='accuracy',
fit_params={'steps': [50]})
grid_search.fit(iris.data, iris.target)
score = accuracy_score(iris.target, grid_search.predict(iris.data))
self.assertGreater(score, 0.5, 'Failed with score = {0}'.format(score))
if __name__ == '__main__':
test.main()
| apache-2.0 |
pythonvietnam/scikit-learn | sklearn/datasets/twenty_newsgroups.py | 126 | 13591 | """Caching loader for the 20 newsgroups text classification dataset
The description of the dataset is available on the official website at:
http://people.csail.mit.edu/jrennie/20Newsgroups/
Quoting the introduction:
The 20 Newsgroups data set is a collection of approximately 20,000
newsgroup documents, partitioned (nearly) evenly across 20 different
newsgroups. To the best of my knowledge, it was originally collected
by Ken Lang, probably for his Newsweeder: Learning to filter netnews
paper, though he does not explicitly mention this collection. The 20
newsgroups collection has become a popular data set for experiments
in text applications of machine learning techniques, such as text
classification and text clustering.
This dataset loader will download the recommended "by date" variant of the
dataset, which features a point-in-time split between the train and
test sets. The compressed dataset size is around 14 MB. Once
uncompressed, the train set is 52 MB and the test set is 34 MB.
The data is downloaded, extracted and cached in the '~/scikit_learn_data'
folder.
The `fetch_20newsgroups` function will not vectorize the data into numpy
arrays but the dataset lists the filenames of the posts and their categories
as target labels.
The `fetch_20newsgroups_vectorized` function will in addition do a simple
tf-idf vectorization step.
"""
# Copyright (c) 2011 Olivier Grisel <[email protected]>
# License: BSD 3 clause
import os
import logging
import tarfile
import pickle
import shutil
import re
import codecs
import numpy as np
import scipy.sparse as sp
from .base import get_data_home
from .base import Bunch
from .base import load_files
from ..utils import check_random_state
from ..feature_extraction.text import CountVectorizer
from ..preprocessing import normalize
from ..externals import joblib, six
if six.PY3:
from urllib.request import urlopen
else:
from urllib2 import urlopen
logger = logging.getLogger(__name__)
URL = ("http://people.csail.mit.edu/jrennie/"
"20Newsgroups/20news-bydate.tar.gz")
ARCHIVE_NAME = "20news-bydate.tar.gz"
CACHE_NAME = "20news-bydate.pkz"
TRAIN_FOLDER = "20news-bydate-train"
TEST_FOLDER = "20news-bydate-test"
def download_20newsgroups(target_dir, cache_path):
"""Download the 20 newsgroups data and stored it as a zipped pickle."""
archive_path = os.path.join(target_dir, ARCHIVE_NAME)
train_path = os.path.join(target_dir, TRAIN_FOLDER)
test_path = os.path.join(target_dir, TEST_FOLDER)
if not os.path.exists(target_dir):
os.makedirs(target_dir)
if os.path.exists(archive_path):
# Download is not complete as the .tar.gz file is removed after
# download.
logger.warning("Download was incomplete, downloading again.")
os.remove(archive_path)
logger.warning("Downloading dataset from %s (14 MB)", URL)
opener = urlopen(URL)
with open(archive_path, 'wb') as f:
f.write(opener.read())
logger.info("Decompressing %s", archive_path)
tarfile.open(archive_path, "r:gz").extractall(path=target_dir)
os.remove(archive_path)
# Store a zipped pickle
cache = dict(train=load_files(train_path, encoding='latin1'),
test=load_files(test_path, encoding='latin1'))
compressed_content = codecs.encode(pickle.dumps(cache), 'zlib_codec')
with open(cache_path, 'wb') as f:
f.write(compressed_content)
shutil.rmtree(target_dir)
return cache
def strip_newsgroup_header(text):
"""
Given text in "news" format, strip the headers, by removing everything
before the first blank line.
"""
_before, _blankline, after = text.partition('\n\n')
return after
_QUOTE_RE = re.compile(r'(writes in|writes:|wrote:|says:|said:'
r'|^In article|^Quoted from|^\||^>)')
def strip_newsgroup_quoting(text):
"""
Given text in "news" format, strip lines beginning with the quote
characters > or |, plus lines that often introduce a quoted section
(for example, because they contain the string 'writes:'.)
"""
good_lines = [line for line in text.split('\n')
if not _QUOTE_RE.search(line)]
return '\n'.join(good_lines)
def strip_newsgroup_footer(text):
"""
Given text in "news" format, attempt to remove a signature block.
As a rough heuristic, we assume that signatures are set apart by either
a blank line or a line made of hyphens, and that it is the last such line
in the file (disregarding blank lines at the end).
"""
lines = text.strip().split('\n')
for line_num in range(len(lines) - 1, -1, -1):
line = lines[line_num]
if line.strip().strip('-') == '':
break
if line_num > 0:
return '\n'.join(lines[:line_num])
else:
return text
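# Illustrative example of the three strippers above on a fabricated post
# (the text is made up purely for demonstration):
#
#     post = ("From: [email protected]\nSubject: hi\n\n"
#             "someone writes:\n> quoted line\nmy reply\n\n--\nsig")
#     body = strip_newsgroup_header(post)    # drops everything before the blank line
#     body = strip_newsgroup_quoting(body)   # drops the 'writes:' and '>' lines
#     body = strip_newsgroup_footer(body)    # drops the signature block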
def fetch_20newsgroups(data_home=None, subset='train', categories=None,
shuffle=True, random_state=42,
remove=(),
download_if_missing=True):
"""Load the filenames and data from the 20 newsgroups dataset.
Read more in the :ref:`User Guide <20newsgroups>`.
Parameters
----------
subset: 'train' or 'test', 'all', optional
Select the dataset to load: 'train' for the training set, 'test'
for the test set, 'all' for both, with shuffled ordering.
data_home: optional, default: None
Specify a download and cache folder for the datasets. If None,
all scikit-learn data is stored in '~/scikit_learn_data' subfolders.
categories: None or collection of string or unicode
If None (default), load all the categories.
If not None, list of category names to load (other categories
ignored).
shuffle: bool, optional
Whether or not to shuffle the data: might be important for models that
make the assumption that the samples are independent and identically
distributed (i.i.d.), such as stochastic gradient descent.
random_state: numpy random number generator or seed integer
Used to shuffle the dataset.
download_if_missing: optional, True by default
If False, raise an IOError if the data is not locally available
instead of trying to download the data from the source site.
remove: tuple
May contain any subset of ('headers', 'footers', 'quotes'). Each of
these are kinds of text that will be detected and removed from the
newsgroup posts, preventing classifiers from overfitting on
metadata.
'headers' removes newsgroup headers, 'footers' removes blocks at the
ends of posts that look like signatures, and 'quotes' removes lines
that appear to be quoting another post.
'headers' follows an exact standard; the other filters are not always
correct.
"""
data_home = get_data_home(data_home=data_home)
cache_path = os.path.join(data_home, CACHE_NAME)
twenty_home = os.path.join(data_home, "20news_home")
cache = None
if os.path.exists(cache_path):
try:
with open(cache_path, 'rb') as f:
compressed_content = f.read()
uncompressed_content = codecs.decode(
compressed_content, 'zlib_codec')
cache = pickle.loads(uncompressed_content)
except Exception as e:
print(80 * '_')
print('Cache loading failed')
print(80 * '_')
print(e)
if cache is None:
if download_if_missing:
cache = download_20newsgroups(target_dir=twenty_home,
cache_path=cache_path)
else:
raise IOError('20Newsgroups dataset not found')
if subset in ('train', 'test'):
data = cache[subset]
elif subset == 'all':
data_lst = list()
target = list()
filenames = list()
for subset in ('train', 'test'):
data = cache[subset]
data_lst.extend(data.data)
target.extend(data.target)
filenames.extend(data.filenames)
data.data = data_lst
data.target = np.array(target)
data.filenames = np.array(filenames)
else:
raise ValueError(
"subset can only be 'train', 'test' or 'all', got '%s'" % subset)
data.description = 'the 20 newsgroups by date dataset'
if 'headers' in remove:
data.data = [strip_newsgroup_header(text) for text in data.data]
if 'footers' in remove:
data.data = [strip_newsgroup_footer(text) for text in data.data]
if 'quotes' in remove:
data.data = [strip_newsgroup_quoting(text) for text in data.data]
if categories is not None:
labels = [(data.target_names.index(cat), cat) for cat in categories]
# Sort the categories to have the ordering of the labels
labels.sort()
labels, categories = zip(*labels)
mask = np.in1d(data.target, labels)
data.filenames = data.filenames[mask]
data.target = data.target[mask]
# searchsorted to have continuous labels
data.target = np.searchsorted(labels, data.target)
data.target_names = list(categories)
# Use an object array to shuffle: avoids memory copy
data_lst = np.array(data.data, dtype=object)
data_lst = data_lst[mask]
data.data = data_lst.tolist()
if shuffle:
random_state = check_random_state(random_state)
indices = np.arange(data.target.shape[0])
random_state.shuffle(indices)
data.filenames = data.filenames[indices]
data.target = data.target[indices]
# Use an object array to shuffle: avoids memory copy
data_lst = np.array(data.data, dtype=object)
data_lst = data_lst[indices]
data.data = data_lst.tolist()
return data
def fetch_20newsgroups_vectorized(subset="train", remove=(), data_home=None):
"""Load the 20 newsgroups dataset and transform it into tf-idf vectors.
This is a convenience function; the tf-idf transformation is done using the
default settings for `sklearn.feature_extraction.text.Vectorizer`. For more
advanced usage (stopword filtering, n-gram extraction, etc.), combine
fetch_20newsgroups with a custom `Vectorizer` or `CountVectorizer`.
Read more in the :ref:`User Guide <20newsgroups>`.
Parameters
----------
subset: 'train' or 'test', 'all', optional
Select the dataset to load: 'train' for the training set, 'test'
for the test set, 'all' for both, with shuffled ordering.
data_home: optional, default: None
        Specify a download and cache folder for the datasets. If None,
all scikit-learn data is stored in '~/scikit_learn_data' subfolders.
remove: tuple
May contain any subset of ('headers', 'footers', 'quotes'). Each of
these are kinds of text that will be detected and removed from the
newsgroup posts, preventing classifiers from overfitting on
metadata.
'headers' removes newsgroup headers, 'footers' removes blocks at the
ends of posts that look like signatures, and 'quotes' removes lines
that appear to be quoting another post.
Returns
-------
bunch : Bunch object
bunch.data: sparse matrix, shape [n_samples, n_features]
bunch.target: array, shape [n_samples]
bunch.target_names: list, length [n_classes]
"""
data_home = get_data_home(data_home=data_home)
filebase = '20newsgroup_vectorized'
if remove:
filebase += 'remove-' + ('-'.join(remove))
target_file = os.path.join(data_home, filebase + ".pk")
# we shuffle but use a fixed seed for the memoization
data_train = fetch_20newsgroups(data_home=data_home,
subset='train',
categories=None,
shuffle=True,
random_state=12,
remove=remove)
data_test = fetch_20newsgroups(data_home=data_home,
subset='test',
categories=None,
shuffle=True,
random_state=12,
remove=remove)
if os.path.exists(target_file):
X_train, X_test = joblib.load(target_file)
else:
vectorizer = CountVectorizer(dtype=np.int16)
X_train = vectorizer.fit_transform(data_train.data).tocsr()
X_test = vectorizer.transform(data_test.data).tocsr()
joblib.dump((X_train, X_test), target_file, compress=9)
# the data is stored as int16 for compactness
# but normalize needs floats
X_train = X_train.astype(np.float64)
X_test = X_test.astype(np.float64)
normalize(X_train, copy=False)
normalize(X_test, copy=False)
target_names = data_train.target_names
if subset == "train":
data = X_train
target = data_train.target
elif subset == "test":
data = X_test
target = data_test.target
elif subset == "all":
data = sp.vstack((X_train, X_test)).tocsr()
target = np.concatenate((data_train.target, data_test.target))
else:
raise ValueError("%r is not a valid subset: should be one of "
"['train', 'test', 'all']" % subset)
return Bunch(data=data, target=target, target_names=target_names)
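# As the docstring above notes, for custom preprocessing one can combine
# fetch_20newsgroups with a hand-configured vectorizer instead of this helper.
# A hedged sketch (the parameters are arbitrary examples, not recommendations):
#
#     from sklearn.feature_extraction.text import TfidfVectorizer
#     train = fetch_20newsgroups(subset='train', remove=('headers', 'quotes'))
#     X = TfidfVectorizer(stop_words='english').fit_transform(train.data)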
| bsd-3-clause |
BradAJ/zipflights | zipflights_data_parsing.py | 1 | 11460 | import pandas as pd
import numpy as np
#Columns of interest in BTS OrigDest:Market Survey csv.
USECOLS = [u'MktCoupons',
u'Year',
u'Quarter',
u'Origin',
u'Dest',
u'AirportGroup',
u'TkCarrierChange',
u'TkCarrierGroup',
u'OpCarrierChange',
u'OpCarrierGroup',
u'RPCarrier',
u'TkCarrier',
u'OpCarrier',
u'BulkFare',
u'Passengers',
u'MktFare',
u'MktDistance',
u'MktMilesFlown',
u'NonStopMiles',
u'ItinGeoType',
u'MktGeoType']
def survey_load_strip_save(filepath_in, filepath_out):
"""
filepath_in: path to survey csv, e.g.:
'Origin_and_Destination_Survey_DB1BMarket_2013_3.csv'
filepath_out: path to stripped version of csv, e.g.:
'OrigDest_stripped_2013q3.csv'
Load BTS OrigDest:Market Quarterly Survey csv into a dataframe
and strip out the repetitive columns and save as a new csv that
will be concatenated with data from other quarters.
The data contains a large number of listed prices between zero and
five dollars, according to the BTS these are largely from tickets
bought on contract where the actual amount paid is proprietary.
These tickets are irrelevant to our analysis so they are also
stripped out here.
"""
df = pd.read_csv(filepath_in)
df = df[USECOLS]
df = df.loc[df.MktFare >= 6.]
df.to_csv(filepath_out)
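# Illustrative usage (file names are hypothetical): strip each quarterly survey
# csv, then concatenate the stripped files before analysis.
#
#     for q in (1, 2, 3, 4):
#         survey_load_strip_save(
#             'Origin_and_Destination_Survey_DB1BMarket_2013_%d.csv' % q,
#             'OrigDest_stripped_2013q%d.csv' % q)
#     od_df = pd.concat(pd.read_csv('OrigDest_stripped_2013q%d.csv' % q)
#                       for q in (1, 2, 3, 4))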
def alphabetize_airports(od_df):
"""
od_df: Dataframe of OrigDest survey data (most likely
    stripped of irrelevant columns with survey_load_strip_save,
and concatenated with other quarters)
Our analysis treats flights from LAX to JFK the same as flights
from JFK to LAX, so add columns 'apt1' and 'apt2' that contain
airport codes where 'apt1' precedes 'apt2' in the alphabet.
"""
od_df['apt1'] = None
od_df['apt2'] = None
od_df.loc[od_df.Origin < od_df.Dest, 'apt1'] = od_df.Origin
od_df.loc[od_df.Origin > od_df.Dest, 'apt1'] = od_df.Dest
od_df.loc[od_df.Origin < od_df.Dest, 'apt2'] = od_df.Dest
od_df.loc[od_df.Origin > od_df.Dest, 'apt2'] = od_df.Origin
def groupby_route_medians(od_df):
"""
od_df: Dataframe of OrigDest survey (with 'apt1' and 'apt2' columns
from alphabetize_airports func.)
Returns: Dataframe with one row for each 'apt1' to 'apt2' route
these columns are alphabetized by airport code, so data on flights
between JFK and LAX will be listed as such (and not LAX, JFK).
"""
cols_in = ['MktFare', 'NonStopMiles', 'Origin']
od_meds_dists = od_df.groupby(by=['apt1', 'apt2'])[cols_in].agg([np.median, len])
od_meds_dists = od_meds_dists.reset_index()
cols_out = ['apt1', 'apt2', 'MktFareMed', 'Count', 'NonStopMiles', 'Count_dup']
od_meds_dists.columns = cols_out
return od_meds_dists[cols_out[:-1]]
def fare_dist_fit(od_meds_df):
"""
od_meds_df: Dataframe of Fare, Distances and Ticket count data
(generated with groupby_route_medians)
Fit a line to median price versus distance of an airline route,
weighted by the number of passengers on the route. Add columns
to the dataframe 'pred_price' that is the price expected by this
linear model based on the distance of a route, and 'delta_price'
that is 'pred_price' - actual median price.
"""
x = od_meds_df['NonStopMiles']
y = od_meds_df['MktFareMed']
weight = od_meds_df['Count']
fit = np.polyfit(x, y, 1, w = weight)
od_meds_df['pred_price'] = np.polyval(fit, od_meds_df.NonStopMiles)
od_meds_df['delta_price'] = od_meds_df.pred_price - od_meds_df.MktFareMed
def weighted_avg_and_std(values, weights):
"""
Return the weighted average and standard deviation.
values, weights -- Numpy ndarrays with the same shape.
(shamelessly stolen from stack exchange)
"""
average = np.average(values, weights=weights)
variance = np.average((values-average)**2, weights=weights) # Fast and numerically precise
return (average, np.sqrt(variance))
wav_str = "Weighted avg and std of predicted prices. USED IN WEB-APP:"
print wav_str, weighted_avg_and_std(od_meds_df.delta_price, weight)
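# --- Illustrative helper (added; not part of the original analysis) ---
# A minimal sketch of how the columns added by fare_dist_fit could be used to
# score one route; the column names follow the dataframe built above, while the
# function itself is an assumption included for illustration.
def _example_route_deal_score(od_meds_df, apt1, apt2):
    """Return predicted-minus-median fare for one alphabetized route."""
    row = od_meds_df[(od_meds_df.apt1 == apt1) & (od_meds_df.apt2 == apt2)]
    if row.empty:
        return None
    return float(row.delta_price.iloc[0])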
def dests_by_rank_preds_by_dest(od_meds_df):
"""
od_meds_df: Dataframe of Fare, Distances, etc. including 'pred_price'
(see fare_dist_fit)
    Returns: Two dicts of dicts of the form {Origin_code: {1: Dest_Code1, 2: Dest_Code2, ...}}
    and {Origin_code: {Dest_code: pred_price, ...}}
    Generate a dict to be used in the web app where, given an origin airport code as key,
the value is a dict where the keys are ranks 1 to n and values are airports such that
{'JFK': {1: 'LAX'}} would mean that 'LAX' is the best value destination from 'JFK', based
on the linear fit from fare_dist_fit(). Also return Orig to Dest to Predicted price dict.
"""
#make JFK->LAX and LAX->JFK separate entries for grouping purposes
#also require at least 100 entries on a given route for it to be included in rankings
od_meds_dfcop = od_meds_df.loc[od_meds_df.Count >= 100].copy()
od_meds_dfcop['apt1'] = od_meds_df['apt2']
od_meds_dfcop['apt2'] = od_meds_df['apt1']
od_dub = pd.concat([od_meds_df.loc[od_meds_df.Count >= 100], od_meds_dfcop])
od_dub.index = range(len(od_dub))
od_dub['dest_value_rank'] = od_dub.groupby('apt1')['delta_price'].rank(ascending = False)
rank_to_dest_d = dict()
dest_to_pred_d = dict()
for orig, dest, rank, pred in np.array(od_dub[['apt1', 'apt2', 'dest_value_rank', 'pred_price']]):
if orig in rank_to_dest_d:
rank_to_dest_d[orig][rank] = dest
else:
rank_to_dest_d[orig] = {rank: dest}
if orig in dest_to_pred_d:
dest_to_pred_d[orig][dest] = pred
else:
dest_to_pred_d[orig] = {dest: pred}
for orig in rank_to_dest_d:
rank_to_dest_d[orig]['max_rank'] = max(rank_to_dest_d[orig].keys())
return rank_to_dest_d, dest_to_pred_d
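# Illustrative lookup against the dicts returned above (the airport codes are
# examples only; a route appears only if it met the 100-entry cutoff). Note
# that pandas rank() produces float keys, hence 1.0 rather than 1.
#
#     rank_to_dest_d, dest_to_pred_d = dests_by_rank_preds_by_dest(od_meds_df)
#     best_dest = rank_to_dest_d['JFK'][1.0]        # best-value destination
#     pred_fare = dest_to_pred_d['JFK'][best_dest]  # its predicted fare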
def make_hidden_cities_d(od_df):
"""
od_df: OrigDest dataframe, with MktFare >= 6.
Returns: Dict of the form {(Origin_code, Dest_code): [Fake_Dest_code, ...]}
Aggregate nonstop routes with routes with stops that start with the given
nonstop, i.e. LAX->JFK is grouped with LAX->JFK->BOS, LAX->JFK->IAD, etc.
Then identify the routes with changes that have lower median prices than
the corresponding nonstop. i.e. if LAX->JFK costs $300 and LAX->JFK->BOS
costs $250 and LAX->JFK->IAD costs $350 then include the BOS route but not
the IAD route.
"""
od_routings = od_df.groupby(['Origin', 'Dest', 'AirportGroup'])['MktFare'].agg([np.median, len])
od_routings = od_routings.reset_index()
#require routings to have at least 50 entries to be considered
od_routings = od_routings.loc[od_routings.len >= 50]
route_d = dict()
for route, fare in np.array(od_routings[['AirportGroup', 'median']]):
route_l = route.split(':')
orig = route_l[0]
for dest in route_l[1:]:
if (orig, dest) in route_d:
route_d[(orig, dest)][route] = fare
else:
route_d[(orig, dest)] = {route:fare}
nonstop_route_deals = dict()
for route_tup in route_d:
nonstop_s = route_tup[0] + ':' + route_tup[1]
if nonstop_s in route_d[route_tup]:
nonstop_fare = route_d[route_tup][nonstop_s]
for subroute in route_d[route_tup]:
subroute_l = subroute.split(':')
if route_d[route_tup][subroute] < nonstop_fare and subroute_l[1] == route_tup[1]:
if route_tup in nonstop_route_deals:
nonstop_route_deals[route_tup][subroute] = route_d[route_tup][subroute]
else:
nonstop_route_deals[route_tup] = {subroute: route_d[route_tup][subroute]}
hidden_cities_d = dict()
for route_tup in nonstop_route_deals:
hiddens = set()
for route_s in nonstop_route_deals[route_tup]:
hiddens.add(route_s.split(':')[-1])
hidden_cities_d[route_tup] = list(hiddens)
hidden_cities_paired_d = dict()
for route_tup in hidden_cities_d:
if (route_tup[1], route_tup[0]) in hidden_cities_d:
hidden_cities_paired_d[route_tup] = hidden_cities_d[route_tup]
return hidden_cities_paired_d
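# Illustrative lookup of "hidden city" destinations for a nonstop route (codes
# are examples; a key exists only when both directions have cheaper routings):
#
#     hidden_d = make_hidden_cities_d(od_df)
#     fake_dests = hidden_d.get(('LAX', 'JFK'), [])
#     # e.g. booking LAX->JFK->BOS and deplaning at JFK may be cheaper than
#     # the LAX->JFK nonstop, according to the median fares computed above.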
def od_codes_to_airport_name_d(dest_to_pred_d):
"""
dest_to_pred_d: dict containing origin airport codes
generated by dests_by_rank_preds_by_dest()
Returns: dict of airport code to its full name. Some
airports (those with fewer than 10000 passengers per Year)
are not listed in the wikipedia table. These are removed.
#ASSUMES airport_list.html is in current directory.
#This file was taken from:
http://en.wikipedia.org/wiki/List_of_airports_in_the_United_States
#In March 2014.
"""
htmdf = pd.read_html('airport_list.html', skiprows=2, infer_types = False)[0] #read_html assumes multiple tables per page
htmdf.columns = ['city', 'FAA', 'IATA', 'ICAO', 'airport', 'role', 'enplanements']
htmdf = htmdf.loc[htmdf.airport != '']
htmdf.index = range(len(htmdf))
def shorten_airports(apt_s):
if "International" in apt_s:
return apt_s.split("International")[0]
elif "Regional" in apt_s:
return apt_s.split("Regional")[0]
elif "Airport" in apt_s:
return apt_s.split("Airport")[0]
else:
return apt_s + ' '
htmdf['apt_v1'] = htmdf['airport'].apply(shorten_airports)
def rewrite_airports(x):
if x['city'] in x['apt_v1']:
return x['apt_v1']+'('+x['IATA']+')'
else:
return x['apt_v1'].rstrip(' ')+', '+x['city']+' ('+x['IATA']+')'
htmdf['apt_info'] = htmdf.apply(rewrite_airports, axis = 1)
apt_code_city_d = dict()
for code in dest_to_pred_d:
city_name = htmdf.loc[htmdf.IATA == code, 'apt_info'].all()
if type(city_name) is not bool:
apt_code_city_d[code] = city_name
#FIX some airport names by hand:
apt_code_city_d['AVP'] = 'Wilkes-Barre/Scranton (AVP)'
apt_code_city_d['AZO'] = 'Kalamazoo/Battle Creek (AZO)'
apt_code_city_d['BWI'] = 'Baltimore/Washington (BWI)'
apt_code_city_d['CAK'] = 'Akron/Canton (CAK)'
apt_code_city_d['CHO'] = 'Charlottesville (CHO)'
apt_code_city_d['CRP'] = 'Corpus Christi (CRP)'
apt_code_city_d['CVG'] = 'Cincinnati/Northern Kentucky (CVG)'
apt_code_city_d['DCA'] = 'Washington National (DCA)'
apt_code_city_d['DFW'] = 'Dallas/Fort Worth (DFW)'
apt_code_city_d['DTW'] = 'Detroit Metropolitan Wayne County (DTW)'
apt_code_city_d['FLL'] = 'Fort Lauderdale (FLL)'
apt_code_city_d['IAD'] = 'Washington Dulles (IAD)'
apt_code_city_d['LIH'] = 'Lihue (LIH)'
apt_code_city_d['LNY'] = 'Lanai City (LNY)'
apt_code_city_d['PIE'] = 'St. Petersburg/Clearwater (PIE)'
apt_code_city_d['SJC'] = 'Norman Y. Mineta San Jose (SJC)'
apt_code_city_d['SJU'] = 'San Juan / Carolina (SJU)'
return apt_code_city_d
| mit |
nhuntwalker/astroML | book_figures/chapter4/fig_lyndenbell_toy.py | 3 | 4082 | """
Luminosity function code on toy data
------------------------------------
Figure 4.9.
An example of using Lynden-Bell's C- method to estimate a bivariate
distribution from a truncated sample. The lines in the left panel show the true
one-dimensional distributions of x and y (truncated Gaussian distributions).
The two-dimensional distribution is assumed to be separable; see eq. 4.85.
A realization of the distribution is shown in the right panel, with a
truncation given by the solid line. The points in the left panel are computed
from the truncated data set using the C- method, with error bars from 20
bootstrap resamples.
"""
# Author: Jake VanderPlas
# License: BSD
# The figure produced by this code is published in the textbook
# "Statistics, Data Mining, and Machine Learning in Astronomy" (2013)
# For more information, see http://astroML.github.com
# To report a bug or issue, use the following forum:
# https://groups.google.com/forum/#!forum/astroml-general
import numpy as np
from matplotlib import pyplot as plt
from scipy import stats
from astroML.lumfunc import bootstrap_Cminus
#----------------------------------------------------------------------
# This function adjusts matplotlib settings for a uniform feel in the textbook.
# Note that with usetex=True, fonts are rendered with LaTeX. This may
# result in an error if LaTeX is not installed on your system. In that case,
# you can set usetex to False.
from astroML.plotting import setup_text_plots
setup_text_plots(fontsize=8, usetex=True)
#------------------------------------------------------------
# Define and sample our distributions
N = 10000
np.random.seed(42)
# Define the input distributions for x and y
x_pdf = stats.truncnorm(-2, 1, 0.66666, 0.33333)
y_pdf = stats.truncnorm(-1, 2, 0.33333, 0.33333)
x = x_pdf.rvs(N)
y = y_pdf.rvs(N)
# define the truncation: we'll design this to be symmetric
# so that xmax(y) = max_func(y)
# and ymax(x) = max_func(x)
max_func = lambda t: 1. / (0.5 + t) - 0.5
xmax = max_func(y)
xmax[xmax > 1] = 1 # cutoff at x=1
ymax = max_func(x)
ymax[ymax > 1] = 1 # cutoff at y=1
# truncate the data
flag = (x < xmax) & (y < ymax)
x = x[flag]
y = y[flag]
xmax = xmax[flag]
ymax = ymax[flag]
x_fit = np.linspace(0, 1, 21)
y_fit = np.linspace(0, 1, 21)
#------------------------------------------------------------
# compute the Cminus distributions (with bootstrap)
x_dist, dx_dist, y_dist, dy_dist = bootstrap_Cminus(x, y, xmax, ymax,
x_fit, y_fit,
Nbootstraps=20,
normalize=True)
x_mid = 0.5 * (x_fit[1:] + x_fit[:-1])
y_mid = 0.5 * (y_fit[1:] + y_fit[:-1])
#------------------------------------------------------------
# Plot the results
fig = plt.figure(figsize=(5, 2))
fig.subplots_adjust(bottom=0.2, top=0.95,
left=0.1, right=0.92, wspace=0.25)
# First subplot is the true & inferred 1D distributions
ax = fig.add_subplot(121)
ax.plot(x_mid, x_pdf.pdf(x_mid), '-k', label='$p(x)$')
ax.plot(y_mid, y_pdf.pdf(y_mid), '--k', label='$p(y)$')
ax.legend(loc='lower center')
ax.errorbar(x_mid, x_dist, dx_dist, fmt='ok', ecolor='k', lw=1, ms=4)
ax.errorbar(y_mid, y_dist, dy_dist, fmt='^k', ecolor='k', lw=1, ms=4)
ax.set_ylim(0, 1.8)
ax.set_xlim(0, 1)
ax.set_xlabel('$x$, $y$')
ax.set_ylabel('normalized distribution')
# Second subplot is the "observed" 2D distribution
ax = fig.add_subplot(122)
H, xb, yb = np.histogram2d(x, y, bins=np.linspace(0, 1, 41))
plt.imshow(H.T, origin='lower', interpolation='nearest',
extent=[0, 1, 0, 1], cmap=plt.cm.binary)
cb = plt.colorbar()
x_limit = np.linspace(-0.1, 1.1, 1000)
y_limit = max_func(x_limit)
x_limit[y_limit > 1] = 0
y_limit[x_limit > 1] = 0
ax.plot(x_limit, y_limit, '-k')
ax.set_xlim(0, 1.1)
ax.set_ylim(0, 1.1)
ax.set_xlabel('$x$')
ax.set_ylabel('$y$')
cb.set_label('counts per pixel')
ax.text(0.93, 0.93, '%i points' % len(x), ha='right', va='top',
transform=ax.transAxes)
plt.show()
| bsd-2-clause |
Knewton/lentil | tests/test_est.py | 2 | 9820 | """
Module for unit tests that check if parameter estimation converges for toy examples
@author Siddharth Reddy <[email protected]>
"""
import copy
import unittest
import logging
import pandas as pd
import numpy as np
from lentil import models
from lentil import est
from lentil import toy
logging.basicConfig()
_logger = logging.getLogger(__name__)
_logger.setLevel(logging.INFO)
class TestEstimators(unittest.TestCase):
def setUp(self):
# fixes random initializations of parameters
# before parameter estimation
np.random.seed(1997)
def tearDown(self):
pass
def test_1d_embedding(self):
"""
A one-dimensional embedding, where a single latent skill is enough
to explain the data. The key observation here is that the model
recovered positive skill gains for $L_1$, and "correctly" arranged
students and assessments in the latent space. Initially, Carter
fails both assessments, so his skill level is behind the requirements
of both assessments. Lee passes A1 but fails A2, so his skill
level is beyond the requirement for A1, but behind the requirement
for A2. In an effort to improve their results, Lee and Carter
complete lesson $L_1$ and retake both assessments. Now Carter passes
A1, but still fails A2, so his new skill level is ahead of the
requirements for A1 but behind the requirements for A2. Lee
passes both assessments, so his new skill level exceeds the requirements
for A1 and A2. This clear difference in results before completing
lesson $L_1$ and after completing the lesson implies that $L_1$ had a
positive effect on Lee and Carter's skill levels, hence the non-zero
skill gain vector recovered for $L_1$.
"""
history = toy.get_1d_embedding_history()
embedding_dimension = 1
estimator = est.EmbeddingMAPEstimator(
regularization_constant=1e-6,
using_scipy=True,
verify_gradient=True,
debug_mode_on=False)
eps = 1e-6
using_l1_regularizer_configs = [True, False]
for using_l1_regularizer in using_l1_regularizer_configs:
model = models.EmbeddingModel(
history,
embedding_dimension,
using_lessons=True,
using_prereqs=False,
using_bias=False,
using_l1_regularizer=using_l1_regularizer,
learning_update_variance_constant=0.5)
model.fit(estimator)
self.assertTrue(estimator.fd_err < eps)
def test_assessment_grid(self):
"""
A two-dimensional grid of assessments and a single student
somewhere in the middle of it
"""
embedding_kwargs = {
'embedding_dimension' : 2,
'using_lessons' : False,
'using_prereqs' : False,
'using_bias' : False,
'learning_update_variance_constant' : 0.5
}
estimator = est.EmbeddingMAPEstimator(
regularization_constant=1e-6,
using_scipy=True,
verify_gradient=True,
debug_mode_on=False)
eps = 1e-3
using_l1_regularizer_configs = [True, False]
for using_l1_regularizer in using_l1_regularizer_configs:
embedding_kwargs.update({'using_l1_regularizer' : using_l1_regularizer})
model = toy.get_assessment_grid_model(embedding_kwargs)
model.fit(estimator)
self.assertTrue(estimator.fd_err < eps)
def test_independent_assessments(self):
"""
A two-dimensional embedding, where an intransitivity in assessment
results requires more than one latent skill to explain. The key
observation here is that the assessments are embedded on two different
axes, meaning they require two completely independent skills. This
makes sense, since student results on A1 are uncorrelated with
results on A2. Fogell fails both assessments, so his skill levels
are behind the requirements for A1 and A2. McLovin passes both
assessments, so his skill levels are beyond the requirements for A1
and A2. Evan and Seth are each able to pass one assessment but not
the other. Since the assessments have independent requirements, this
implies that Evan and Seth have independent skill sets
(i.e. Evan has enough of skill 2 to pass A2 but not enough of
skill 1 to pass A1, and Seth has enough of skill 1 to pass A1
but not enough of skill 2 to pass A2).
"""
history = toy.get_independent_assessments_history()
embedding_dimension = 2
estimator = est.EmbeddingMAPEstimator(
regularization_constant=1e-6,
using_scipy=True,
verify_gradient=True,
debug_mode_on=False)
eps = 1e-6
using_l1_regularizer_configs = [True, False]
for using_l1_regularizer in using_l1_regularizer_configs:
model = models.EmbeddingModel(
history,
embedding_dimension,
using_prereqs=False,
using_lessons=False,
using_bias=False,
using_l1_regularizer=using_l1_regularizer,
learning_update_variance_constant=0.5)
model.fit(estimator)
self.assertTrue(estimator.fd_err < eps)
def test_independent_lessons(self):
"""
We replicate the setting in test_independent_assessments, then add two
new students Slater and Michaels, and two new lesson modules $L_1$
and L2. Slater is initially identical to Evan, while Michaels is
initially identical to Seth. Slater reads lesson $L_1$, then passes
assessments A1 and A2. Michaels reads lesson L2, then passes
assessments A1 and A2. The key observation here is that the skill
gain vectors recovered for the two lesson modules are orthogonal,
meaning they help students satisfy completely independent skill
requirements. This makes sense, since initially Slater was lacking
in Skill 1 while Michaels was lacking in Skill 2, but after completing
their lessons they passed their assessments, showing that they gained
from their respective lessons what they were lacking initially.
"""
history = toy.get_independent_lessons_history()
embedding_dimension = 2
estimator = est.EmbeddingMAPEstimator(
regularization_constant=1e-6,
using_scipy=True,
verify_gradient=True,
debug_mode_on=False)
eps = 1e-6
using_l1_regularizer_configs = [True, False]
for using_l1_regularizer in using_l1_regularizer_configs:
model = models.EmbeddingModel(
history,
embedding_dimension,
using_prereqs=False,
using_lessons=True,
using_bias=False,
using_l1_regularizer=using_l1_regularizer,
learning_update_variance_constant=0.5)
model.fit(estimator)
self.assertTrue(estimator.fd_err < eps)
def test_lesson_prereqs(self):
"""
We replicate the setting in test_independent_assessments, then add a new
assessment module A3 and a new lesson module L1. All students
initially fail assessment A3, then read lesson L1, after which
McLovin passes A3 while everyone else still fails A3. The key
observation here is that McLovin is the only student who initially
satisfies the prerequisites for L1, so he is the only student who
realizes significant gains.
"""
history = toy.get_lesson_prereqs_history()
embedding_dimension = 2
estimator = est.EmbeddingMAPEstimator(
regularization_constant=1e-6,
using_scipy=True,
verify_gradient=True,
debug_mode_on=False)
eps = 1e-6
using_l1_regularizer_configs = [True, False]
for using_l1_regularizer in using_l1_regularizer_configs:
model = models.EmbeddingModel(
history,
embedding_dimension,
using_prereqs=False,
using_lessons=True,
using_bias=False,
using_l1_regularizer=using_l1_regularizer,
learning_update_variance_constant=0.5)
model.fit(estimator)
self.assertTrue(estimator.fd_err < eps)
def test_using_bias(self):
"""
Try using bias terms in assessment result likelihood
"""
history = toy.get_1d_embedding_history()
embedding_dimension = 2
estimator = est.EmbeddingMAPEstimator(
regularization_constant=1e-6,
using_scipy=True,
verify_gradient=True,
debug_mode_on=False)
eps = 1e-6
using_l1_regularizer_configs = [True, False]
for using_l1_regularizer in using_l1_regularizer_configs:
model = models.EmbeddingModel(
history,
embedding_dimension,
using_prereqs=False,
using_lessons=True,
                using_bias=True,
using_l1_regularizer=using_l1_regularizer,
learning_update_variance_constant=0.5)
model.fit(estimator)
self.assertTrue(estimator.fd_err < eps)
# TODO: add unit tests for tv_luv_model, forgetting_model, using_graph_prior=True,
# and using_lessons=False for temporal process on student
if __name__ == '__main__':
unittest.main()
| apache-2.0 |
Mottl/quickpipeline | quickpipeline/__init__.py | 1 | 11960 | # -*- coding: utf-8 -*-
"""
quickpipeline module implements QuickPipeline class that do all the necessary
things to prepare data for machine learning tasks.
2017 (c) Dmitry Mottl
License: MIT
"""
from collections import defaultdict
import pandas as pd
import numpy as np
from sklearn.preprocessing import (
Imputer, StandardScaler,
LabelEncoder, OneHotEncoder, LabelBinarizer
)
from scipy.stats import skew
class QuickPipeline():
"""
QuickPipeline
Performs the following tasks on input pandas dataframes:
1. Fills empty data in a dataframe;
2. Converts categorical columns to one-hot columns or binary columns;
    3. Deskews, moves and scales numerical columns to mean=0 and std=1;
4. Drops uncorrelated and unuseful columns.
Parameters
----------
categorical_features : array-like
A list of column names that must be one-hot encoded.
y_column_name : str
A name of column that is considered as y and must be converted from
string to integer.
impute : str (default='mean')
A strategy of imputing missed values; passed to
sklearn.preprocessing.Imputer.
scale : bool (default=True)
        Moves and scales numerical columns to mean=0 and std=1.
max_missing : float (default=0.9)
        The maximum fraction of missing data in a column. Discards a column
        if its fraction of missing values exceeds this value.
min_correlation : float (default=None)
Absolute minimum correlation coefficient between feature and y column.
        The feature column is dropped if its absolute correlation is lower than this value.
deskew : float (default=0.2)
Deskew features with an absolute skewness more than this parameter
(see scipy.stats.skew). Set to None to disable deskewing.
copy : bool (default=True)
        Return new dataframe(s) instead of modifying the input
dataframe(s).
"""
def __init__(self, categorical_features=None, y_column_name=None,
impute='mean', scale=True,
max_missing=0.9, min_correlation=None,
deskew=0.2, copy=True
):
self.categorical_features = categorical_features
self.y_column_name = y_column_name
self.impute = impute
self.scale = scale
self.max_missing = max_missing
self.min_correlation = min_correlation
self.deskew = deskew
self.copy = copy
# hardcoded thresholds:
self.min_unique_for_deskew = 50
def fit_transform(self, df, df2=None):
'''
Fit and transform pandas dataframes
Parameters
----------
df: pandas Dataframe, shape (n_samples, n_features(+1 if y used))
Training dataframe with y column if needed (must be specified with
y_column_name in constructor)
df2: pandas Dataframe, shape (n_samples, n_features) (default=None)
Testing dataframe
'''
if not isinstance(df, pd.DataFrame):
raise ValueError('df must be pandas DataFrames')
if df2 is not None:
if not isinstance(df2, pd.DataFrame):
raise ValueError('df must be pandas DataFrames')
df_columns = set(df.columns)
df2_columns = set(df2.columns)
if self.y_column_name is not None:
df_columns.discard(self.y_column_name)
df2_columns.discard(self.y_column_name)
if len(df_columns ^ df2_columns) != 0:
raise ValueError('df and df2 columns mismatch')
if self.y_column_name is not None and self.y_column_name not in df.columns:
raise ValueError('y_column_name not found in df')
if self.copy:
df = df.copy()
if df2 is not None:
df2 = df2.copy()
# convert pandas categorical columns to string
for c in df.columns:
if pd.api.types.is_categorical_dtype(df[c]):
df[c] = df[c].astype(str)
if df2 is not None:
for c in df2.columns:
if pd.api.types.is_categorical_dtype(df2[c]):
df2[c] = df2[c].astype(str)
        # remove feature if missing data fraction exceeds self.max_missing:
for c in df.columns:
if c == self.y_column_name:
continue
missing = float(df[c].isnull().sum())/df.shape[0]
if df2 is not None:
missing2 = float(df2[c].isnull().sum())/df2.shape[0]
else:
missing2 = 0
if missing > self.max_missing or missing2 > self.max_missing:
del df[c]
if df2 is not None:
del df2[c]
continue
# create a list of categorical features if not set
if self.categorical_features is None: # get categorical_features automatically
self.categorical_features = list(filter(
lambda c: c != self.y_column_name
and (
(df[c].dtype == object)
or (df2 is not None and df2[c].dtype == object)
),
df.columns
))
# find and correct skewed features
self.deskewed_features = list()
if self.deskew != 0.0 and self.deskew is not None:
numeric_features = list(df.dtypes[df.dtypes != object].index)
if self.y_column_name in numeric_features:
del numeric_features[numeric_features.index(self.y_column_name)]
skewness = df[numeric_features].apply(lambda s: skew(s.dropna().astype(np.float_)))
skewed_positive = skewness[skewness>self.deskew].index
skewed_negative = skewness[skewness<-self.deskew].index
for c in skewed_positive:
# skip if a number of unique values are too low
if df[c].nunique() < self.min_unique_for_deskew:
continue
if min(df[c])<=0: # skip if negative values found
continue
if (df2 is not None) and (min(df2[c])<=0):
continue
df[c] = np.log(df[c])
if df2 is not None:
df2[c] = np.log(df2[c])
self.deskewed_features.append(c)
#for c in skewed_negative:
# df[c] = np.exp(df[c])
# if df2 is not None:
# df2[c] = np.exp(df2[c])
# impute missing values in numeric features and normalize values:
for c in df.columns:
if (c in self.categorical_features) or (c == self.y_column_name):
continue
imputer = Imputer(strategy=self.impute)
df[c] = imputer.fit_transform(df[c].values.reshape(-1,1))
scaler = StandardScaler()
df[c] = scaler.fit_transform(df[c].values.reshape(-1,1))
if df2 is not None:
df2[c] = imputer.transform(df2[c].values.reshape(-1,1))
df2[c] = scaler.transform(df2[c].values.reshape(-1,1))
        # create dicts for encoders; key is a column name, value is an encoder
self.__label_encoders = defaultdict(LabelEncoder)
self.__label_binarizers = defaultdict(LabelBinarizer)
self.__onehot_encoders = defaultdict(OneHotEncoder)
for c in self.categorical_features:
df[c] = df[c].fillna('~~~') # fills with '~~~'
if df2 is not None:
df2[c] = df2[c].fillna('~~~') # fills with '~~~'
uniques = set(df[c].unique())
if df2 is not None:
uniques |= set(df2[c].unique())
if len(uniques) == 1:
# remove columns that do not contain useful data
del df[c]
if df2 is not None:
del df2[c]
elif len(uniques) == 2:
# binarize
df[c] = self.__label_binarizers[c].fit_transform(df[c])
if df2 is not None:
df2[c] = self.__label_binarizers[c].transform(df2[c])
else:
# convert to labels
# get all possible values from a given column and fit LabelEncoder
categories = set(df[c].unique())
if df2 is not None:
categories |= set(df2[c].unique())
categories = sorted(categories)
labels = [c+'_'+cat if cat!='~~~' else '~~~' for cat in categories] # column labels
# construct a column of possible values
possible_values = self.__label_encoders[c].fit_transform(categories).reshape(-1,1)
transformed_series = self.__label_encoders[c].transform(df[c]).reshape(-1,1)
if df2 is not None:
transformed_series2 = self.__label_encoders[c].transform(df2[c]).reshape(-1,1)
# create a one-hot matrix dataframe for a given column
self.__onehot_encoders[c].fit(possible_values)
one_hot_matrix = self.__onehot_encoders[c].transform(transformed_series)
one_hot_dataframe = pd.DataFrame(
data=one_hot_matrix.toarray(), # convert sparse matrix to 2dim array
index=df.index,
columns=labels,
dtype=np.int8
)
                # remove `missing values` column from one_hot_dataframe
if '~~~' in one_hot_dataframe.columns:
del one_hot_dataframe['~~~']
# remove old column and add a one-hot matrix
del df[c]
# add one-hot columns to df
for c1 in one_hot_dataframe.columns:
df[c1] = one_hot_dataframe[c1]
if df2 is not None:
one_hot_matrix = self.__onehot_encoders[c].transform(transformed_series2)
one_hot_dataframe = pd.DataFrame(
data=one_hot_matrix.toarray(),
index=df2.index,
columns=labels,
dtype=np.int8
)
if '~~~' in one_hot_dataframe.columns:
del one_hot_dataframe['~~~']
del df2[c]
for c1 in one_hot_dataframe.columns:
df2[c1] = one_hot_dataframe[c1]
if (self.min_correlation is not None) and (self.y_column_name is not None):
correlation = df.corr()[self.y_column_name]
self.non_correlative = correlation[
(correlation<self.min_correlation)
& (correlation>-self.min_correlation)
].index.values
df.drop(self.non_correlative, axis=1, inplace=True)
if df2 is not None:
df2.drop(self.non_correlative, axis=1, inplace=True)
if self.y_column_name is not None:
if df[self.y_column_name].dtype == object:
self.y_encoder = LabelEncoder()
df[self.y_column_name] = self.y_encoder.fit_transform(df[self.y_column_name])
# move y column to end
y = df[self.y_column_name]
del df[self.y_column_name]
df[self.y_column_name] = y
if df2 is not None:
return df, df2
else:
return df
if __name__ == "__main__":
s1 = pd.Series([1,2,3,np.nan,4,5], dtype=np.float16)
s2 = pd.Series(["A","B",np.nan,"A","C","B"])
y = pd.Series(["yes","yes","no","yes","no","no"])
df = pd.DataFrame({"s1": s1, "s2": s2, "y": y})
pipeline = QuickPipeline(y_column_name="y", copy=True)
df_prepared = pipeline.fit_transform(df)
print(df_prepared)
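    # A hedged sketch of the train/test variant described in the class
    # docstring: fit on a training frame and transform a matching test frame
    # (same columns minus y). The frames below are illustrative only.
    #
    #     df_test = pd.DataFrame({"s1": pd.Series([2, np.nan]),
    #                             "s2": pd.Series(["B", "C"])})
    #     pipeline2 = QuickPipeline(y_column_name="y", copy=True)
    #     df_train_prep, df_test_prep = pipeline2.fit_transform(df, df_test)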
| mit |
toastedcornflakes/scikit-learn | examples/cluster/plot_ward_structured_vs_unstructured.py | 320 | 3369 | """
===========================================================
Hierarchical clustering: structured vs unstructured ward
===========================================================
Example builds a swiss roll dataset and runs
hierarchical clustering on their position.
For more information, see :ref:`hierarchical_clustering`.
In a first step, the hierarchical clustering is performed without connectivity
constraints on the structure and is solely based on distance, whereas in
a second step the clustering is restricted to the k-Nearest Neighbors
graph: it's a hierarchical clustering with structure prior.
Some of the clusters learned without connectivity constraints do not
respect the structure of the swiss roll and extend across different folds of
the manifold. By contrast, when connectivity constraints are imposed,
the clusters form a nice parcellation of the swiss roll.
"""
# Authors : Vincent Michel, 2010
# Alexandre Gramfort, 2010
# Gael Varoquaux, 2010
# License: BSD 3 clause
print(__doc__)
import time as time
import numpy as np
import matplotlib.pyplot as plt
import mpl_toolkits.mplot3d.axes3d as p3
from sklearn.cluster import AgglomerativeClustering
from sklearn.datasets.samples_generator import make_swiss_roll
###############################################################################
# Generate data (swiss roll dataset)
n_samples = 1500
noise = 0.05
X, _ = make_swiss_roll(n_samples, noise)
# Make it thinner
X[:, 1] *= .5
###############################################################################
# Compute clustering
print("Compute unstructured hierarchical clustering...")
st = time.time()
ward = AgglomerativeClustering(n_clusters=6, linkage='ward').fit(X)
elapsed_time = time.time() - st
label = ward.labels_
print("Elapsed time: %.2fs" % elapsed_time)
print("Number of points: %i" % label.size)
###############################################################################
# Plot result
fig = plt.figure()
ax = p3.Axes3D(fig)
ax.view_init(7, -80)
for l in np.unique(label):
ax.plot3D(X[label == l, 0], X[label == l, 1], X[label == l, 2],
'o', color=plt.cm.jet(np.float(l) / np.max(label + 1)))
plt.title('Without connectivity constraints (time %.2fs)' % elapsed_time)
###############################################################################
# Define the structure A of the data. Here a 10 nearest neighbors
from sklearn.neighbors import kneighbors_graph
connectivity = kneighbors_graph(X, n_neighbors=10, include_self=False)
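# `connectivity` is a sparse (n_samples, n_samples) adjacency matrix; passing it
# to AgglomerativeClustering below restricts merges to clusters that are linked
# in this k-nearest-neighbors graph, which is what enforces the structure prior.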
###############################################################################
# Compute clustering
print("Compute structured hierarchical clustering...")
st = time.time()
ward = AgglomerativeClustering(n_clusters=6, connectivity=connectivity,
linkage='ward').fit(X)
elapsed_time = time.time() - st
label = ward.labels_
print("Elapsed time: %.2fs" % elapsed_time)
print("Number of points: %i" % label.size)
###############################################################################
# Plot result
fig = plt.figure()
ax = p3.Axes3D(fig)
ax.view_init(7, -80)
for l in np.unique(label):
ax.plot3D(X[label == l, 0], X[label == l, 1], X[label == l, 2],
'o', color=plt.cm.jet(float(l) / np.max(label + 1)))
plt.title('With connectivity constraints (time %.2fs)' % elapsed_time)
plt.show()
| bsd-3-clause |
jfitzgerald79/gis-1 | vectorize_polygon.py | 2 | 9658 | import pandas as pd
import numpy as np
import fiona
from shapely import geometry
from shapely import ops
from itertools import chain
from pyproj import Proj, transform
from scipy import spatial
from matplotlib import path
import os
from datetime import datetime
import sys
import pysal as ps
class vectorize_polygon():
def __init__(self, shp, convert_crs=0):
print 'START: %s' % (str(datetime.now()))
print 'loading files...'
print 'getting geometry type info...'
self.shapes={'shp':{}}
self.shapes['shp'].update({'file' : fiona.open(shp, 'r')})
self.shapes['shp'].update({'crs': self.shapes['shp']['file'].crs})
self.shapes['shp'].update({'types': self.geom_types(self.shapes['shp']['file']).dropna()})
self.shapes['shp'].update({'shp' : self.homogenize_inputs('shp', range(len(self.shapes['shp']['file'])))})
self.shapes['shp'].update({'poly' : self.poly_return('shp')})
print 'END: %s' % (str(datetime.now()))
def file_chunks(self, l, n):
""" Yield successive n-sized chunks from l.
"""
for i in xrange(0, len(l), n):
yield np.array(l[i:i+n])
def homogenize_inputs(self, shp, chunk):
print 'homogenizing inputs for %s...' % (shp)
d = {}
bv = self.poly_vectorize(self.shapes[shp]['file'], chunk).dropna()
gtypes = self.shapes[shp]['types'].loc[bv.index]
poly = bv.loc[gtypes=='Polygon']
mpoly = bv.loc[gtypes=='MultiPolygon']
apoly = poly.apply(lambda x: list(chain(*x)))
a_mpoly = mpoly.apply(lambda x: list(chain(*x)))
#### HOMOGENIZE POLYGONS
if len(poly) > 0:
polyarrays = pd.Series(apoly.apply(lambda x: np.array(x)))
p_x_arrays = polyarrays.apply(lambda x: np.array(x)[:,0])
p_y_arrays = polyarrays.apply(lambda x: np.array(x)[:,1])
p_trans_arrays = pd.concat([p_x_arrays, p_y_arrays], axis=1)
d['p_geom'] = pd.Series(zip(p_trans_arrays[0], p_trans_arrays[1]), index=p_trans_arrays.index).apply(np.column_stack)
d['p_geom'] = d['p_geom'][d['p_geom'].apply(lambda x: x.shape[0]>=4)]
#### HOMOGENIZE MULTIPOLYGONS
if len(mpoly) > 0:
mpolydims = a_mpoly.apply(lambda x: np.array(x).ndim)
##ndim==1
if (mpolydims==1).any():
m_x_arrays_1 = a_mpoly[mpolydims==1].apply(pd.Series).stack().apply(lambda x: np.array(x)[:,0])
m_y_arrays_1 = a_mpoly[mpolydims==1].apply(pd.Series).stack().apply(lambda x: np.array(x)[:,1])
mp = pd.concat([m_x_arrays_1, m_y_arrays_1], axis=1)
m_geom_1_s = pd.Series(zip(mp[0], mp[1])).apply(np.column_stack)
empty_s = pd.Series(range(len(mp)), index=mp.index)
empty_s = empty_s.reset_index()
empty_s[0] = m_geom_1_s
empty_s = empty_s[empty_s[0].apply(lambda x: x.shape[0]>=4)]
d['m_geom_1'] = empty_s.groupby('level_0').apply(lambda x: tuple(list(x[0])))
##ndim==3
if (mpolydims==3).any():
m_arrays_3 = a_mpoly[mpolydims==3].apply(pd.Series).stack().apply(lambda x: np.array(x)[:,[0,1]])
m_arrays_3 = m_arrays_3[m_arrays_3.apply(lambda x: x.shape[0]>=4)]
d['m_geom_3'] = m_arrays_3.reset_index().groupby('level_0').apply(lambda x: tuple(list(x[0])))
returndf = pd.concat(d.values()).sort_index()
return returndf
def convert_crs(self, shp, crsfrom, crsto, chunk):
print 'converting coordinate reference system of %s...' % (shp)
crsfrom = Proj(crsfrom, preserve_units=True)
crsto = Proj(crsto, preserve_units=True)
d = {}
bv = self.poly_vectorize(self.shapes[shp]['file'], chunk).dropna()
gtypes = self.shapes[shp]['types'].loc[bv.index]
poly = bv.loc[gtypes=='Polygon']
mpoly = bv.loc[gtypes=='MultiPolygon']
apoly = poly.apply(lambda x: list(chain(*x)))
a_mpoly = mpoly.apply(lambda x: list(chain(*x)))
#### CONVERT POLYGONS
if len(poly) > 0:
polyarrays = pd.Series(apoly.apply(lambda x: np.array(x)))
p_x_arrays = polyarrays.apply(lambda x: np.array(x)[:,0])
p_y_arrays = polyarrays.apply(lambda x: np.array(x)[:,1])
p_trans_arrays = pd.concat([p_x_arrays, p_y_arrays], axis=1).apply(lambda x: transform(crsfrom, crsto, x[0], x[1]), axis=1)
d['p_trans_geom'] = p_trans_arrays.apply(np.array).apply(np.column_stack)
d['p_trans_geom'] = d['p_trans_geom'][d['p_trans_geom'].apply(lambda x: x.shape[0]>=4)]
#### CONVERT MULTIPOLYGONS
if len(mpoly) > 0:
mpolydims = a_mpoly.apply(lambda x: np.array(x).ndim)
##ndim==1
if (mpolydims==1).any():
m_x_arrays_1 = a_mpoly[mpolydims==1].apply(pd.Series).stack().apply(lambda x: np.array(x)[:,0])
m_y_arrays_1 = a_mpoly[mpolydims==1].apply(pd.Series).stack().apply(lambda x: np.array(x)[:,1])
mp = pd.concat([m_x_arrays_1, m_y_arrays_1], axis=1)
                m_x_flat_arrays_1 = pd.Series([j[:,0] for j in [np.column_stack(i) for i in np.column_stack([mp[0].values, mp[1].values])]])
                m_y_flat_arrays_1 = pd.Series([j[:,1] for j in [np.column_stack(i) for i in np.column_stack([mp[0].values, mp[1].values])]])
m_trans_arrays_1 = pd.concat([m_x_flat_arrays_1, m_y_flat_arrays_1], axis=1).apply(lambda x: transform(crsfrom, crsto, x[0], x[1]), axis=1)
m_trans_geom_1_s = m_trans_arrays_1.apply(np.array).apply(np.column_stack)
empty_s = pd.Series(range(len(mp)), index=mp.index).reset_index()
empty_s[0] = m_trans_geom_1_s
empty_s = empty_s[empty_s[0].apply(lambda x: x.shape[0]>=4)]
d['m_trans_geom_1'] = empty_s.groupby('level_0').apply(lambda x: tuple(list(x[0])))
##ndim==3
if (mpolydims==3).any():
m_trans_arrays_3 = a_mpoly[mpolydims==3].apply(pd.Series).stack().apply(lambda x: np.array(x)[:,[0,1]]).apply(lambda x: transform(crsfrom, crsto, x[:,0], x[:,1]))
m_trans_geom_3 = m_trans_arrays_3.apply(np.array).apply(np.column_stack)
m_trans_geom_3 = m_trans_geom_3[m_trans_geom_3.apply(lambda x: x.shape[0]>=4)]
m_trans_geom_3_u = m_trans_geom_3.unstack()
d['m_trans_geom_3'] = pd.Series(zip(m_trans_geom_3_u[0], m_trans_geom_3_u[1]), index=m_trans_geom_3_u.index)
return pd.concat(d.values()).sort_index()
def poly_vectorize(self, shpfile, chunk):
s = pd.Series(chunk, index=chunk)
def return_coords(x):
try:
return shpfile[x]['geometry']['coordinates']
except:
return np.nan
return s.apply(return_coords)
def handle_topo_err(self, k):
if k.is_valid:
return k
else:
return k.boundary.convex_hull
def handle_empty(self, k):
if k.is_empty:
return np.nan
elif type(k) != geometry.polygon.Polygon:
return np.nan
else:
return k
def try_union(self, k):
try:
return ops.cascaded_union(k)
except:
try:
u = k[0]
for z in range(len(k))[1:]:
u = u.union(k[z])
return u
except:
return geometry.Polygon(np.vstack(pd.Series(k).apply(lambda x: x.boundary.coords).apply(np.array))).convex_hull
def poly_return(self, shp):
print 'creating polygons for %s...' % (shp)
poly_df = pd.Series(index=self.shapes[shp]['shp'].index)
geomtypes = self.shapes[shp]['types'].loc[poly_df.index]
# print 'making p'
if (geomtypes=='Polygon').any():
p = self.shapes[shp]['shp'].loc[geomtypes=='Polygon'].apply(lambda x: geometry.Polygon(x))#.apply(self.handle_empty)
# print 'setting polydf with p'
poly_df.loc[p.index] = p.copy()
# print 'making mp'
if (geomtypes=='MultiPolygon').any():
mp = self.shapes[shp]['shp'].loc[geomtypes == 'MultiPolygon'].apply(lambda x: (pd.Series(list(x)))).stack().apply(geometry.Polygon)
if mp.apply(lambda x: not x.is_valid).any():
mp = mp.apply(self.handle_topo_err).apply(self.handle_empty).dropna()
mp = mp.reset_index().groupby('level_0').apply(lambda x: list(x[0])).apply(self.try_union)
# print 'setting poly df with mp'
poly_df.loc[mp.index] = mp.copy()
# print 'making nullgeom'
nullgeom = poly_df[poly_df.isnull()].index
# print 'dropping nullgeom from polydf'
poly_df = poly_df.drop(nullgeom)
# print 'dropping nullgeom from selp.shapes.shp'
self.shapes[shp]['shp'] = self.shapes[shp]['shp'].drop(nullgeom)
return poly_df
def geom_types(self, shp):
s = pd.Series(range(len(shp)))
def return_geom(x):
try:
return shp[x]['geometry']['type']
except:
return np.nan
return s.apply(return_geom)
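# Minimal usage sketch (illustrative only; 'parcels.shp' is a placeholder path,
# not part of this module):
#   vp = vectorize_polygon('parcels.shp')
#   polygons = vp.shapes['shp']['poly']  # pandas Series of shapely (Multi)Polygons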
| gpl-2.0 |
wanggang3333/scikit-learn | sklearn/tests/test_qda.py | 155 | 3481 | import numpy as np
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import ignore_warnings
from sklearn import qda
# Data is just 6 separable points in the plane
X = np.array([[0, 0], [-2, -2], [-2, -1], [-1, -1], [-1, -2],
[1, 3], [1, 2], [2, 1], [2, 2]])
y = np.array([1, 1, 1, 1, 1, 2, 2, 2, 2])
y3 = np.array([1, 2, 3, 2, 3, 1, 2, 3, 1])
# Degenerate data with 1 feature (still should be separable)
X1 = np.array([[-3, ], [-2, ], [-1, ], [-1, ], [0, ], [1, ], [1, ],
[2, ], [3, ]])
# Data that has zero variance in one dimension and needs regularization
X2 = np.array([[-3, 0], [-2, 0], [-1, 0], [-1, 0], [0, 0], [1, 0], [1, 0],
[2, 0], [3, 0]])
# One element class
y4 = np.array([1, 1, 1, 1, 1, 1, 1, 1, 2])
# Data with less samples in a class than n_features
X5 = np.c_[np.arange(8), np.zeros((8,3))]
y5 = np.array([0, 0, 0, 0, 0, 1, 1, 1])
def test_qda():
# QDA classification.
# This checks that QDA implements fit and predict and returns
# correct values for a simple toy dataset.
clf = qda.QDA()
y_pred = clf.fit(X, y).predict(X)
assert_array_equal(y_pred, y)
# Assure that it works with 1D data
y_pred1 = clf.fit(X1, y).predict(X1)
assert_array_equal(y_pred1, y)
# Test probas estimates
y_proba_pred1 = clf.predict_proba(X1)
assert_array_equal((y_proba_pred1[:, 1] > 0.5) + 1, y)
y_log_proba_pred1 = clf.predict_log_proba(X1)
assert_array_almost_equal(np.exp(y_log_proba_pred1), y_proba_pred1, 8)
y_pred3 = clf.fit(X, y3).predict(X)
# QDA shouldn't be able to separate those
assert_true(np.any(y_pred3 != y3))
# Classes should have at least 2 elements
assert_raises(ValueError, clf.fit, X, y4)
def test_qda_priors():
clf = qda.QDA()
y_pred = clf.fit(X, y).predict(X)
n_pos = np.sum(y_pred == 2)
neg = 1e-10
clf = qda.QDA(priors=np.array([neg, 1 - neg]))
y_pred = clf.fit(X, y).predict(X)
n_pos2 = np.sum(y_pred == 2)
assert_greater(n_pos2, n_pos)
def test_qda_store_covariances():
# The default is to not set the covariances_ attribute
clf = qda.QDA().fit(X, y)
assert_true(not hasattr(clf, 'covariances_'))
# Test the actual attribute:
clf = qda.QDA().fit(X, y, store_covariances=True)
assert_true(hasattr(clf, 'covariances_'))
assert_array_almost_equal(
clf.covariances_[0],
np.array([[0.7, 0.45], [0.45, 0.7]])
)
assert_array_almost_equal(
clf.covariances_[1],
np.array([[0.33333333, -0.33333333], [-0.33333333, 0.66666667]])
)
def test_qda_regularization():
# the default is reg_param=0. and will cause issues
# when there is a constant variable
clf = qda.QDA()
with ignore_warnings():
y_pred = clf.fit(X2, y).predict(X2)
assert_true(np.any(y_pred != y))
# adding a little regularization fixes the problem
clf = qda.QDA(reg_param=0.01)
with ignore_warnings():
clf.fit(X2, y)
y_pred = clf.predict(X2)
assert_array_equal(y_pred, y)
# Case n_samples_in_a_class < n_features
clf = qda.QDA(reg_param=0.1)
with ignore_warnings():
clf.fit(X5, y5)
y_pred5 = clf.predict(X5)
assert_array_equal(y_pred5, y5)
| bsd-3-clause |
hpi-xnor/BMXNet | example/dec/dec.py | 24 | 7846 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: skip-file
from __future__ import print_function
import sys
import os
# code to automatically download dataset
curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))
sys.path = [os.path.join(curr_path, "../autoencoder")] + sys.path
import mxnet as mx
import numpy as np
import data
from scipy.spatial.distance import cdist
from sklearn.cluster import KMeans
import model
from autoencoder import AutoEncoderModel
from solver import Solver, Monitor
import logging
def cluster_acc(Y_pred, Y):
from sklearn.utils.linear_assignment_ import linear_assignment
assert Y_pred.size == Y.size
D = max(Y_pred.max(), Y.max())+1
w = np.zeros((D,D), dtype=np.int64)
for i in range(Y_pred.size):
w[Y_pred[i], int(Y[i])] += 1
ind = linear_assignment(w.max() - w)
return sum([w[i,j] for i,j in ind])*1.0/Y_pred.size, w
class DECModel(model.MXModel):
class DECLoss(mx.operator.NumpyOp):
def __init__(self, num_centers, alpha):
super(DECModel.DECLoss, self).__init__(need_top_grad=False)
self.num_centers = num_centers
self.alpha = alpha
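        # forward() below computes DEC's soft assignments q as a Student's t
        # kernel between the embedded points z and the cluster centres mu:
        #   q_ij proportional to (1 + ||z_i - mu_j||**2 / alpha) ** (-(alpha + 1) / 2),
        # normalised so that each row of q sums to 1; backward() propagates the
        # corresponding gradient (KL(p || q) in the DEC paper) to both z and mu.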
def forward(self, in_data, out_data):
z = in_data[0]
mu = in_data[1]
q = out_data[0]
self.mask = 1.0/(1.0+cdist(z, mu)**2/self.alpha)
q[:] = self.mask**((self.alpha+1.0)/2.0)
q[:] = (q.T/q.sum(axis=1)).T
def backward(self, out_grad, in_data, out_data, in_grad):
q = out_data[0]
z = in_data[0]
mu = in_data[1]
p = in_data[2]
dz = in_grad[0]
dmu = in_grad[1]
self.mask *= (self.alpha+1.0)/self.alpha*(p-q)
dz[:] = (z.T*self.mask.sum(axis=1)).T - self.mask.dot(mu)
dmu[:] = (mu.T*self.mask.sum(axis=0)).T - self.mask.T.dot(z)
def infer_shape(self, in_shape):
assert len(in_shape) == 3
assert len(in_shape[0]) == 2
input_shape = in_shape[0]
label_shape = (input_shape[0], self.num_centers)
mu_shape = (self.num_centers, input_shape[1])
out_shape = (input_shape[0], self.num_centers)
return [input_shape, mu_shape, label_shape], [out_shape]
def list_arguments(self):
return ['data', 'mu', 'label']
def setup(self, X, num_centers, alpha, save_to='dec_model'):
        sep = X.shape[0]*9//10  # integer split index (avoids float slicing under Python 3)
X_train = X[:sep]
X_val = X[sep:]
ae_model = AutoEncoderModel(self.xpu, [X.shape[1],500,500,2000,10], pt_dropout=0.2)
if not os.path.exists(save_to+'_pt.arg'):
ae_model.layerwise_pretrain(X_train, 256, 50000, 'sgd', l_rate=0.1, decay=0.0,
lr_scheduler=mx.misc.FactorScheduler(20000,0.1))
ae_model.finetune(X_train, 256, 100000, 'sgd', l_rate=0.1, decay=0.0,
lr_scheduler=mx.misc.FactorScheduler(20000,0.1))
ae_model.save(save_to+'_pt.arg')
logging.log(logging.INFO, "Autoencoder Training error: %f"%ae_model.eval(X_train))
logging.log(logging.INFO, "Autoencoder Validation error: %f"%ae_model.eval(X_val))
else:
ae_model.load(save_to+'_pt.arg')
self.ae_model = ae_model
self.dec_op = DECModel.DECLoss(num_centers, alpha)
label = mx.sym.Variable('label')
self.feature = self.ae_model.encoder
self.loss = self.dec_op(data=self.ae_model.encoder, label=label, name='dec')
self.args.update({k:v for k,v in self.ae_model.args.items() if k in self.ae_model.encoder.list_arguments()})
self.args['dec_mu'] = mx.nd.empty((num_centers, self.ae_model.dims[-1]), ctx=self.xpu)
self.args_grad.update({k: mx.nd.empty(v.shape, ctx=self.xpu) for k,v in self.args.items()})
self.args_mult.update({k: k.endswith('bias') and 2.0 or 1.0 for k in self.args})
self.num_centers = num_centers
def cluster(self, X, y=None, update_interval=None):
N = X.shape[0]
if not update_interval:
update_interval = N
batch_size = 256
test_iter = mx.io.NDArrayIter({'data': X}, batch_size=batch_size, shuffle=False,
last_batch_handle='pad')
args = {k: mx.nd.array(v.asnumpy(), ctx=self.xpu) for k, v in self.args.items()}
z = list(model.extract_feature(self.feature, args, None, test_iter, N, self.xpu).values())[0]
kmeans = KMeans(self.num_centers, n_init=20)
kmeans.fit(z)
args['dec_mu'][:] = kmeans.cluster_centers_
solver = Solver('sgd', momentum=0.9, wd=0.0, learning_rate=0.01)
def ce(label, pred):
return np.sum(label*np.log(label/(pred+0.000001)))/label.shape[0]
solver.set_metric(mx.metric.CustomMetric(ce))
label_buff = np.zeros((X.shape[0], self.num_centers))
train_iter = mx.io.NDArrayIter({'data': X}, {'label': label_buff}, batch_size=batch_size,
shuffle=False, last_batch_handle='roll_over')
self.y_pred = np.zeros((X.shape[0]))
def refresh(i):
if i%update_interval == 0:
z = list(model.extract_feature(self.feature, args, None, test_iter, N, self.xpu).values())[0]
p = np.zeros((z.shape[0], self.num_centers))
self.dec_op.forward([z, args['dec_mu'].asnumpy()], [p])
y_pred = p.argmax(axis=1)
print(np.std(np.bincount(y_pred)), np.bincount(y_pred))
print(np.std(np.bincount(y.astype(np.int))), np.bincount(y.astype(np.int)))
if y is not None:
print(cluster_acc(y_pred, y)[0])
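                # Build the DEC target distribution from the soft assignments:
                # p_ij is proportional to q_ij**2 / f_j with f_j = sum_i q_ij,
                # then row-normalised; this sharpens confident assignments while
                # down-weighting large clusters (the global rescaling of `weight`
                # cancels in the row normalisation).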
weight = 1.0/p.sum(axis=0)
weight *= self.num_centers/weight.sum()
p = (p**2)*weight
train_iter.data_list[1][:] = (p.T/p.sum(axis=1)).T
print(np.sum(y_pred != self.y_pred), 0.001*y_pred.shape[0])
if np.sum(y_pred != self.y_pred) < 0.001*y_pred.shape[0]:
self.y_pred = y_pred
return True
self.y_pred = y_pred
solver.set_iter_start_callback(refresh)
solver.set_monitor(Monitor(50))
solver.solve(self.xpu, self.loss, args, self.args_grad, None,
train_iter, 0, 1000000000, {}, False)
self.end_args = args
if y is not None:
return cluster_acc(self.y_pred, y)[0]
else:
return -1
def mnist_exp(xpu):
X, Y = data.get_mnist()
dec_model = DECModel(xpu, X, 10, 1.0, 'data/mnist')
acc = []
for i in [10*(2**j) for j in range(9)]:
acc.append(dec_model.cluster(X, Y, i))
logging.log(logging.INFO, 'Clustering Acc: %f at update interval: %d'%(acc[-1], i))
logging.info(str(acc))
logging.info('Best Clustering ACC: %f at update_interval: %d'%(np.max(acc), 10*(2**np.argmax(acc))))
if __name__ == '__main__':
logging.basicConfig(level=logging.INFO)
mnist_exp(mx.gpu(0))
| apache-2.0 |
voxlol/scikit-learn | sklearn/covariance/tests/test_covariance.py | 142 | 11068 | # Author: Alexandre Gramfort <[email protected]>
# Gael Varoquaux <[email protected]>
# Virgile Fritsch <[email protected]>
#
# License: BSD 3 clause
import numpy as np
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn import datasets
from sklearn.covariance import empirical_covariance, EmpiricalCovariance, \
ShrunkCovariance, shrunk_covariance, \
LedoitWolf, ledoit_wolf, ledoit_wolf_shrinkage, OAS, oas
X = datasets.load_diabetes().data
X_1d = X[:, 0]
n_samples, n_features = X.shape
def test_covariance():
# Tests Covariance module on a simple dataset.
# test covariance fit from data
cov = EmpiricalCovariance()
cov.fit(X)
emp_cov = empirical_covariance(X)
assert_array_almost_equal(emp_cov, cov.covariance_, 4)
assert_almost_equal(cov.error_norm(emp_cov), 0)
assert_almost_equal(
cov.error_norm(emp_cov, norm='spectral'), 0)
assert_almost_equal(
cov.error_norm(emp_cov, norm='frobenius'), 0)
assert_almost_equal(
cov.error_norm(emp_cov, scaling=False), 0)
assert_almost_equal(
cov.error_norm(emp_cov, squared=False), 0)
assert_raises(NotImplementedError,
cov.error_norm, emp_cov, norm='foo')
# Mahalanobis distances computation test
mahal_dist = cov.mahalanobis(X)
print(np.amin(mahal_dist), np.amax(mahal_dist))
assert(np.amin(mahal_dist) > 0)
# test with n_features = 1
X_1d = X[:, 0].reshape((-1, 1))
cov = EmpiricalCovariance()
cov.fit(X_1d)
assert_array_almost_equal(empirical_covariance(X_1d), cov.covariance_, 4)
assert_almost_equal(cov.error_norm(empirical_covariance(X_1d)), 0)
assert_almost_equal(
cov.error_norm(empirical_covariance(X_1d), norm='spectral'), 0)
# test with one sample
# FIXME I don't know what this test does
X_1sample = np.arange(5)
cov = EmpiricalCovariance()
assert_warns(UserWarning, cov.fit, X_1sample)
assert_array_almost_equal(cov.covariance_,
np.zeros(shape=(5, 5), dtype=np.float64))
# test integer type
X_integer = np.asarray([[0, 1], [1, 0]])
result = np.asarray([[0.25, -0.25], [-0.25, 0.25]])
assert_array_almost_equal(empirical_covariance(X_integer), result)
# test centered case
cov = EmpiricalCovariance(assume_centered=True)
cov.fit(X)
assert_array_equal(cov.location_, np.zeros(X.shape[1]))
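# Reference formula exercised by the shrinkage tests below (this is how
# sklearn.covariance.shrunk_covariance is defined):
#   shrunk_cov = (1 - shrinkage) * emp_cov + shrinkage * mu * np.eye(n_features),
# with mu = np.trace(emp_cov) / n_features.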
def test_shrunk_covariance():
# Tests ShrunkCovariance module on a simple dataset.
# compare shrunk covariance obtained from data and from MLE estimate
cov = ShrunkCovariance(shrinkage=0.5)
cov.fit(X)
assert_array_almost_equal(
shrunk_covariance(empirical_covariance(X), shrinkage=0.5),
cov.covariance_, 4)
# same test with shrinkage not provided
cov = ShrunkCovariance()
cov.fit(X)
assert_array_almost_equal(
shrunk_covariance(empirical_covariance(X)), cov.covariance_, 4)
# same test with shrinkage = 0 (<==> empirical_covariance)
cov = ShrunkCovariance(shrinkage=0.)
cov.fit(X)
assert_array_almost_equal(empirical_covariance(X), cov.covariance_, 4)
# test with n_features = 1
X_1d = X[:, 0].reshape((-1, 1))
cov = ShrunkCovariance(shrinkage=0.3)
cov.fit(X_1d)
assert_array_almost_equal(empirical_covariance(X_1d), cov.covariance_, 4)
# test shrinkage coeff on a simple data set (without saving precision)
cov = ShrunkCovariance(shrinkage=0.5, store_precision=False)
cov.fit(X)
assert(cov.precision_ is None)
def test_ledoit_wolf():
# Tests LedoitWolf module on a simple dataset.
# test shrinkage coeff on a simple data set
X_centered = X - X.mean(axis=0)
lw = LedoitWolf(assume_centered=True)
lw.fit(X_centered)
shrinkage_ = lw.shrinkage_
score_ = lw.score(X_centered)
assert_almost_equal(ledoit_wolf_shrinkage(X_centered,
assume_centered=True),
shrinkage_)
assert_almost_equal(ledoit_wolf_shrinkage(X_centered, assume_centered=True,
block_size=6),
shrinkage_)
# compare shrunk covariance obtained from data and from MLE estimate
lw_cov_from_mle, lw_shinkrage_from_mle = ledoit_wolf(X_centered,
assume_centered=True)
assert_array_almost_equal(lw_cov_from_mle, lw.covariance_, 4)
assert_almost_equal(lw_shinkrage_from_mle, lw.shrinkage_)
# compare estimates given by LW and ShrunkCovariance
scov = ShrunkCovariance(shrinkage=lw.shrinkage_, assume_centered=True)
scov.fit(X_centered)
assert_array_almost_equal(scov.covariance_, lw.covariance_, 4)
# test with n_features = 1
X_1d = X[:, 0].reshape((-1, 1))
lw = LedoitWolf(assume_centered=True)
lw.fit(X_1d)
lw_cov_from_mle, lw_shinkrage_from_mle = ledoit_wolf(X_1d,
assume_centered=True)
assert_array_almost_equal(lw_cov_from_mle, lw.covariance_, 4)
assert_almost_equal(lw_shinkrage_from_mle, lw.shrinkage_)
assert_array_almost_equal((X_1d ** 2).sum() / n_samples, lw.covariance_, 4)
# test shrinkage coeff on a simple data set (without saving precision)
lw = LedoitWolf(store_precision=False, assume_centered=True)
lw.fit(X_centered)
assert_almost_equal(lw.score(X_centered), score_, 4)
assert(lw.precision_ is None)
# Same tests without assuming centered data
# test shrinkage coeff on a simple data set
lw = LedoitWolf()
lw.fit(X)
assert_almost_equal(lw.shrinkage_, shrinkage_, 4)
assert_almost_equal(lw.shrinkage_, ledoit_wolf_shrinkage(X))
assert_almost_equal(lw.shrinkage_, ledoit_wolf(X)[1])
assert_almost_equal(lw.score(X), score_, 4)
# compare shrunk covariance obtained from data and from MLE estimate
lw_cov_from_mle, lw_shinkrage_from_mle = ledoit_wolf(X)
assert_array_almost_equal(lw_cov_from_mle, lw.covariance_, 4)
assert_almost_equal(lw_shinkrage_from_mle, lw.shrinkage_)
# compare estimates given by LW and ShrunkCovariance
scov = ShrunkCovariance(shrinkage=lw.shrinkage_)
scov.fit(X)
assert_array_almost_equal(scov.covariance_, lw.covariance_, 4)
# test with n_features = 1
X_1d = X[:, 0].reshape((-1, 1))
lw = LedoitWolf()
lw.fit(X_1d)
lw_cov_from_mle, lw_shinkrage_from_mle = ledoit_wolf(X_1d)
assert_array_almost_equal(lw_cov_from_mle, lw.covariance_, 4)
assert_almost_equal(lw_shinkrage_from_mle, lw.shrinkage_)
assert_array_almost_equal(empirical_covariance(X_1d), lw.covariance_, 4)
# test with one sample
# FIXME I don't know what this test does
X_1sample = np.arange(5)
lw = LedoitWolf()
assert_warns(UserWarning, lw.fit, X_1sample)
assert_array_almost_equal(lw.covariance_,
np.zeros(shape=(5, 5), dtype=np.float64))
# test shrinkage coeff on a simple data set (without saving precision)
lw = LedoitWolf(store_precision=False)
lw.fit(X)
assert_almost_equal(lw.score(X), score_, 4)
assert(lw.precision_ is None)
def test_ledoit_wolf_large():
# test that ledoit_wolf doesn't error on data that is wider than block_size
rng = np.random.RandomState(0)
# use a number of features that is larger than the block-size
X = rng.normal(size=(10, 20))
lw = LedoitWolf(block_size=10).fit(X)
# check that covariance is about diagonal (random normal noise)
assert_almost_equal(lw.covariance_, np.eye(20), 0)
cov = lw.covariance_
# check that the result is consistent with not splitting data into blocks.
lw = LedoitWolf(block_size=25).fit(X)
assert_almost_equal(lw.covariance_, cov)
def test_oas():
# Tests OAS module on a simple dataset.
# test shrinkage coeff on a simple data set
X_centered = X - X.mean(axis=0)
oa = OAS(assume_centered=True)
oa.fit(X_centered)
shrinkage_ = oa.shrinkage_
score_ = oa.score(X_centered)
# compare shrunk covariance obtained from data and from MLE estimate
oa_cov_from_mle, oa_shinkrage_from_mle = oas(X_centered,
assume_centered=True)
assert_array_almost_equal(oa_cov_from_mle, oa.covariance_, 4)
assert_almost_equal(oa_shinkrage_from_mle, oa.shrinkage_)
# compare estimates given by OAS and ShrunkCovariance
scov = ShrunkCovariance(shrinkage=oa.shrinkage_, assume_centered=True)
scov.fit(X_centered)
assert_array_almost_equal(scov.covariance_, oa.covariance_, 4)
# test with n_features = 1
X_1d = X[:, 0].reshape((-1, 1))
oa = OAS(assume_centered=True)
oa.fit(X_1d)
oa_cov_from_mle, oa_shinkrage_from_mle = oas(X_1d, assume_centered=True)
assert_array_almost_equal(oa_cov_from_mle, oa.covariance_, 4)
assert_almost_equal(oa_shinkrage_from_mle, oa.shrinkage_)
assert_array_almost_equal((X_1d ** 2).sum() / n_samples, oa.covariance_, 4)
# test shrinkage coeff on a simple data set (without saving precision)
oa = OAS(store_precision=False, assume_centered=True)
oa.fit(X_centered)
assert_almost_equal(oa.score(X_centered), score_, 4)
assert(oa.precision_ is None)
# Same tests without assuming centered data--------------------------------
# test shrinkage coeff on a simple data set
oa = OAS()
oa.fit(X)
assert_almost_equal(oa.shrinkage_, shrinkage_, 4)
assert_almost_equal(oa.score(X), score_, 4)
# compare shrunk covariance obtained from data and from MLE estimate
oa_cov_from_mle, oa_shinkrage_from_mle = oas(X)
assert_array_almost_equal(oa_cov_from_mle, oa.covariance_, 4)
assert_almost_equal(oa_shinkrage_from_mle, oa.shrinkage_)
# compare estimates given by OAS and ShrunkCovariance
scov = ShrunkCovariance(shrinkage=oa.shrinkage_)
scov.fit(X)
assert_array_almost_equal(scov.covariance_, oa.covariance_, 4)
# test with n_features = 1
X_1d = X[:, 0].reshape((-1, 1))
oa = OAS()
oa.fit(X_1d)
oa_cov_from_mle, oa_shinkrage_from_mle = oas(X_1d)
assert_array_almost_equal(oa_cov_from_mle, oa.covariance_, 4)
assert_almost_equal(oa_shinkrage_from_mle, oa.shrinkage_)
assert_array_almost_equal(empirical_covariance(X_1d), oa.covariance_, 4)
# test with one sample
# FIXME I don't know what this test does
X_1sample = np.arange(5)
oa = OAS()
assert_warns(UserWarning, oa.fit, X_1sample)
assert_array_almost_equal(oa.covariance_,
np.zeros(shape=(5, 5), dtype=np.float64))
# test shrinkage coeff on a simple data set (without saving precision)
oa = OAS(store_precision=False)
oa.fit(X)
assert_almost_equal(oa.score(X), score_, 4)
assert(oa.precision_ is None)
| bsd-3-clause |
chrsrds/scikit-learn | sklearn/experimental/enable_hist_gradient_boosting.py | 9 | 1215 | """Enables histogram-based gradient boosting estimators.
The API and results of these estimators might change without any deprecation
cycle.
Importing this file dynamically sets the
:class:`sklearn.ensemble.HistGradientBoostingClassifier` and
:class:`sklearn.ensemble.HistGradientBoostingRegressor` as attributes of the
ensemble module::
>>> # explicitly require this experimental feature
>>> from sklearn.experimental import enable_hist_gradient_boosting # noqa
>>> # now you can import normally from ensemble
>>> from sklearn.ensemble import HistGradientBoostingClassifier
>>> from sklearn.ensemble import HistGradientBoostingRegressor
The ``# noqa`` comment can be removed: it just tells linters like
flake8 to ignore the import, which appears as unused.
"""
from ..ensemble._hist_gradient_boosting.gradient_boosting import (
HistGradientBoostingClassifier,
HistGradientBoostingRegressor
)
from .. import ensemble
ensemble.HistGradientBoostingClassifier = HistGradientBoostingClassifier
ensemble.HistGradientBoostingRegressor = HistGradientBoostingRegressor
ensemble.__all__ += ['HistGradientBoostingClassifier',
'HistGradientBoostingRegressor']
| bsd-3-clause |
plotly/plotly.py | packages/python/plotly/plotly/graph_objs/choroplethmapbox/_colorbar.py | 1 | 73541 | from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class ColorBar(_BaseTraceHierarchyType):
# class properties
# --------------------
_parent_path_str = "choroplethmapbox"
_path_str = "choroplethmapbox.colorbar"
_valid_props = {
"bgcolor",
"bordercolor",
"borderwidth",
"dtick",
"exponentformat",
"len",
"lenmode",
"minexponent",
"nticks",
"outlinecolor",
"outlinewidth",
"separatethousands",
"showexponent",
"showticklabels",
"showtickprefix",
"showticksuffix",
"thickness",
"thicknessmode",
"tick0",
"tickangle",
"tickcolor",
"tickfont",
"tickformat",
"tickformatstopdefaults",
"tickformatstops",
"ticklabeloverflow",
"ticklabelposition",
"ticklen",
"tickmode",
"tickprefix",
"ticks",
"ticksuffix",
"ticktext",
"ticktextsrc",
"tickvals",
"tickvalssrc",
"tickwidth",
"title",
"titlefont",
"titleside",
"x",
"xanchor",
"xpad",
"y",
"yanchor",
"ypad",
}
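    # Illustrative construction (a sketch, not part of the generated class;
    # property names follow `_valid_props` above):
    #   import plotly.graph_objects as go
    #   cb = go.choroplethmapbox.ColorBar(title=dict(text="GDP per capita"),
    #                                     thickness=15, len=0.75)
    #   fig = go.Figure(go.Choroplethmapbox(colorbar=cb))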
# bgcolor
# -------
@property
def bgcolor(self):
"""
Sets the color of padded area.
The 'bgcolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
Returns
-------
str
"""
return self["bgcolor"]
@bgcolor.setter
def bgcolor(self, val):
self["bgcolor"] = val
# bordercolor
# -----------
@property
def bordercolor(self):
"""
Sets the axis line color.
The 'bordercolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
Returns
-------
str
"""
return self["bordercolor"]
@bordercolor.setter
def bordercolor(self, val):
self["bordercolor"] = val
# borderwidth
# -----------
@property
def borderwidth(self):
"""
        Sets the width (in px) of the border enclosing this color bar.
The 'borderwidth' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["borderwidth"]
@borderwidth.setter
def borderwidth(self, val):
self["borderwidth"] = val
# dtick
# -----
@property
def dtick(self):
"""
Sets the step in-between ticks on this axis. Use with `tick0`.
Must be a positive number, or special strings available to
"log" and "date" axes. If the axis `type` is "log", then ticks
are set every 10^(n*dtick) where n is the tick number. For
example, to set a tick mark at 1, 10, 100, 1000, ... set dtick
to 1. To set tick marks at 1, 100, 10000, ... set dtick to 2.
To set tick marks at 1, 5, 25, 125, 625, 3125, ... set dtick to
log_10(5), or 0.69897000433. "log" has several special values;
"L<f>", where `f` is a positive number, gives ticks linearly
spaced in value (but not position). For example `tick0` = 0.1,
`dtick` = "L0.5" will put ticks at 0.1, 0.6, 1.1, 1.6 etc. To
show powers of 10 plus small digits between, use "D1" (all
digits) or "D2" (only 2 and 5). `tick0` is ignored for "D1" and
"D2". If the axis `type` is "date", then you must convert the
time to milliseconds. For example, to set the interval between
ticks to one day, set `dtick` to 86400000.0. "date" also has
special values "M<n>" gives ticks spaced by a number of months.
`n` must be a positive integer. To set ticks on the 15th of
every third month, set `tick0` to "2000-01-15" and `dtick` to
"M3". To set ticks every 4 years, set `dtick` to "M48"
The 'dtick' property accepts values of any type
Returns
-------
Any
"""
return self["dtick"]
@dtick.setter
def dtick(self, val):
self["dtick"] = val
# exponentformat
# --------------
@property
def exponentformat(self):
"""
Determines a formatting rule for the tick exponents. For
example, consider the number 1,000,000,000. If "none", it
appears as 1,000,000,000. If "e", 1e+9. If "E", 1E+9. If
"power", 1x10^9 (with 9 in a super script). If "SI", 1G. If
"B", 1B.
The 'exponentformat' property is an enumeration that may be specified as:
- One of the following enumeration values:
['none', 'e', 'E', 'power', 'SI', 'B']
Returns
-------
Any
"""
return self["exponentformat"]
@exponentformat.setter
def exponentformat(self, val):
self["exponentformat"] = val
# len
# ---
@property
def len(self):
"""
        Sets the length of the color bar. This measure excludes the
padding of both ends. That is, the color bar length is this
length minus the padding on both ends.
The 'len' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["len"]
@len.setter
def len(self, val):
self["len"] = val
# lenmode
# -------
@property
def lenmode(self):
"""
Determines whether this color bar's length (i.e. the measure in
the color variation direction) is set in units of plot
"fraction" or in *pixels. Use `len` to set the value.
The 'lenmode' property is an enumeration that may be specified as:
- One of the following enumeration values:
['fraction', 'pixels']
Returns
-------
Any
"""
return self["lenmode"]
@lenmode.setter
def lenmode(self, val):
self["lenmode"] = val
# minexponent
# -----------
@property
def minexponent(self):
"""
Hide SI prefix for 10^n if |n| is below this number. This only
has an effect when `tickformat` is "SI" or "B".
The 'minexponent' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["minexponent"]
@minexponent.setter
def minexponent(self, val):
self["minexponent"] = val
# nticks
# ------
@property
def nticks(self):
"""
Specifies the maximum number of ticks for the particular axis.
The actual number of ticks will be chosen automatically to be
less than or equal to `nticks`. Has an effect only if
`tickmode` is set to "auto".
The 'nticks' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [0, 9223372036854775807]
Returns
-------
int
"""
return self["nticks"]
@nticks.setter
def nticks(self, val):
self["nticks"] = val
# outlinecolor
# ------------
@property
def outlinecolor(self):
"""
Sets the axis line color.
The 'outlinecolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
Returns
-------
str
"""
return self["outlinecolor"]
@outlinecolor.setter
def outlinecolor(self, val):
self["outlinecolor"] = val
# outlinewidth
# ------------
@property
def outlinewidth(self):
"""
Sets the width (in px) of the axis line.
The 'outlinewidth' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["outlinewidth"]
@outlinewidth.setter
def outlinewidth(self, val):
self["outlinewidth"] = val
# separatethousands
# -----------------
@property
def separatethousands(self):
"""
If "true", even 4-digit integers are separated
The 'separatethousands' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["separatethousands"]
@separatethousands.setter
def separatethousands(self, val):
self["separatethousands"] = val
# showexponent
# ------------
@property
def showexponent(self):
"""
If "all", all exponents are shown besides their significands.
If "first", only the exponent of the first tick is shown. If
"last", only the exponent of the last tick is shown. If "none",
no exponents appear.
The 'showexponent' property is an enumeration that may be specified as:
- One of the following enumeration values:
['all', 'first', 'last', 'none']
Returns
-------
Any
"""
return self["showexponent"]
@showexponent.setter
def showexponent(self, val):
self["showexponent"] = val
# showticklabels
# --------------
@property
def showticklabels(self):
"""
Determines whether or not the tick labels are drawn.
The 'showticklabels' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["showticklabels"]
@showticklabels.setter
def showticklabels(self, val):
self["showticklabels"] = val
# showtickprefix
# --------------
@property
def showtickprefix(self):
"""
If "all", all tick labels are displayed with a prefix. If
"first", only the first tick is displayed with a prefix. If
"last", only the last tick is displayed with a suffix. If
"none", tick prefixes are hidden.
The 'showtickprefix' property is an enumeration that may be specified as:
- One of the following enumeration values:
['all', 'first', 'last', 'none']
Returns
-------
Any
"""
return self["showtickprefix"]
@showtickprefix.setter
def showtickprefix(self, val):
self["showtickprefix"] = val
# showticksuffix
# --------------
@property
def showticksuffix(self):
"""
Same as `showtickprefix` but for tick suffixes.
The 'showticksuffix' property is an enumeration that may be specified as:
- One of the following enumeration values:
['all', 'first', 'last', 'none']
Returns
-------
Any
"""
return self["showticksuffix"]
@showticksuffix.setter
def showticksuffix(self, val):
self["showticksuffix"] = val
# thickness
# ---------
@property
def thickness(self):
"""
        Sets the thickness of the color bar. This measure excludes the
size of the padding, ticks and labels.
The 'thickness' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["thickness"]
@thickness.setter
def thickness(self, val):
self["thickness"] = val
# thicknessmode
# -------------
@property
def thicknessmode(self):
"""
Determines whether this color bar's thickness (i.e. the measure
in the constant color direction) is set in units of plot
"fraction" or in "pixels". Use `thickness` to set the value.
The 'thicknessmode' property is an enumeration that may be specified as:
- One of the following enumeration values:
['fraction', 'pixels']
Returns
-------
Any
"""
return self["thicknessmode"]
@thicknessmode.setter
def thicknessmode(self, val):
self["thicknessmode"] = val
# tick0
# -----
@property
def tick0(self):
"""
Sets the placement of the first tick on this axis. Use with
`dtick`. If the axis `type` is "log", then you must take the
log of your starting tick (e.g. to set the starting tick to
100, set the `tick0` to 2) except when `dtick`=*L<f>* (see
`dtick` for more info). If the axis `type` is "date", it should
be a date string, like date data. If the axis `type` is
"category", it should be a number, using the scale where each
category is assigned a serial number from zero in the order it
appears.
The 'tick0' property accepts values of any type
Returns
-------
Any
"""
return self["tick0"]
@tick0.setter
def tick0(self, val):
self["tick0"] = val
# tickangle
# ---------
@property
def tickangle(self):
"""
Sets the angle of the tick labels with respect to the
horizontal. For example, a `tickangle` of -90 draws the tick
labels vertically.
The 'tickangle' property is a angle (in degrees) that may be
specified as a number between -180 and 180. Numeric values outside this
range are converted to the equivalent value
(e.g. 270 is converted to -90).
Returns
-------
int|float
"""
return self["tickangle"]
@tickangle.setter
def tickangle(self, val):
self["tickangle"] = val
# tickcolor
# ---------
@property
def tickcolor(self):
"""
Sets the tick color.
The 'tickcolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
Returns
-------
str
"""
return self["tickcolor"]
@tickcolor.setter
def tickcolor(self, val):
self["tickcolor"] = val
# tickfont
# --------
@property
def tickfont(self):
"""
Sets the color bar's tick label font
The 'tickfont' property is an instance of Tickfont
that may be specified as:
- An instance of :class:`plotly.graph_objs.choroplethmapbox.colorbar.Tickfont`
- A dict of string/value properties that will be passed
to the Tickfont constructor
Supported dict properties:
color
family
HTML font family - the typeface that will be
applied by the web browser. The web browser
will only be able to apply a font if it is
available on the system which it operates.
Provide multiple font families, separated by
commas, to indicate the preference in which to
apply fonts if they aren't available on the
system. The Chart Studio Cloud (at
https://chart-studio.plotly.com or on-premise)
generates images on a server, where only a
select number of fonts are installed and
supported. These include "Arial", "Balto",
"Courier New", "Droid Sans",, "Droid Serif",
"Droid Sans Mono", "Gravitas One", "Old
Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
size
Returns
-------
plotly.graph_objs.choroplethmapbox.colorbar.Tickfont
"""
return self["tickfont"]
@tickfont.setter
def tickfont(self, val):
self["tickfont"] = val
# tickformat
# ----------
@property
def tickformat(self):
"""
Sets the tick label formatting rule using d3 formatting mini-
languages which are very similar to those in Python. For
numbers, see: https://github.com/d3/d3-3.x-api-
reference/blob/master/Formatting.md#d3_format. And for dates
see: https://github.com/d3/d3-time-format#locale_format. We add
two items to d3's date formatter: "%h" for half of the year as
a decimal number as well as "%{n}f" for fractional seconds with
n digits. For example, *2016-10-13 09:15:23.456* with
tickformat "%H~%M~%S.%2f" would display "09~15~23.46"
The 'tickformat' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["tickformat"]
@tickformat.setter
def tickformat(self, val):
self["tickformat"] = val
# tickformatstops
# ---------------
@property
def tickformatstops(self):
"""
The 'tickformatstops' property is a tuple of instances of
Tickformatstop that may be specified as:
- A list or tuple of instances of plotly.graph_objs.choroplethmapbox.colorbar.Tickformatstop
- A list or tuple of dicts of string/value properties that
will be passed to the Tickformatstop constructor
Supported dict properties:
dtickrange
range [*min*, *max*], where "min", "max" -
dtick values which describe some zoom level, it
is possible to omit "min" or "max" value by
passing "null"
enabled
Determines whether or not this stop is used. If
`false`, this stop is ignored even within its
`dtickrange`.
name
When used in a template, named items are
created in the output figure in addition to any
items the figure already has in this array. You
can modify these items in the output figure by
making your own item with `templateitemname`
matching this `name` alongside your
modifications (including `visible: false` or
`enabled: false` to hide it). Has no effect
outside of a template.
templateitemname
Used to refer to a named item in this array in
the template. Named items from the template
will be created even without a matching item in
the input figure, but you can modify one by
making an item with `templateitemname` matching
its `name`, alongside your modifications
(including `visible: false` or `enabled: false`
to hide it). If there is no template or no
matching item, this item will be hidden unless
you explicitly show it with `visible: true`.
value
string - dtickformat for described zoom level,
the same as "tickformat"
Returns
-------
tuple[plotly.graph_objs.choroplethmapbox.colorbar.Tickformatstop]
"""
return self["tickformatstops"]
@tickformatstops.setter
def tickformatstops(self, val):
self["tickformatstops"] = val
# tickformatstopdefaults
# ----------------------
@property
def tickformatstopdefaults(self):
"""
        When used in a template (as
        layout.template.data.choroplethmapbox.colorbar.tickformatstopdefaults),
        sets the default property values to use for elements of
        choroplethmapbox.colorbar.tickformatstops
The 'tickformatstopdefaults' property is an instance of Tickformatstop
that may be specified as:
- An instance of :class:`plotly.graph_objs.choroplethmapbox.colorbar.Tickformatstop`
- A dict of string/value properties that will be passed
to the Tickformatstop constructor
Supported dict properties:
Returns
-------
plotly.graph_objs.choroplethmapbox.colorbar.Tickformatstop
"""
return self["tickformatstopdefaults"]
@tickformatstopdefaults.setter
def tickformatstopdefaults(self, val):
self["tickformatstopdefaults"] = val
# ticklabeloverflow
# -----------------
@property
def ticklabeloverflow(self):
"""
Determines how we handle tick labels that would overflow either
the graph div or the domain of the axis. The default value for
inside tick labels is *hide past domain*. In other cases the
default is *hide past div*.
The 'ticklabeloverflow' property is an enumeration that may be specified as:
- One of the following enumeration values:
['allow', 'hide past div', 'hide past domain']
Returns
-------
Any
"""
return self["ticklabeloverflow"]
@ticklabeloverflow.setter
def ticklabeloverflow(self, val):
self["ticklabeloverflow"] = val
# ticklabelposition
# -----------------
@property
def ticklabelposition(self):
"""
Determines where tick labels are drawn.
The 'ticklabelposition' property is an enumeration that may be specified as:
- One of the following enumeration values:
['outside', 'inside', 'outside top', 'inside top',
'outside bottom', 'inside bottom']
Returns
-------
Any
"""
return self["ticklabelposition"]
@ticklabelposition.setter
def ticklabelposition(self, val):
self["ticklabelposition"] = val
# ticklen
# -------
@property
def ticklen(self):
"""
Sets the tick length (in px).
The 'ticklen' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["ticklen"]
@ticklen.setter
def ticklen(self, val):
self["ticklen"] = val
# tickmode
# --------
@property
def tickmode(self):
"""
Sets the tick mode for this axis. If "auto", the number of
ticks is set via `nticks`. If "linear", the placement of the
ticks is determined by a starting position `tick0` and a tick
step `dtick` ("linear" is the default value if `tick0` and
`dtick` are provided). If "array", the placement of the ticks
is set via `tickvals` and the tick text is `ticktext`. ("array"
is the default value if `tickvals` is provided).
The 'tickmode' property is an enumeration that may be specified as:
- One of the following enumeration values:
['auto', 'linear', 'array']
Returns
-------
Any
"""
return self["tickmode"]
@tickmode.setter
def tickmode(self, val):
self["tickmode"] = val
# tickprefix
# ----------
@property
def tickprefix(self):
"""
Sets a tick label prefix.
The 'tickprefix' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["tickprefix"]
@tickprefix.setter
def tickprefix(self, val):
self["tickprefix"] = val
# ticks
# -----
@property
def ticks(self):
"""
Determines whether ticks are drawn or not. If "", this axis'
ticks are not drawn. If "outside" ("inside"), this axis' are
drawn outside (inside) the axis lines.
The 'ticks' property is an enumeration that may be specified as:
- One of the following enumeration values:
['outside', 'inside', '']
Returns
-------
Any
"""
return self["ticks"]
@ticks.setter
def ticks(self, val):
self["ticks"] = val
# ticksuffix
# ----------
@property
def ticksuffix(self):
"""
Sets a tick label suffix.
The 'ticksuffix' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["ticksuffix"]
@ticksuffix.setter
def ticksuffix(self, val):
self["ticksuffix"] = val
# ticktext
# --------
@property
def ticktext(self):
"""
Sets the text displayed at the ticks position via `tickvals`.
Only has an effect if `tickmode` is set to "array". Used with
`tickvals`.
The 'ticktext' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["ticktext"]
@ticktext.setter
def ticktext(self, val):
self["ticktext"] = val
# ticktextsrc
# -----------
@property
def ticktextsrc(self):
"""
Sets the source reference on Chart Studio Cloud for ticktext .
The 'ticktextsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["ticktextsrc"]
@ticktextsrc.setter
def ticktextsrc(self, val):
self["ticktextsrc"] = val
# tickvals
# --------
@property
def tickvals(self):
"""
Sets the values at which ticks on this axis appear. Only has an
effect if `tickmode` is set to "array". Used with `ticktext`.
The 'tickvals' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["tickvals"]
@tickvals.setter
def tickvals(self, val):
self["tickvals"] = val
# tickvalssrc
# -----------
@property
def tickvalssrc(self):
"""
Sets the source reference on Chart Studio Cloud for tickvals .
The 'tickvalssrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["tickvalssrc"]
@tickvalssrc.setter
def tickvalssrc(self, val):
self["tickvalssrc"] = val
# tickwidth
# ---------
@property
def tickwidth(self):
"""
Sets the tick width (in px).
The 'tickwidth' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["tickwidth"]
@tickwidth.setter
def tickwidth(self, val):
self["tickwidth"] = val
# title
# -----
@property
def title(self):
"""
The 'title' property is an instance of Title
that may be specified as:
- An instance of :class:`plotly.graph_objs.choroplethmapbox.colorbar.Title`
- A dict of string/value properties that will be passed
to the Title constructor
Supported dict properties:
font
Sets this color bar's title font. Note that the
title's font used to be set by the now
deprecated `titlefont` attribute.
side
Determines the location of color bar's title
with respect to the color bar. Note that the
title's location used to be set by the now
deprecated `titleside` attribute.
text
Sets the title of the color bar. Note that
before the existence of `title.text`, the
title's contents used to be defined as the
`title` attribute itself. This behavior has
been deprecated.
Returns
-------
plotly.graph_objs.choroplethmapbox.colorbar.Title
"""
return self["title"]
@title.setter
def title(self, val):
self["title"] = val
# titlefont
# ---------
@property
def titlefont(self):
"""
Deprecated: Please use choroplethmapbox.colorbar.title.font
instead. Sets this color bar's title font. Note that the
title's font used to be set by the now deprecated `titlefont`
attribute.
The 'font' property is an instance of Font
that may be specified as:
- An instance of :class:`plotly.graph_objs.choroplethmapbox.colorbar.title.Font`
- A dict of string/value properties that will be passed
to the Font constructor
Supported dict properties:
color
family
HTML font family - the typeface that will be
applied by the web browser. The web browser
will only be able to apply a font if it is
available on the system which it operates.
Provide multiple font families, separated by
commas, to indicate the preference in which to
apply fonts if they aren't available on the
system. The Chart Studio Cloud (at
https://chart-studio.plotly.com or on-premise)
generates images on a server, where only a
select number of fonts are installed and
supported. These include "Arial", "Balto",
"Courier New", "Droid Sans",, "Droid Serif",
"Droid Sans Mono", "Gravitas One", "Old
Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
size
Returns
-------
"""
return self["titlefont"]
@titlefont.setter
def titlefont(self, val):
self["titlefont"] = val
# titleside
# ---------
@property
def titleside(self):
"""
Deprecated: Please use choroplethmapbox.colorbar.title.side
instead. Determines the location of color bar's title with
respect to the color bar. Note that the title's location used
to be set by the now deprecated `titleside` attribute.
The 'side' property is an enumeration that may be specified as:
- One of the following enumeration values:
['right', 'top', 'bottom']
Returns
-------
"""
return self["titleside"]
@titleside.setter
def titleside(self, val):
self["titleside"] = val
# x
# -
@property
def x(self):
"""
Sets the x position of the color bar (in plot fraction).
The 'x' property is a number and may be specified as:
- An int or float in the interval [-2, 3]
Returns
-------
int|float
"""
return self["x"]
@x.setter
def x(self, val):
self["x"] = val
# xanchor
# -------
@property
def xanchor(self):
"""
Sets this color bar's horizontal position anchor. This anchor
binds the `x` position to the "left", "center" or "right" of
the color bar.
The 'xanchor' property is an enumeration that may be specified as:
- One of the following enumeration values:
['left', 'center', 'right']
Returns
-------
Any
"""
return self["xanchor"]
@xanchor.setter
def xanchor(self, val):
self["xanchor"] = val
# xpad
# ----
@property
def xpad(self):
"""
Sets the amount of padding (in px) along the x direction.
The 'xpad' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["xpad"]
@xpad.setter
def xpad(self, val):
self["xpad"] = val
# y
# -
@property
def y(self):
"""
Sets the y position of the color bar (in plot fraction).
The 'y' property is a number and may be specified as:
- An int or float in the interval [-2, 3]
Returns
-------
int|float
"""
return self["y"]
@y.setter
def y(self, val):
self["y"] = val
# yanchor
# -------
@property
def yanchor(self):
"""
        Sets this color bar's vertical position anchor. This anchor
binds the `y` position to the "top", "middle" or "bottom" of
the color bar.
The 'yanchor' property is an enumeration that may be specified as:
- One of the following enumeration values:
['top', 'middle', 'bottom']
Returns
-------
Any
"""
return self["yanchor"]
@yanchor.setter
def yanchor(self, val):
self["yanchor"] = val
# ypad
# ----
@property
def ypad(self):
"""
Sets the amount of padding (in px) along the y direction.
The 'ypad' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["ypad"]
@ypad.setter
def ypad(self, val):
self["ypad"] = val
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
bgcolor
Sets the color of padded area.
bordercolor
Sets the axis line color.
borderwidth
Sets the width (in px) or the border enclosing this
color bar.
dtick
Sets the step in-between ticks on this axis. Use with
`tick0`. Must be a positive number, or special strings
available to "log" and "date" axes. If the axis `type`
is "log", then ticks are set every 10^(n*dtick) where n
is the tick number. For example, to set a tick mark at
1, 10, 100, 1000, ... set dtick to 1. To set tick marks
at 1, 100, 10000, ... set dtick to 2. To set tick marks
at 1, 5, 25, 125, 625, 3125, ... set dtick to
log_10(5), or 0.69897000433. "log" has several special
values; "L<f>", where `f` is a positive number, gives
ticks linearly spaced in value (but not position). For
example `tick0` = 0.1, `dtick` = "L0.5" will put ticks
at 0.1, 0.6, 1.1, 1.6 etc. To show powers of 10 plus
small digits between, use "D1" (all digits) or "D2"
(only 2 and 5). `tick0` is ignored for "D1" and "D2".
If the axis `type` is "date", then you must convert the
time to milliseconds. For example, to set the interval
between ticks to one day, set `dtick` to 86400000.0.
"date" also has special values "M<n>" gives ticks
spaced by a number of months. `n` must be a positive
integer. To set ticks on the 15th of every third month,
set `tick0` to "2000-01-15" and `dtick` to "M3". To set
ticks every 4 years, set `dtick` to "M48"
exponentformat
Determines a formatting rule for the tick exponents.
For example, consider the number 1,000,000,000. If
"none", it appears as 1,000,000,000. If "e", 1e+9. If
"E", 1E+9. If "power", 1x10^9 (with 9 in a super
script). If "SI", 1G. If "B", 1B.
len
            Sets the length of the color bar. This measure excludes
the padding of both ends. That is, the color bar length
is this length minus the padding on both ends.
lenmode
Determines whether this color bar's length (i.e. the
measure in the color variation direction) is set in
units of plot "fraction" or in *pixels. Use `len` to
set the value.
minexponent
Hide SI prefix for 10^n if |n| is below this number.
This only has an effect when `tickformat` is "SI" or
"B".
nticks
Specifies the maximum number of ticks for the
particular axis. The actual number of ticks will be
chosen automatically to be less than or equal to
`nticks`. Has an effect only if `tickmode` is set to
"auto".
outlinecolor
Sets the axis line color.
outlinewidth
Sets the width (in px) of the axis line.
separatethousands
If "true", even 4-digit integers are separated
showexponent
If "all", all exponents are shown besides their
significands. If "first", only the exponent of the
first tick is shown. If "last", only the exponent of
the last tick is shown. If "none", no exponents appear.
showticklabels
Determines whether or not the tick labels are drawn.
showtickprefix
If "all", all tick labels are displayed with a prefix.
If "first", only the first tick is displayed with a
prefix. If "last", only the last tick is displayed with
a suffix. If "none", tick prefixes are hidden.
showticksuffix
Same as `showtickprefix` but for tick suffixes.
thickness
            Sets the thickness of the color bar. This measure
excludes the size of the padding, ticks and labels.
thicknessmode
Determines whether this color bar's thickness (i.e. the
measure in the constant color direction) is set in
units of plot "fraction" or in "pixels". Use
`thickness` to set the value.
tick0
Sets the placement of the first tick on this axis. Use
with `dtick`. If the axis `type` is "log", then you
must take the log of your starting tick (e.g. to set
the starting tick to 100, set the `tick0` to 2) except
when `dtick`=*L<f>* (see `dtick` for more info). If the
axis `type` is "date", it should be a date string, like
date data. If the axis `type` is "category", it should
be a number, using the scale where each category is
assigned a serial number from zero in the order it
appears.
tickangle
Sets the angle of the tick labels with respect to the
horizontal. For example, a `tickangle` of -90 draws the
tick labels vertically.
tickcolor
Sets the tick color.
tickfont
Sets the color bar's tick label font
tickformat
Sets the tick label formatting rule using d3 formatting
mini-languages which are very similar to those in
Python. For numbers, see:
https://github.com/d3/d3-3.x-api-
reference/blob/master/Formatting.md#d3_format. And for
dates see: https://github.com/d3/d3-time-
format#locale_format. We add two items to d3's date
formatter: "%h" for half of the year as a decimal
number as well as "%{n}f" for fractional seconds with n
digits. For example, *2016-10-13 09:15:23.456* with
tickformat "%H~%M~%S.%2f" would display "09~15~23.46"
tickformatstops
A tuple of :class:`plotly.graph_objects.choroplethmapbo
x.colorbar.Tickformatstop` instances or dicts with
compatible properties
tickformatstopdefaults
When used in a template (as layout.template.data.chorop
lethmapbox.colorbar.tickformatstopdefaults), sets the
default property values to use for elements of
choroplethmapbox.colorbar.tickformatstops
ticklabeloverflow
Determines how we handle tick labels that would
overflow either the graph div or the domain of the
axis. The default value for inside tick labels is *hide
past domain*. In other cases the default is *hide past
div*.
ticklabelposition
Determines where tick labels are drawn.
ticklen
Sets the tick length (in px).
tickmode
Sets the tick mode for this axis. If "auto", the number
of ticks is set via `nticks`. If "linear", the
placement of the ticks is determined by a starting
position `tick0` and a tick step `dtick` ("linear" is
the default value if `tick0` and `dtick` are provided).
If "array", the placement of the ticks is set via
`tickvals` and the tick text is `ticktext`. ("array" is
the default value if `tickvals` is provided).
tickprefix
Sets a tick label prefix.
ticks
Determines whether ticks are drawn or not. If "", this
axis' ticks are not drawn. If "outside" ("inside"),
            this axis' ticks are drawn outside (inside) the axis lines.
ticksuffix
Sets a tick label suffix.
ticktext
Sets the text displayed at the ticks position via
`tickvals`. Only has an effect if `tickmode` is set to
"array". Used with `tickvals`.
ticktextsrc
Sets the source reference on Chart Studio Cloud for
ticktext .
tickvals
Sets the values at which ticks on this axis appear.
Only has an effect if `tickmode` is set to "array".
Used with `ticktext`.
tickvalssrc
Sets the source reference on Chart Studio Cloud for
tickvals .
tickwidth
Sets the tick width (in px).
title
:class:`plotly.graph_objects.choroplethmapbox.colorbar.
Title` instance or dict with compatible properties
titlefont
Deprecated: Please use
choroplethmapbox.colorbar.title.font instead. Sets this
color bar's title font. Note that the title's font used
to be set by the now deprecated `titlefont` attribute.
titleside
Deprecated: Please use
choroplethmapbox.colorbar.title.side instead.
Determines the location of color bar's title with
respect to the color bar. Note that the title's
location used to be set by the now deprecated
`titleside` attribute.
x
Sets the x position of the color bar (in plot
fraction).
xanchor
Sets this color bar's horizontal position anchor. This
anchor binds the `x` position to the "left", "center"
or "right" of the color bar.
xpad
Sets the amount of padding (in px) along the x
direction.
y
Sets the y position of the color bar (in plot
fraction).
yanchor
            Sets this color bar's vertical position anchor. This
anchor binds the `y` position to the "top", "middle" or
"bottom" of the color bar.
ypad
Sets the amount of padding (in px) along the y
direction.
"""
_mapped_properties = {
"titlefont": ("title", "font"),
"titleside": ("title", "side"),
}
def __init__(
self,
arg=None,
bgcolor=None,
bordercolor=None,
borderwidth=None,
dtick=None,
exponentformat=None,
len=None,
lenmode=None,
minexponent=None,
nticks=None,
outlinecolor=None,
outlinewidth=None,
separatethousands=None,
showexponent=None,
showticklabels=None,
showtickprefix=None,
showticksuffix=None,
thickness=None,
thicknessmode=None,
tick0=None,
tickangle=None,
tickcolor=None,
tickfont=None,
tickformat=None,
tickformatstops=None,
tickformatstopdefaults=None,
ticklabeloverflow=None,
ticklabelposition=None,
ticklen=None,
tickmode=None,
tickprefix=None,
ticks=None,
ticksuffix=None,
ticktext=None,
ticktextsrc=None,
tickvals=None,
tickvalssrc=None,
tickwidth=None,
title=None,
titlefont=None,
titleside=None,
x=None,
xanchor=None,
xpad=None,
y=None,
yanchor=None,
ypad=None,
**kwargs
):
"""
Construct a new ColorBar object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.choroplethmapbox.ColorBar`
bgcolor
Sets the color of padded area.
bordercolor
Sets the axis line color.
borderwidth
Sets the width (in px) or the border enclosing this
color bar.
dtick
Sets the step in-between ticks on this axis. Use with
`tick0`. Must be a positive number, or special strings
available to "log" and "date" axes. If the axis `type`
is "log", then ticks are set every 10^(n*dtick) where n
is the tick number. For example, to set a tick mark at
1, 10, 100, 1000, ... set dtick to 1. To set tick marks
at 1, 100, 10000, ... set dtick to 2. To set tick marks
at 1, 5, 25, 125, 625, 3125, ... set dtick to
log_10(5), or 0.69897000433. "log" has several special
values; "L<f>", where `f` is a positive number, gives
ticks linearly spaced in value (but not position). For
example `tick0` = 0.1, `dtick` = "L0.5" will put ticks
at 0.1, 0.6, 1.1, 1.6 etc. To show powers of 10 plus
small digits between, use "D1" (all digits) or "D2"
(only 2 and 5). `tick0` is ignored for "D1" and "D2".
If the axis `type` is "date", then you must convert the
time to milliseconds. For example, to set the interval
between ticks to one day, set `dtick` to 86400000.0.
"date" also has special values "M<n>" gives ticks
spaced by a number of months. `n` must be a positive
integer. To set ticks on the 15th of every third month,
set `tick0` to "2000-01-15" and `dtick` to "M3". To set
ticks every 4 years, set `dtick` to "M48"
exponentformat
Determines a formatting rule for the tick exponents.
For example, consider the number 1,000,000,000. If
"none", it appears as 1,000,000,000. If "e", 1e+9. If
"E", 1E+9. If "power", 1x10^9 (with 9 in a super
script). If "SI", 1G. If "B", 1B.
len
            Sets the length of the color bar. This measure excludes
the padding of both ends. That is, the color bar length
is this length minus the padding on both ends.
lenmode
Determines whether this color bar's length (i.e. the
measure in the color variation direction) is set in
units of plot "fraction" or in *pixels. Use `len` to
set the value.
minexponent
Hide SI prefix for 10^n if |n| is below this number.
This only has an effect when `tickformat` is "SI" or
"B".
nticks
Specifies the maximum number of ticks for the
particular axis. The actual number of ticks will be
chosen automatically to be less than or equal to
`nticks`. Has an effect only if `tickmode` is set to
"auto".
outlinecolor
Sets the axis line color.
outlinewidth
Sets the width (in px) of the axis line.
separatethousands
If "true", even 4-digit integers are separated
showexponent
If "all", all exponents are shown besides their
significands. If "first", only the exponent of the
first tick is shown. If "last", only the exponent of
the last tick is shown. If "none", no exponents appear.
showticklabels
Determines whether or not the tick labels are drawn.
showtickprefix
If "all", all tick labels are displayed with a prefix.
If "first", only the first tick is displayed with a
prefix. If "last", only the last tick is displayed with
a suffix. If "none", tick prefixes are hidden.
showticksuffix
Same as `showtickprefix` but for tick suffixes.
thickness
            Sets the thickness of the color bar. This measure
excludes the size of the padding, ticks and labels.
thicknessmode
Determines whether this color bar's thickness (i.e. the
measure in the constant color direction) is set in
units of plot "fraction" or in "pixels". Use
`thickness` to set the value.
tick0
Sets the placement of the first tick on this axis. Use
with `dtick`. If the axis `type` is "log", then you
must take the log of your starting tick (e.g. to set
the starting tick to 100, set the `tick0` to 2) except
when `dtick`=*L<f>* (see `dtick` for more info). If the
axis `type` is "date", it should be a date string, like
date data. If the axis `type` is "category", it should
be a number, using the scale where each category is
assigned a serial number from zero in the order it
appears.
tickangle
Sets the angle of the tick labels with respect to the
horizontal. For example, a `tickangle` of -90 draws the
tick labels vertically.
tickcolor
Sets the tick color.
tickfont
Sets the color bar's tick label font
tickformat
Sets the tick label formatting rule using d3 formatting
mini-languages which are very similar to those in
Python. For numbers, see:
https://github.com/d3/d3-3.x-api-
reference/blob/master/Formatting.md#d3_format. And for
dates see: https://github.com/d3/d3-time-
format#locale_format. We add two items to d3's date
formatter: "%h" for half of the year as a decimal
number as well as "%{n}f" for fractional seconds with n
digits. For example, *2016-10-13 09:15:23.456* with
tickformat "%H~%M~%S.%2f" would display "09~15~23.46"
tickformatstops
A tuple of :class:`plotly.graph_objects.choroplethmapbo
x.colorbar.Tickformatstop` instances or dicts with
compatible properties
tickformatstopdefaults
When used in a template (as layout.template.data.chorop
lethmapbox.colorbar.tickformatstopdefaults), sets the
default property values to use for elements of
choroplethmapbox.colorbar.tickformatstops
ticklabeloverflow
Determines how we handle tick labels that would
overflow either the graph div or the domain of the
axis. The default value for inside tick labels is *hide
past domain*. In other cases the default is *hide past
div*.
ticklabelposition
Determines where tick labels are drawn.
ticklen
Sets the tick length (in px).
tickmode
Sets the tick mode for this axis. If "auto", the number
of ticks is set via `nticks`. If "linear", the
placement of the ticks is determined by a starting
position `tick0` and a tick step `dtick` ("linear" is
the default value if `tick0` and `dtick` are provided).
If "array", the placement of the ticks is set via
`tickvals` and the tick text is `ticktext`. ("array" is
the default value if `tickvals` is provided).
tickprefix
Sets a tick label prefix.
ticks
Determines whether ticks are drawn or not. If "", this
axis' ticks are not drawn. If "outside" ("inside"),
            this axis' ticks are drawn outside (inside) the axis lines.
ticksuffix
Sets a tick label suffix.
ticktext
Sets the text displayed at the ticks position via
`tickvals`. Only has an effect if `tickmode` is set to
"array". Used with `tickvals`.
ticktextsrc
Sets the source reference on Chart Studio Cloud for
ticktext .
tickvals
Sets the values at which ticks on this axis appear.
Only has an effect if `tickmode` is set to "array".
Used with `ticktext`.
tickvalssrc
Sets the source reference on Chart Studio Cloud for
tickvals .
tickwidth
Sets the tick width (in px).
title
:class:`plotly.graph_objects.choroplethmapbox.colorbar.
Title` instance or dict with compatible properties
titlefont
Deprecated: Please use
choroplethmapbox.colorbar.title.font instead. Sets this
color bar's title font. Note that the title's font used
to be set by the now deprecated `titlefont` attribute.
titleside
Deprecated: Please use
choroplethmapbox.colorbar.title.side instead.
Determines the location of color bar's title with
respect to the color bar. Note that the title's
location used to be set by the now deprecated
`titleside` attribute.
x
Sets the x position of the color bar (in plot
fraction).
xanchor
Sets this color bar's horizontal position anchor. This
anchor binds the `x` position to the "left", "center"
or "right" of the color bar.
xpad
Sets the amount of padding (in px) along the x
direction.
y
Sets the y position of the color bar (in plot
fraction).
yanchor
            Sets this color bar's vertical position anchor. This
anchor binds the `y` position to the "top", "middle" or
"bottom" of the color bar.
ypad
Sets the amount of padding (in px) along the y
direction.
Returns
-------
ColorBar
"""
super(ColorBar, self).__init__("colorbar")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.choroplethmapbox.ColorBar
constructor must be a dict or
an instance of :class:`plotly.graph_objs.choroplethmapbox.ColorBar`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("bgcolor", None)
_v = bgcolor if bgcolor is not None else _v
if _v is not None:
self["bgcolor"] = _v
_v = arg.pop("bordercolor", None)
_v = bordercolor if bordercolor is not None else _v
if _v is not None:
self["bordercolor"] = _v
_v = arg.pop("borderwidth", None)
_v = borderwidth if borderwidth is not None else _v
if _v is not None:
self["borderwidth"] = _v
_v = arg.pop("dtick", None)
_v = dtick if dtick is not None else _v
if _v is not None:
self["dtick"] = _v
_v = arg.pop("exponentformat", None)
_v = exponentformat if exponentformat is not None else _v
if _v is not None:
self["exponentformat"] = _v
_v = arg.pop("len", None)
_v = len if len is not None else _v
if _v is not None:
self["len"] = _v
_v = arg.pop("lenmode", None)
_v = lenmode if lenmode is not None else _v
if _v is not None:
self["lenmode"] = _v
_v = arg.pop("minexponent", None)
_v = minexponent if minexponent is not None else _v
if _v is not None:
self["minexponent"] = _v
_v = arg.pop("nticks", None)
_v = nticks if nticks is not None else _v
if _v is not None:
self["nticks"] = _v
_v = arg.pop("outlinecolor", None)
_v = outlinecolor if outlinecolor is not None else _v
if _v is not None:
self["outlinecolor"] = _v
_v = arg.pop("outlinewidth", None)
_v = outlinewidth if outlinewidth is not None else _v
if _v is not None:
self["outlinewidth"] = _v
_v = arg.pop("separatethousands", None)
_v = separatethousands if separatethousands is not None else _v
if _v is not None:
self["separatethousands"] = _v
_v = arg.pop("showexponent", None)
_v = showexponent if showexponent is not None else _v
if _v is not None:
self["showexponent"] = _v
_v = arg.pop("showticklabels", None)
_v = showticklabels if showticklabels is not None else _v
if _v is not None:
self["showticklabels"] = _v
_v = arg.pop("showtickprefix", None)
_v = showtickprefix if showtickprefix is not None else _v
if _v is not None:
self["showtickprefix"] = _v
_v = arg.pop("showticksuffix", None)
_v = showticksuffix if showticksuffix is not None else _v
if _v is not None:
self["showticksuffix"] = _v
_v = arg.pop("thickness", None)
_v = thickness if thickness is not None else _v
if _v is not None:
self["thickness"] = _v
_v = arg.pop("thicknessmode", None)
_v = thicknessmode if thicknessmode is not None else _v
if _v is not None:
self["thicknessmode"] = _v
_v = arg.pop("tick0", None)
_v = tick0 if tick0 is not None else _v
if _v is not None:
self["tick0"] = _v
_v = arg.pop("tickangle", None)
_v = tickangle if tickangle is not None else _v
if _v is not None:
self["tickangle"] = _v
_v = arg.pop("tickcolor", None)
_v = tickcolor if tickcolor is not None else _v
if _v is not None:
self["tickcolor"] = _v
_v = arg.pop("tickfont", None)
_v = tickfont if tickfont is not None else _v
if _v is not None:
self["tickfont"] = _v
_v = arg.pop("tickformat", None)
_v = tickformat if tickformat is not None else _v
if _v is not None:
self["tickformat"] = _v
_v = arg.pop("tickformatstops", None)
_v = tickformatstops if tickformatstops is not None else _v
if _v is not None:
self["tickformatstops"] = _v
_v = arg.pop("tickformatstopdefaults", None)
_v = tickformatstopdefaults if tickformatstopdefaults is not None else _v
if _v is not None:
self["tickformatstopdefaults"] = _v
_v = arg.pop("ticklabeloverflow", None)
_v = ticklabeloverflow if ticklabeloverflow is not None else _v
if _v is not None:
self["ticklabeloverflow"] = _v
_v = arg.pop("ticklabelposition", None)
_v = ticklabelposition if ticklabelposition is not None else _v
if _v is not None:
self["ticklabelposition"] = _v
_v = arg.pop("ticklen", None)
_v = ticklen if ticklen is not None else _v
if _v is not None:
self["ticklen"] = _v
_v = arg.pop("tickmode", None)
_v = tickmode if tickmode is not None else _v
if _v is not None:
self["tickmode"] = _v
_v = arg.pop("tickprefix", None)
_v = tickprefix if tickprefix is not None else _v
if _v is not None:
self["tickprefix"] = _v
_v = arg.pop("ticks", None)
_v = ticks if ticks is not None else _v
if _v is not None:
self["ticks"] = _v
_v = arg.pop("ticksuffix", None)
_v = ticksuffix if ticksuffix is not None else _v
if _v is not None:
self["ticksuffix"] = _v
_v = arg.pop("ticktext", None)
_v = ticktext if ticktext is not None else _v
if _v is not None:
self["ticktext"] = _v
_v = arg.pop("ticktextsrc", None)
_v = ticktextsrc if ticktextsrc is not None else _v
if _v is not None:
self["ticktextsrc"] = _v
_v = arg.pop("tickvals", None)
_v = tickvals if tickvals is not None else _v
if _v is not None:
self["tickvals"] = _v
_v = arg.pop("tickvalssrc", None)
_v = tickvalssrc if tickvalssrc is not None else _v
if _v is not None:
self["tickvalssrc"] = _v
_v = arg.pop("tickwidth", None)
_v = tickwidth if tickwidth is not None else _v
if _v is not None:
self["tickwidth"] = _v
_v = arg.pop("title", None)
_v = title if title is not None else _v
if _v is not None:
self["title"] = _v
_v = arg.pop("titlefont", None)
_v = titlefont if titlefont is not None else _v
if _v is not None:
self["titlefont"] = _v
_v = arg.pop("titleside", None)
_v = titleside if titleside is not None else _v
if _v is not None:
self["titleside"] = _v
_v = arg.pop("x", None)
_v = x if x is not None else _v
if _v is not None:
self["x"] = _v
_v = arg.pop("xanchor", None)
_v = xanchor if xanchor is not None else _v
if _v is not None:
self["xanchor"] = _v
_v = arg.pop("xpad", None)
_v = xpad if xpad is not None else _v
if _v is not None:
self["xpad"] = _v
_v = arg.pop("y", None)
_v = y if y is not None else _v
if _v is not None:
self["y"] = _v
_v = arg.pop("yanchor", None)
_v = yanchor if yanchor is not None else _v
if _v is not None:
self["yanchor"] = _v
_v = arg.pop("ypad", None)
_v = ypad if ypad is not None else _v
if _v is not None:
self["ypad"] = _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
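# ---------------------------------------------------------------------------
# Hedged usage sketch (added for illustration; not part of the generated
# plotly module). It only exercises constructor arguments documented above;
# the title text, tick positions and tick labels are hypothetical placeholders.
if __name__ == "__main__":
    example_colorbar = ColorBar(
        title=dict(text="Example metric"),  # hypothetical colorbar title
        tickmode="array",                   # place ticks explicitly via tickvals/ticktext
        tickvals=[0, 50, 100],              # hypothetical tick positions
        ticktext=["low", "mid", "high"],    # labels drawn at those positions
        ticks="outside",                    # draw ticks outside the axis line
        len=0.75,
        lenmode="fraction",                 # length measured as a plot fraction
    )
    print(example_colorbar)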
| mit |
elaeon/ML | src/dama/data/ds.py | 1 | 18505 | import datetime
import json
import numpy as np
import pandas as pd
import dask.array as da
import dask.bag as db
import dask
from tabulate import tabulate
from dama.abc.data import AbsData
from dama.abc.conn import AbsConn
from dama.data.it import Iterator, BaseIterator, BatchIterator
from dama.utils.core import Hash, Login, Metadata, Chunks, Shape
from dama.abc.driver import AbsDriver
from dama.drivers.core import Memory
from dama.drivers.sqlite import Sqlite
from dama.utils.logger import log_config
from dama.utils.config import get_settings
from dama.utils.decorators import cache, clean_cache
from dama.utils.files import get_dir_file_size
from dama.utils.order import order_table
from dama.connexions.core import GroupManager
from pydoc import locate
settings = get_settings("paths")
log = log_config(__name__)
class Data(AbsData):
def __init__(self, name: str = None, driver: AbsDriver = None, group_name: str = None,
chunks=None, auto_chunks=False, metadata_path: str = None):
if driver is None:
self.driver = Memory()
else:
self.driver = driver
if name is None and not isinstance(self.driver, Memory):
raise Exception("I can't build a dataset without a name, plese add a name to this dataset.")
if self.driver.persistent is True:
if metadata_path is not None:
self.metadata_path = metadata_path
else:
self.metadata_path = settings["metadata_path"]
self.metadata_driver = Sqlite(login=Login(table="data"), path=self.metadata_path)
else:
self.metadata_path = None
self.metadata_driver = None
self.name = name
self.header_map = ["author", "description"]
self.group_name = group_name
self.dtypes = None
self.hash = None
self.author = None
self.description = None
self.timestamp = None
self.compressor_params = None
self.chunksize = chunks
self.from_ds_hash = None
self.auto_chunks = auto_chunks
if self.driver.path is None:
self.driver.path = settings["data_path"]
self.driver.build_url(self.name, group_level=self.group_name)
@property
def author(self):
return self._get_attr('author')
@author.setter
def author(self, value):
if value is not None:
self._set_attr('author', value)
@property
def dtype(self):
return self.data.dtype
@property
def description(self):
return self._get_attr('description')
@description.setter
def description(self, value):
if value is not None:
self._set_attr('description', value)
@property
def timestamp(self):
return self._get_attr('timestamp')
@timestamp.setter
def timestamp(self, value):
if value is not None:
self._set_attr('timestamp', value)
@property
def hash(self):
return self._get_attr('hash')
@hash.setter
def hash(self, value):
if value is not None:
self._set_attr('hash', value)
@property
def compressor_params(self):
return json.loads(self._get_attr('compressor_params'))
@compressor_params.setter
def compressor_params(self, value):
if value is not None:
self._set_attr('compressor_params', json.dumps(value))
@classmethod
def module_cls_name(cls):
return "{}.{}".format(cls.__module__, cls.__name__)
@property
@cache
def data(self) -> AbsConn:
return self.driver.manager(chunks=self.chunksize)
@data.setter
@clean_cache
def data(self, v):
pass
def clean_data_cache(self):
self.data = None
@property
def from_ds_hash(self):
return self._get_attr('from_ds_hash')
@from_ds_hash.setter
def from_ds_hash(self, value):
if value is not None:
self._set_attr('from_ds_hash', value)
def open(self):
self.driver.open()
if self.driver.mode in ["w", "a", "r+"]:
if len(self.driver.compressor_params) > 0:
self.compressor_params = self.driver.compressor_params
if self.auto_chunks is True and self.driver.mode in ["a", "r"]:
try:
self.chunksize = Chunks.build_from_shape(self.driver.shape, self.driver.dtypes)
except KeyError as e:
log.error(e)
else:
if isinstance(self.chunksize, tuple) and self.driver.mode in ["a", "r"]:
groups = self.driver.groups
if groups is not None:
self.chunksize = Chunks.build_from(self.chunksize, groups)
elif self.driver.mode == "w" or self.chunksize is None:
pass
elif isinstance(self.chunksize, Chunks) and self.driver.mode in ["a", "r"]:
pass
else:
raise Exception("chunks value {} not allowed".format(self.chunksize))
def close(self):
self.driver.close()
self.data = None
def __getitem__(self, key):
return self.data[key]
def __setitem__(self, key, value):
raise NotImplementedError
def __iter__(self):
return self
def __next__(self):
return next(self.data)
def _set_attr(self, name, value):
if value is not None:
log.debug("SET attribute {name} {value}".format(name=name, value=value))
self.driver.attrs[name] = value
def _get_attr(self, name):
try:
return self.driver.attrs[name]
except KeyError:
log.debug("Not found attribute {} in file {}".format(name, self.url))
return None
except IOError as e:
log.debug(e)
log.debug("Error opening {} in file {}".format(name, self.url))
return None
def destroy(self):
hash_hex = self.hash
self.driver.destroy()
if self.driver.persistent is True:
with Metadata(self.metadata_driver) as metadata:
metadata.invalid(hash_hex)
@property
def url(self) -> str:
return self.driver.url
@property
def metadata_url(self) -> str:
return self.metadata_driver.url
def __len__(self):
return len(self.groups)
def __repr__(self):
return repr(self.data)
@property
def size(self):
return self.shape[0]
@property
def shape(self) -> Shape:
return self.data.shape
@property
def groups(self) -> tuple:
if self.data is not None:
return self.data.groups
@property
def dtypes(self) -> np.dtype:
return self.data.dtypes
@dtypes.setter
def dtypes(self, value):
if value is not None:
self.driver.set_schema(value)
def info(self):
print(' ')
print('Name: {}'.format(self.name))
print('Author: {}'.format(self.author))
print('Description: {}'.format(self.description))
print('URL path: {}'.format(self.driver.url))
print('Hash: {}'.format(self.hash))
print(' ')
headers = ["group", "shape", "dtype"]
table = []
shape = self.shape
for group, (dtype, _) in self.dtypes.fields.items():
table.append([group, shape[group], dtype])
print(order_table(headers, table, "Group"))
def metadata(self) -> dict:
meta_dict = dict()
meta_dict["hash"] = self.hash
meta_dict["path"] = self.driver.path
meta_dict["metadata_path"] = self.metadata_path
meta_dict["group_name"] = self.group_name
meta_dict["driver_module"] = self.driver.module_cls_name()
meta_dict["driver_name"] = self.driver.cls_name()
meta_dict["name"] = self.name
meta_dict["size"] = get_dir_file_size(self.url)
meta_dict["timestamp"] = self.timestamp
meta_dict["author"] = self.author
meta_dict["num_groups"] = len(self.groups)
meta_dict["description"] = self.description if self.description is None else ""
meta_dict["from_ds_hash"] = self.from_ds_hash
return meta_dict
def metadata_to_json(self, f):
metadata = self.metadata()
json.dump(metadata, f)
def write_metadata(self):
if self.driver.persistent is True:
with Metadata(self.metadata_driver, self.metadata()) as metadata:
dtypes = np.dtype([("hash", object), ("name", object), ("author", object),
("description", object), ("size", int), ("driver_module", object),
("path", object), ("driver_name", object), ("group_name", object),
("timestamp", np.dtype("datetime64[ns]")), ("num_groups", int),
("is_valid", bool), ("from_ds_hash", object)])
timestamp = metadata["timestamp"]
metadata["timestamp"] = datetime.datetime.strptime(timestamp, '%Y-%m-%dT%H:%M UTC')
metadata["group_name"] = "s/n" if self.group_name is None else self.group_name
metadata["is_valid"] = True
metadata.set_schema(dtypes, unique_key=["hash", ["path", "name", "driver_name", "group_name"]])
metadata.insert_update_data(keys=["hash", ["path", "name", "driver_name", "group_name"]])
def calc_hash(self, with_hash: str) -> str:
hash_obj = Hash(hash_fn=with_hash)
header = [getattr(self, attr) for attr in self.header_map]
header = [attr for attr in header if attr is not None]
hash_obj.hash.update("".join(header).encode("utf-8"))
for group in self.groups:
it = Iterator(self.data[group]).batchs(chunks=self.chunksize)
hash_obj.update(it.only_data())
return str(hash_obj)
def from_data(self, data, with_hash: str = "sha1", from_ds_hash: str = None, start_i: int = 0):
if isinstance(data, da.Array):
data = GroupManager.from_da(data)
if self.chunksize is None:
self.chunksize = data.chunksize
elif isinstance(self.chunksize, tuple):
self.chunksize = Chunks.build_from(self.chunksize, data.groups)
elif isinstance(data, Iterator):
if self.chunksize is None:
self.chunksize = Chunks.build_from_shape(data.shape, data.dtypes)
elif isinstance(self.chunksize, tuple):
self.chunksize = Chunks.build_from(self.chunksize, data.groups)
data = data.batchs(chunks=self.chunksize, start_i=start_i)
self.chunksize = data.chunksize
elif isinstance(data, BatchIterator):
if self.chunksize is None:
self.chunksize = data.chunksize
elif isinstance(self.chunksize, tuple):
self.chunksize = Chunks.build_from(self.chunksize, data.groups)
elif isinstance(data, dict) and not isinstance(data, AbsConn):
if self.chunksize is None:
shape, dtypes = Shape.get_shape_dtypes_from_dict(data)
self.chunksize = Chunks.build_from_shape(shape, dtypes)
elif isinstance(self.chunksize, tuple):
self.chunksize = Chunks.build_from(self.chunksize, tuple(data.keys()))
data = GroupManager.convert(data, chunks=self.chunksize)
elif isinstance(data, AbsConn):
if self.chunksize is None:
self.chunksize = data.chunksize
elif isinstance(self.chunksize, tuple):
self.chunksize = Chunks.build_from(self.chunksize, data.groups)
elif not isinstance(data, BaseIterator):
data = Iterator(data)
if self.chunksize is None:
self.chunksize = Chunks.build_from_shape(data.shape, data.dtypes)
elif isinstance(self.chunksize, tuple):
self.chunksize = Chunks.build_from(self.chunksize, data.groups)
data = data.batchs(chunks=self.chunksize, start_i=start_i)
self.chunksize = data.chunksize
self.dtypes = data.dtypes
self.driver.set_data_shape(data.shape)
if isinstance(data, BatchIterator) or isinstance(data, Iterator):
self.driver.batchs_writer(data)
elif isinstance(data, AbsConn):
self.driver.store(data)
else:
raise NotImplementedError
if with_hash is not None:
c_hash = self.calc_hash(with_hash=with_hash)
else:
c_hash = None
self.from_ds_hash = from_ds_hash
self.hash = c_hash
self.timestamp = datetime.datetime.utcnow().strftime("%Y-%m-%dT%H:%M UTC")
self.write_metadata()
def from_loader(self, data_list: list, loader_fn, npartitions: int = 1, with_hash: str = "sha1"):
def concat_partitions(part1: list, part2: list):
if not isinstance(part1, list):
part1 = [part1]
if not isinstance(part2, list):
part2 = [part2]
return GroupManager.concat(part1 + part2, axis=0)
url_bag_partition = db.from_sequence(data_list, npartitions=npartitions)
fold_loader = url_bag_partition.map(loader_fn).fold(binop=self.add_to_list, combine=concat_partitions,
initial=[])
da_group = fold_loader.compute()
self.from_data(da_group, with_hash=with_hash)
@staticmethod
def add_to_list(base_list, data) -> list:
it = Iterator(data)
groups = it.groups
if len(groups) == 1:
group_items = [(groups[0], data)]
else:
group_items = [(group, data[group]) for group in groups]
dagroup_dict = GroupManager.convert(group_items, Chunks.build_from_shape(it.shape, it.dtypes))
return base_list + [dagroup_dict]
def to_df(self) -> pd.DataFrame:
return self.data.to_df()
def to_ndarray(self, dtype=None) -> np.ndarray:
return self.data.to_ndarray(dtype=dtype)
def concat(self, datasets: tuple, axis=0):
da_groups = []
managers = set([])
for ds in datasets:
da_groups.append(ds.data)
managers.add(type(ds.data))
if len(managers) == 1:
manager = managers.pop()
da_group = manager.concat(da_groups, axis=axis)
self.from_data(da_group)
else:
raise NotImplementedError
def stadistics(self):
headers = ["group", "mean", "std dev", "min", "25%", "50%", "75%", "max", "nonzero", "nonan", "unique", "dtype"]
self.chunksize = Chunks.build_from_shape(self.shape, self.dtypes)
table = []
for group, (dtype, _) in self.dtypes.fields.items():
values = dict()
values["dtype"] = dtype
values["group"] = group
darray = self.data[group].da
if dtype == np.dtype(float) or dtype == np.dtype(int):
da_mean = da.around(darray.mean(), decimals=3)
da_std = da.around(darray.std(), decimals=3)
da_min = da.around(darray.min(), decimals=3)
da_max = da.around(darray.max(), decimals=3)
result = dask.compute([da_mean, da_std, da_min, da_max])[0]
values["mean"] = result[0] if not np.isnan(result[0]) else da.around(da.nanmean(darray), decimals=3).compute()
values["std dev"] = result[1] if not np.isnan(result[0]) else da.around(da.nanstd(darray), decimals=3).compute()
values["min"] = result[2] if not np.isnan(result[0]) else da.around(da.nanmin(darray), decimals=3).compute()
values["max"] = result[3] if not np.isnan(result[0]) else da.around(da.nanmax(darray), decimals=3).compute()
if len(self.shape[group]) == 1:
da_percentile = da.around(da.percentile(darray, [25, 50, 75]), decimals=3)
result = da_percentile.compute()
values["25%"] = result[0]
values["50%"] = result[1]
values["75%"] = result[2]
else:
values["25%"] = "-"
values["50%"] = "-"
values["75%"] = "-"
values["nonzero"] = da.count_nonzero(darray).compute()
values["nonan"] = da.count_nonzero(da.notnull(darray)).compute()
values["unique"] = "-"
else:
values["mean"] = "-"
values["std dev"] = "-"
values["min"] = "-"
values["max"] = "-"
values["25%"] = "-"
values["50%"] = "-"
values["75%"] = "-"
values["nonzero"] = "-"
values["nonan"] = da.count_nonzero(da.notnull(darray)).compute()
vunique = darray.to_dask_dataframe().fillna('').nunique().compute()
values["unique"] = vunique
row = []
for column in headers:
row.append(values[column])
table.append(row)
print("# rows {}".format(self.shape[0]))
return tabulate(table, headers)
@staticmethod
def load(hash_hex: str, metadata_driver: AbsDriver, metadata_path: str = None, auto_chunks: bool = True) -> 'Data':
with Metadata(metadata_driver) as metadata:
query = "SELECT name, driver_module, path, group_name, hash FROM {} WHERE hash = ?".format(
metadata_driver.login.table)
data = metadata.query(query, (hash_hex,))
if len(data) == 0:
log.warning(
"Resource {} does not exists in table '{}' in url {}".format(hash_hex,
metadata_driver.login.table,
metadata_driver.url))
else:
row = data[0]
data_driver = locate(row[1])
path = row[2]
group_name = None if row[3] == "s/n" else row[3]
name = row[0]
return Data(name=name, group_name=group_name, driver=data_driver(path=path, mode="r"),
metadata_path=metadata_path, auto_chunks=auto_chunks)
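# ---------------------------------------------------------------------------
# Hedged usage sketch (added for illustration; not part of the original
# module). It assumes the default in-memory driver works end to end for a
# small random array; the dataset name and shape are hypothetical.
if __name__ == "__main__":
    example = Data(name="demo")  # Memory driver is used when none is given
    example.open()
    example.from_data(np.random.rand(100, 3), with_hash="sha1")
    example.info()               # prints name, author, hash and the group table
    print(example.to_df().head())
    example.close()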
| apache-2.0 |
with-git/tensorflow | tensorflow/contrib/learn/python/learn/grid_search_test.py | 137 | 2035 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Grid search tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import random
from tensorflow.contrib.learn.python import learn
from tensorflow.python.platform import test
HAS_SKLEARN = os.environ.get('TENSORFLOW_SKLEARN', False)
if HAS_SKLEARN:
try:
# pylint: disable=g-import-not-at-top
from sklearn import datasets
from sklearn.grid_search import GridSearchCV
from sklearn.metrics import accuracy_score
except ImportError:
HAS_SKLEARN = False
class GridSearchTest(test.TestCase):
"""Grid search tests."""
def testIrisDNN(self):
if HAS_SKLEARN:
random.seed(42)
iris = datasets.load_iris()
feature_columns = learn.infer_real_valued_columns_from_input(iris.data)
classifier = learn.DNNClassifier(
feature_columns=feature_columns,
hidden_units=[10, 20, 10],
n_classes=3)
grid_search = GridSearchCV(
classifier, {'hidden_units': [[5, 5], [10, 10]]},
scoring='accuracy',
fit_params={'steps': [50]})
grid_search.fit(iris.data, iris.target)
score = accuracy_score(iris.target, grid_search.predict(iris.data))
self.assertGreater(score, 0.5, 'Failed with score = {0}'.format(score))
if __name__ == '__main__':
test.main()
| apache-2.0 |
jmontoyam/mne-python | examples/inverse/plot_morph_data.py | 15 | 2220 | """
==========================================================
Morph source estimates from one subject to another subject
==========================================================
A source estimate from a given subject 'sample' is morphed
to the anatomy of another subject 'fsaverage'. The output
is a source estimate defined on the anatomy of 'fsaverage'
"""
# Authors: Alexandre Gramfort <[email protected]>
# Eric Larson <[email protected]>
#
# License: BSD (3-clause)
import numpy as np
import matplotlib.pyplot as plt
import mne
from mne.datasets import sample
print(__doc__)
data_path = sample.data_path()
subject_from = 'sample'
subject_to = 'fsaverage'
subjects_dir = data_path + '/subjects'
fname = data_path + '/MEG/sample/sample_audvis-meg'
# Read input stc file
stc_from = mne.read_source_estimate(fname)
# Morph using one method (supplying the vertices in fsaverage's source
# space makes it faster). Note that for any generic subject, you could do:
# vertices_to = mne.grade_to_vertices(subject_to, grade=5)
# But fsaverage's source space was set up so we can just do this:
vertices_to = [np.arange(10242), np.arange(10242)]
stc_to = mne.morph_data(subject_from, subject_to, stc_from, n_jobs=1,
grade=vertices_to, subjects_dir=subjects_dir)
stc_to.save('%s_audvis-meg' % subject_to)
# Morph using another method -- useful if you're going to do a lot of the
# same inter-subject morphing operations; you could save and load morph_mat
morph_mat = mne.compute_morph_matrix(subject_from, subject_to,
stc_from.vertices, vertices_to,
subjects_dir=subjects_dir)
stc_to_2 = mne.morph_data_precomputed(subject_from, subject_to,
stc_from, vertices_to, morph_mat)
stc_to_2.save('%s_audvis-meg_2' % subject_to)
# View source activations
plt.plot(stc_from.times, stc_from.data.mean(axis=0), 'r', label='from')
plt.plot(stc_to.times, stc_to.data.mean(axis=0), 'b', label='to')
plt.plot(stc_to_2.times, stc_to.data.mean(axis=0), 'g', label='to_2')
plt.xlabel('time (ms)')
plt.ylabel('Mean Source amplitude')
plt.legend()
plt.show()
| bsd-3-clause |
dsm054/pandas | pandas/tests/arrays/categorical/test_dtypes.py | 3 | 6796 | # -*- coding: utf-8 -*-
import numpy as np
import pytest
from pandas.compat import long
from pandas.core.dtypes.dtypes import CategoricalDtype
from pandas import Categorical, CategoricalIndex, Index, Series, Timestamp
import pandas.util.testing as tm
class TestCategoricalDtypes(object):
def test_is_equal_dtype(self):
# test dtype comparisons between cats
c1 = Categorical(list('aabca'), categories=list('abc'), ordered=False)
c2 = Categorical(list('aabca'), categories=list('cab'), ordered=False)
c3 = Categorical(list('aabca'), categories=list('cab'), ordered=True)
assert c1.is_dtype_equal(c1)
assert c2.is_dtype_equal(c2)
assert c3.is_dtype_equal(c3)
assert c1.is_dtype_equal(c2)
assert not c1.is_dtype_equal(c3)
assert not c1.is_dtype_equal(Index(list('aabca')))
assert not c1.is_dtype_equal(c1.astype(object))
assert c1.is_dtype_equal(CategoricalIndex(c1))
assert (c1.is_dtype_equal(
CategoricalIndex(c1, categories=list('cab'))))
assert not c1.is_dtype_equal(CategoricalIndex(c1, ordered=True))
# GH 16659
s1 = Series(c1)
s2 = Series(c2)
s3 = Series(c3)
assert c1.is_dtype_equal(s1)
assert c2.is_dtype_equal(s2)
assert c3.is_dtype_equal(s3)
assert c1.is_dtype_equal(s2)
assert not c1.is_dtype_equal(s3)
assert not c1.is_dtype_equal(s1.astype(object))
def test_set_dtype_same(self):
c = Categorical(['a', 'b', 'c'])
result = c._set_dtype(CategoricalDtype(['a', 'b', 'c']))
tm.assert_categorical_equal(result, c)
def test_set_dtype_new_categories(self):
c = Categorical(['a', 'b', 'c'])
result = c._set_dtype(CategoricalDtype(list('abcd')))
tm.assert_numpy_array_equal(result.codes, c.codes)
tm.assert_index_equal(result.dtype.categories, Index(list('abcd')))
@pytest.mark.parametrize('values, categories, new_categories', [
# No NaNs, same cats, same order
(['a', 'b', 'a'], ['a', 'b'], ['a', 'b'],),
# No NaNs, same cats, different order
(['a', 'b', 'a'], ['a', 'b'], ['b', 'a'],),
# Same, unsorted
(['b', 'a', 'a'], ['a', 'b'], ['a', 'b'],),
# No NaNs, same cats, different order
(['b', 'a', 'a'], ['a', 'b'], ['b', 'a'],),
# NaNs
(['a', 'b', 'c'], ['a', 'b'], ['a', 'b']),
(['a', 'b', 'c'], ['a', 'b'], ['b', 'a']),
(['b', 'a', 'c'], ['a', 'b'], ['a', 'b']),
(['b', 'a', 'c'], ['a', 'b'], ['a', 'b']),
# Introduce NaNs
(['a', 'b', 'c'], ['a', 'b'], ['a']),
(['a', 'b', 'c'], ['a', 'b'], ['b']),
(['b', 'a', 'c'], ['a', 'b'], ['a']),
(['b', 'a', 'c'], ['a', 'b'], ['a']),
# No overlap
(['a', 'b', 'c'], ['a', 'b'], ['d', 'e']),
])
@pytest.mark.parametrize('ordered', [True, False])
def test_set_dtype_many(self, values, categories, new_categories,
ordered):
c = Categorical(values, categories)
expected = Categorical(values, new_categories, ordered)
result = c._set_dtype(expected.dtype)
tm.assert_categorical_equal(result, expected)
def test_set_dtype_no_overlap(self):
c = Categorical(['a', 'b', 'c'], ['d', 'e'])
result = c._set_dtype(CategoricalDtype(['a', 'b']))
expected = Categorical([None, None, None], categories=['a', 'b'])
tm.assert_categorical_equal(result, expected)
def test_codes_dtypes(self):
# GH 8453
result = Categorical(['foo', 'bar', 'baz'])
assert result.codes.dtype == 'int8'
result = Categorical(['foo%05d' % i for i in range(400)])
assert result.codes.dtype == 'int16'
result = Categorical(['foo%05d' % i for i in range(40000)])
assert result.codes.dtype == 'int32'
# adding cats
result = Categorical(['foo', 'bar', 'baz'])
assert result.codes.dtype == 'int8'
result = result.add_categories(['foo%05d' % i for i in range(400)])
assert result.codes.dtype == 'int16'
# removing cats
result = result.remove_categories(['foo%05d' % i for i in range(300)])
assert result.codes.dtype == 'int8'
@pytest.mark.parametrize('ordered', [True, False])
def test_astype(self, ordered):
# string
cat = Categorical(list('abbaaccc'), ordered=ordered)
result = cat.astype(object)
expected = np.array(cat)
tm.assert_numpy_array_equal(result, expected)
msg = 'could not convert string to float'
with pytest.raises(ValueError, match=msg):
cat.astype(float)
# numeric
cat = Categorical([0, 1, 2, 2, 1, 0, 1, 0, 2], ordered=ordered)
result = cat.astype(object)
expected = np.array(cat, dtype=object)
tm.assert_numpy_array_equal(result, expected)
result = cat.astype(int)
expected = np.array(cat, dtype=np.int)
tm.assert_numpy_array_equal(result, expected)
result = cat.astype(float)
expected = np.array(cat, dtype=np.float)
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize('dtype_ordered', [True, False])
@pytest.mark.parametrize('cat_ordered', [True, False])
def test_astype_category(self, dtype_ordered, cat_ordered):
# GH 10696/18593
data = list('abcaacbab')
cat = Categorical(data, categories=list('bac'), ordered=cat_ordered)
# standard categories
dtype = CategoricalDtype(ordered=dtype_ordered)
result = cat.astype(dtype)
expected = Categorical(
data, categories=cat.categories, ordered=dtype_ordered)
tm.assert_categorical_equal(result, expected)
# non-standard categories
dtype = CategoricalDtype(list('adc'), dtype_ordered)
result = cat.astype(dtype)
expected = Categorical(data, dtype=dtype)
tm.assert_categorical_equal(result, expected)
if dtype_ordered is False:
# dtype='category' can't specify ordered, so only test once
result = cat.astype('category')
expected = cat
tm.assert_categorical_equal(result, expected)
def test_iter_python_types(self):
# GH-19909
# TODO(Py2): Remove long
cat = Categorical([1, 2])
assert isinstance(list(cat)[0], (int, long))
assert isinstance(cat.tolist()[0], (int, long))
def test_iter_python_types_datetime(self):
cat = Categorical([Timestamp('2017-01-01'),
Timestamp('2017-01-02')])
assert isinstance(list(cat)[0], Timestamp)
assert isinstance(cat.tolist()[0], Timestamp)
| bsd-3-clause |
MartinDelzant/scikit-learn | examples/ensemble/plot_adaboost_twoclass.py | 347 | 3268 | """
==================
Two-class AdaBoost
==================
This example fits an AdaBoosted decision stump on a non-linearly separable
classification dataset composed of two "Gaussian quantiles" clusters
(see :func:`sklearn.datasets.make_gaussian_quantiles`) and plots the decision
boundary and decision scores. The distributions of decision scores are shown
separately for samples of class A and B. The predicted class label for each
sample is determined by the sign of the decision score. Samples with decision
scores greater than zero are classified as B, and are otherwise classified
as A. The magnitude of a decision score determines the degree of likeness with
the predicted class label. Additionally, a new dataset could be constructed
containing a desired purity of class B, for example, by only selecting samples
with a decision score above some value.
"""
print(__doc__)
# Author: Noel Dawe <[email protected]>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.ensemble import AdaBoostClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.datasets import make_gaussian_quantiles
# Construct dataset
X1, y1 = make_gaussian_quantiles(cov=2.,
n_samples=200, n_features=2,
n_classes=2, random_state=1)
X2, y2 = make_gaussian_quantiles(mean=(3, 3), cov=1.5,
n_samples=300, n_features=2,
n_classes=2, random_state=1)
X = np.concatenate((X1, X2))
y = np.concatenate((y1, - y2 + 1))
# Create and fit an AdaBoosted decision tree
bdt = AdaBoostClassifier(DecisionTreeClassifier(max_depth=1),
algorithm="SAMME",
n_estimators=200)
bdt.fit(X, y)
plot_colors = "br"
plot_step = 0.02
class_names = "AB"
plt.figure(figsize=(10, 5))
# Plot the decision boundaries
plt.subplot(121)
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, plot_step),
np.arange(y_min, y_max, plot_step))
Z = bdt.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
cs = plt.contourf(xx, yy, Z, cmap=plt.cm.Paired)
plt.axis("tight")
# Plot the training points
for i, n, c in zip(range(2), class_names, plot_colors):
idx = np.where(y == i)
plt.scatter(X[idx, 0], X[idx, 1],
c=c, cmap=plt.cm.Paired,
label="Class %s" % n)
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.legend(loc='upper right')
plt.xlabel('x')
plt.ylabel('y')
plt.title('Decision Boundary')
# Plot the two-class decision scores
twoclass_output = bdt.decision_function(X)
plot_range = (twoclass_output.min(), twoclass_output.max())
plt.subplot(122)
for i, n, c in zip(range(2), class_names, plot_colors):
plt.hist(twoclass_output[y == i],
bins=10,
range=plot_range,
facecolor=c,
label='Class %s' % n,
alpha=.5)
x1, x2, y1, y2 = plt.axis()
plt.axis((x1, x2, y1, y2 * 1.2))
plt.legend(loc='upper right')
plt.ylabel('Samples')
plt.xlabel('Score')
plt.title('Decision Scores')
plt.tight_layout()
plt.subplots_adjust(wspace=0.35)
plt.show()
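# A minimal sketch of the "purity" selection mentioned in the docstring above;
# the 0.5 cut-off is an arbitrary assumption, any positive threshold works:
#     high_purity_mask = twoclass_output > 0.5
#     X_pure_B, y_pure_B = X[high_purity_mask], y[high_purity_mask]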
| bsd-3-clause |
cactusbin/nyt | matplotlib/examples/pylab_examples/demo_agg_filter.py | 10 | 9340 | import matplotlib.pyplot as plt
import numpy as np
import matplotlib.cm as cm
import matplotlib.mlab as mlab
def smooth1d(x, window_len):
# copied from http://www.scipy.org/Cookbook/SignalSmooth
    s = np.r_[2*x[0] - x[window_len:1:-1], x, 2*x[-1] - x[-1:-window_len:-1]]
    w = np.hanning(window_len)
    y = np.convolve(w / w.sum(), s, mode='same')
return y[window_len-1:-window_len+1]
def smooth2d(A, sigma=3):
window_len = max(int(sigma), 3)*2+1
A1 = np.array([smooth1d(x, window_len) for x in np.asarray(A)])
A2 = np.transpose(A1)
A3 = np.array([smooth1d(x, window_len) for x in A2])
A4 = np.transpose(A3)
return A4
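# Note: smooth1d mirrors the signal at both ends, convolves it with a
# normalized Hanning window and trims the padding off again; smooth2d applies
# that smoothing along rows and then columns. GaussianFilter below uses
# smooth2d as its (approximate) gaussian blur of the alpha channel.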
class BaseFilter(object):
def prepare_image(self, src_image, dpi, pad):
ny, nx, depth = src_image.shape
#tgt_image = np.zeros([pad*2+ny, pad*2+nx, depth], dtype="d")
padded_src = np.zeros([pad*2+ny, pad*2+nx, depth], dtype="d")
padded_src[pad:-pad, pad:-pad,:] = src_image[:,:,:]
return padded_src#, tgt_image
def get_pad(self, dpi):
return 0
def __call__(self, im, dpi):
pad = self.get_pad(dpi)
padded_src = self.prepare_image(im, dpi, pad)
tgt_image = self.process_image(padded_src, dpi)
return tgt_image, -pad, -pad
class OffsetFilter(BaseFilter):
def __init__(self, offsets=None):
if offsets is None:
self.offsets = (0, 0)
else:
self.offsets = offsets
def get_pad(self, dpi):
return int(max(*self.offsets)/72.*dpi)
def process_image(self, padded_src, dpi):
ox, oy = self.offsets
a1 = np.roll(padded_src, int(ox/72.*dpi), axis=1)
a2 = np.roll(a1, -int(oy/72.*dpi), axis=0)
return a2
class GaussianFilter(BaseFilter):
"simple gauss filter"
def __init__(self, sigma, alpha=0.5, color=None):
self.sigma = sigma
self.alpha = alpha
if color is None:
self.color=(0, 0, 0)
else:
self.color=color
def get_pad(self, dpi):
return int(self.sigma*3/72.*dpi)
def process_image(self, padded_src, dpi):
#offsetx, offsety = int(self.offsets[0]), int(self.offsets[1])
tgt_image = np.zeros_like(padded_src)
aa = smooth2d(padded_src[:,:,-1]*self.alpha,
self.sigma/72.*dpi)
tgt_image[:,:,-1] = aa
tgt_image[:,:,:-1] = self.color
return tgt_image
class DropShadowFilter(BaseFilter):
def __init__(self, sigma, alpha=0.3, color=None, offsets=None):
self.gauss_filter = GaussianFilter(sigma, alpha, color)
self.offset_filter = OffsetFilter(offsets)
def get_pad(self, dpi):
return max(self.gauss_filter.get_pad(dpi),
self.offset_filter.get_pad(dpi))
def process_image(self, padded_src, dpi):
t1 = self.gauss_filter.process_image(padded_src, dpi)
t2 = self.offset_filter.process_image(t1, dpi)
return t2
from matplotlib.colors import LightSource
class LightFilter(BaseFilter):
"simple gauss filter"
def __init__(self, sigma, fraction=0.5):
self.gauss_filter = GaussianFilter(sigma, alpha=1)
self.light_source = LightSource()
self.fraction = fraction
#hsv_min_val=0.5,hsv_max_val=0.9,
# hsv_min_sat=0.1,hsv_max_sat=0.1)
def get_pad(self, dpi):
return self.gauss_filter.get_pad(dpi)
def process_image(self, padded_src, dpi):
t1 = self.gauss_filter.process_image(padded_src, dpi)
elevation = t1[:,:,3]
rgb = padded_src[:,:,:3]
rgb2 = self.light_source.shade_rgb(rgb, elevation,
fraction=self.fraction)
tgt = np.empty_like(padded_src)
tgt[:,:,:3] = rgb2
tgt[:,:,3] = padded_src[:,:,3]
return tgt
class GrowFilter(BaseFilter):
"enlarge the area"
def __init__(self, pixels, color=None):
self.pixels = pixels
if color is None:
self.color=(1, 1, 1)
else:
self.color=color
def __call__(self, im, dpi):
pad = self.pixels
ny, nx, depth = im.shape
new_im = np.empty([pad*2+ny, pad*2+nx, depth], dtype="d")
alpha = new_im[:,:,3]
alpha.fill(0)
alpha[pad:-pad, pad:-pad] = im[:,:,-1]
alpha2 = np.clip(smooth2d(alpha, self.pixels/72.*dpi) * 5, 0, 1)
new_im[:,:,-1] = alpha2
new_im[:,:,:-1] = self.color
offsetx, offsety = -pad, -pad
return new_im, offsetx, offsety
from matplotlib.artist import Artist
class FilteredArtistList(Artist):
"""
    A simple container to draw filtered artists.
"""
def __init__(self, artist_list, filter):
self._artist_list = artist_list
self._filter = filter
Artist.__init__(self)
def draw(self, renderer):
renderer.start_rasterizing()
renderer.start_filter()
for a in self._artist_list:
a.draw(renderer)
renderer.stop_filter(self._filter)
renderer.stop_rasterizing()
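# Two ways of hooking a filter into the drawing pipeline are used below (both
# are part of this demo, not extra API): set it on a single artist with
# artist.set_agg_filter(some_filter) as in drop_shadow_line(), or wrap several
# artists in a FilteredArtistList and ax.add_artist() it as in filtered_text().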
import matplotlib.transforms as mtransforms
def filtered_text(ax):
# mostly copied from contour_demo.py
# prepare image
delta = 0.025
x = np.arange(-3.0, 3.0, delta)
y = np.arange(-2.0, 2.0, delta)
X, Y = np.meshgrid(x, y)
Z1 = mlab.bivariate_normal(X, Y, 1.0, 1.0, 0.0, 0.0)
Z2 = mlab.bivariate_normal(X, Y, 1.5, 0.5, 1, 1)
# difference of Gaussians
Z = 10.0 * (Z2 - Z1)
# draw
im = ax.imshow(Z, interpolation='bilinear', origin='lower',
cmap=cm.gray, extent=(-3,3,-2,2))
levels = np.arange(-1.2, 1.6, 0.2)
CS = ax.contour(Z, levels,
origin='lower',
linewidths=2,
extent=(-3,3,-2,2))
ax.set_aspect("auto")
# contour label
cl = ax.clabel(CS, levels[1::2], # label every second level
inline=1,
fmt='%1.1f',
fontsize=11)
    # change clabel color to black
from matplotlib.patheffects import Normal
for t in cl:
t.set_color("k")
t.set_path_effects([Normal()]) # to force TextPath (i.e., same font in all backends)
# Add white glows to improve visibility of labels.
white_glows = FilteredArtistList(cl, GrowFilter(3))
ax.add_artist(white_glows)
white_glows.set_zorder(cl[0].get_zorder()-0.1)
ax.xaxis.set_visible(False)
ax.yaxis.set_visible(False)
def drop_shadow_line(ax):
# copied from examples/misc/svg_filter_line.py
# draw lines
l1, = ax.plot([0.1, 0.5, 0.9], [0.1, 0.9, 0.5], "bo-",
mec="b", mfc="w", lw=5, mew=3, ms=10, label="Line 1")
l2, = ax.plot([0.1, 0.5, 0.9], [0.5, 0.2, 0.7], "ro-",
mec="r", mfc="w", lw=5, mew=3, ms=10, label="Line 1")
gauss = DropShadowFilter(4)
for l in [l1, l2]:
# draw shadows with same lines with slight offset.
xx = l.get_xdata()
yy = l.get_ydata()
shadow, = ax.plot(xx, yy)
shadow.update_from(l)
# offset transform
ot = mtransforms.offset_copy(l.get_transform(), ax.figure,
x=4.0, y=-6.0, units='points')
shadow.set_transform(ot)
# adjust zorder of the shadow lines so that it is drawn below the
# original lines
shadow.set_zorder(l.get_zorder()-0.5)
shadow.set_agg_filter(gauss)
shadow.set_rasterized(True) # to support mixed-mode renderers
ax.set_xlim(0., 1.)
ax.set_ylim(0., 1.)
ax.xaxis.set_visible(False)
ax.yaxis.set_visible(False)
def drop_shadow_patches(ax):
    # copied from barchart_demo.py
N = 5
menMeans = (20, 35, 30, 35, 27)
ind = np.arange(N) # the x locations for the groups
width = 0.35 # the width of the bars
rects1 = ax.bar(ind, menMeans, width, color='r', ec="w", lw=2)
womenMeans = (25, 32, 34, 20, 25)
rects2 = ax.bar(ind+width+0.1, womenMeans, width, color='y', ec="w", lw=2)
#gauss = GaussianFilter(1.5, offsets=(1,1), )
gauss = DropShadowFilter(5, offsets=(1,1), )
shadow = FilteredArtistList(rects1+rects2, gauss)
ax.add_artist(shadow)
shadow.set_zorder(rects1[0].get_zorder()-0.1)
ax.set_xlim(ind[0]-0.5, ind[-1]+1.5)
ax.set_ylim(0, 40)
ax.xaxis.set_visible(False)
ax.yaxis.set_visible(False)
def light_filter_pie(ax):
fracs = [15,30,45, 10]
explode=(0, 0.05, 0, 0)
pies = ax.pie(fracs, explode=explode)
ax.patch.set_visible(True)
light_filter = LightFilter(9)
for p in pies[0]:
p.set_agg_filter(light_filter)
p.set_rasterized(True) # to support mixed-mode renderers
p.set(ec="none",
lw=2)
gauss = DropShadowFilter(9, offsets=(3,4), alpha=0.7)
shadow = FilteredArtistList(pies[0], gauss)
ax.add_artist(shadow)
shadow.set_zorder(pies[0][0].get_zorder()-0.1)
if 1:
plt.figure(1, figsize=(6, 6))
plt.subplots_adjust(left=0.05, right=0.95)
ax = plt.subplot(221)
filtered_text(ax)
ax = plt.subplot(222)
drop_shadow_line(ax)
ax = plt.subplot(223)
drop_shadow_patches(ax)
ax = plt.subplot(224)
ax.set_aspect(1)
light_filter_pie(ax)
ax.set_frame_on(True)
plt.show()
| unlicense |
MJuddBooth/pandas | pandas/tests/io/generate_legacy_storage_files.py | 1 | 12877 | #!/usr/bin/env python
"""
self-contained to write legacy storage (pickle/msgpack) files
To use this script, create an environment where you want to
generate pickles, say it's for 0.18.1, with your pandas clone
in ~/pandas
. activate pandas_0.18.1
cd ~/
$ python pandas/pandas/tests/io/generate_legacy_storage_files.py \
pandas/pandas/tests/io/data/legacy_pickle/0.18.1/ pickle
This script generates a storage file for the current arch, system,
and python version
pandas version: 0.18.1
output dir : pandas/pandas/tests/io/data/legacy_pickle/0.18.1/
storage format: pickle
created pickle file: 0.18.1_x86_64_darwin_3.5.2.pickle
The idea here is you are using the *current* version of the
generate_legacy_storage_files with an *older* version of pandas to
generate a pickle file. We will then check this file into a current
branch, and test using test_pickle.py. This will load the *older*
pickles and test versus the current data that is generated
(with master). These are then compared.
If we have cases where we changed the signature (e.g. we renamed
offset -> freq in Timestamp), then we have to conditionally execute
in generate_legacy_storage_files.py so that it runs under both the
older AND the newer version.
"""
from __future__ import print_function
from datetime import timedelta
from distutils.version import LooseVersion
import os
import platform as pl
import sys
import numpy as np
from pandas.compat import u
import pandas
from pandas import (
Categorical, DataFrame, Index, MultiIndex, NaT, Period, Series,
SparseDataFrame, SparseSeries, Timestamp, bdate_range, date_range,
period_range, timedelta_range, to_msgpack)
from pandas.tseries.offsets import (
FY5253, BusinessDay, BusinessHour, CustomBusinessDay, DateOffset, Day,
Easter, Hour, LastWeekOfMonth, Minute, MonthBegin, MonthEnd, QuarterBegin,
QuarterEnd, SemiMonthBegin, SemiMonthEnd, Week, WeekOfMonth, YearBegin,
YearEnd)
_loose_version = LooseVersion(pandas.__version__)
def _create_sp_series():
nan = np.nan
# nan-based
arr = np.arange(15, dtype=np.float64)
arr[7:12] = nan
arr[-1:] = nan
bseries = SparseSeries(arr, kind='block')
bseries.name = u'bseries'
return bseries
def _create_sp_tsseries():
nan = np.nan
# nan-based
arr = np.arange(15, dtype=np.float64)
arr[7:12] = nan
arr[-1:] = nan
date_index = bdate_range('1/1/2011', periods=len(arr))
bseries = SparseSeries(arr, index=date_index, kind='block')
bseries.name = u'btsseries'
return bseries
def _create_sp_frame():
nan = np.nan
data = {u'A': [nan, nan, nan, 0, 1, 2, 3, 4, 5, 6],
u'B': [0, 1, 2, nan, nan, nan, 3, 4, 5, 6],
u'C': np.arange(10).astype(np.int64),
u'D': [0, 1, 2, 3, 4, 5, nan, nan, nan, nan]}
dates = bdate_range('1/1/2011', periods=10)
return SparseDataFrame(data, index=dates)
def create_data():
""" create the pickle/msgpack data """
data = {
u'A': [0., 1., 2., 3., np.nan],
u'B': [0, 1, 0, 1, 0],
u'C': [u'foo1', u'foo2', u'foo3', u'foo4', u'foo5'],
u'D': date_range('1/1/2009', periods=5),
u'E': [0., 1, Timestamp('20100101'), u'foo', 2.]
}
scalars = dict(timestamp=Timestamp('20130101'),
period=Period('2012', 'M'))
index = dict(int=Index(np.arange(10)),
date=date_range('20130101', periods=10),
period=period_range('2013-01-01', freq='M', periods=10),
float=Index(np.arange(10, dtype=np.float64)),
uint=Index(np.arange(10, dtype=np.uint64)),
timedelta=timedelta_range('00:00:00', freq='30T', periods=10))
if _loose_version >= LooseVersion('0.18'):
from pandas import RangeIndex
index['range'] = RangeIndex(10)
if _loose_version >= LooseVersion('0.21'):
from pandas import interval_range
index['interval'] = interval_range(0, periods=10)
mi = dict(reg2=MultiIndex.from_tuples(
tuple(zip(*[[u'bar', u'bar', u'baz', u'baz', u'foo',
u'foo', u'qux', u'qux'],
[u'one', u'two', u'one', u'two', u'one',
u'two', u'one', u'two']])),
names=[u'first', u'second']))
series = dict(float=Series(data[u'A']),
int=Series(data[u'B']),
mixed=Series(data[u'E']),
ts=Series(np.arange(10).astype(np.int64),
index=date_range('20130101', periods=10)),
mi=Series(np.arange(5).astype(np.float64),
index=MultiIndex.from_tuples(
tuple(zip(*[[1, 1, 2, 2, 2],
[3, 4, 3, 4, 5]])),
names=[u'one', u'two'])),
dup=Series(np.arange(5).astype(np.float64),
index=[u'A', u'B', u'C', u'D', u'A']),
cat=Series(Categorical([u'foo', u'bar', u'baz'])),
dt=Series(date_range('20130101', periods=5)),
dt_tz=Series(date_range('20130101', periods=5,
tz='US/Eastern')),
period=Series([Period('2000Q1')] * 5))
mixed_dup_df = DataFrame(data)
mixed_dup_df.columns = list(u"ABCDA")
frame = dict(float=DataFrame({u'A': series[u'float'],
u'B': series[u'float'] + 1}),
int=DataFrame({u'A': series[u'int'],
u'B': series[u'int'] + 1}),
mixed=DataFrame({k: data[k]
for k in [u'A', u'B', u'C', u'D']}),
mi=DataFrame({u'A': np.arange(5).astype(np.float64),
u'B': np.arange(5).astype(np.int64)},
index=MultiIndex.from_tuples(
tuple(zip(*[[u'bar', u'bar', u'baz',
u'baz', u'baz'],
[u'one', u'two', u'one',
u'two', u'three']])),
names=[u'first', u'second'])),
dup=DataFrame(np.arange(15).reshape(5, 3).astype(np.float64),
columns=[u'A', u'B', u'A']),
cat_onecol=DataFrame({u'A': Categorical([u'foo', u'bar'])}),
cat_and_float=DataFrame({
u'A': Categorical([u'foo', u'bar', u'baz']),
u'B': np.arange(3).astype(np.int64)}),
mixed_dup=mixed_dup_df,
dt_mixed_tzs=DataFrame({
u'A': Timestamp('20130102', tz='US/Eastern'),
u'B': Timestamp('20130603', tz='CET')}, index=range(5)),
dt_mixed2_tzs=DataFrame({
u'A': Timestamp('20130102', tz='US/Eastern'),
u'B': Timestamp('20130603', tz='CET'),
u'C': Timestamp('20130603', tz='UTC')}, index=range(5))
)
cat = dict(int8=Categorical(list('abcdefg')),
int16=Categorical(np.arange(1000)),
int32=Categorical(np.arange(10000)))
timestamp = dict(normal=Timestamp('2011-01-01'),
nat=NaT,
tz=Timestamp('2011-01-01', tz='US/Eastern'))
if _loose_version < LooseVersion('0.19.2'):
timestamp['freq'] = Timestamp('2011-01-01', offset='D')
timestamp['both'] = Timestamp('2011-01-01', tz='Asia/Tokyo',
offset='M')
else:
timestamp['freq'] = Timestamp('2011-01-01', freq='D')
timestamp['both'] = Timestamp('2011-01-01', tz='Asia/Tokyo',
freq='M')
off = {'DateOffset': DateOffset(years=1),
'DateOffset_h_ns': DateOffset(hour=6, nanoseconds=5824),
'BusinessDay': BusinessDay(offset=timedelta(seconds=9)),
'BusinessHour': BusinessHour(normalize=True, n=6, end='15:14'),
'CustomBusinessDay': CustomBusinessDay(weekmask='Mon Fri'),
'SemiMonthBegin': SemiMonthBegin(day_of_month=9),
'SemiMonthEnd': SemiMonthEnd(day_of_month=24),
'MonthBegin': MonthBegin(1),
'MonthEnd': MonthEnd(1),
'QuarterBegin': QuarterBegin(1),
'QuarterEnd': QuarterEnd(1),
'Day': Day(1),
'YearBegin': YearBegin(1),
'YearEnd': YearEnd(1),
'Week': Week(1),
'Week_Tues': Week(2, normalize=False, weekday=1),
'WeekOfMonth': WeekOfMonth(week=3, weekday=4),
'LastWeekOfMonth': LastWeekOfMonth(n=1, weekday=3),
'FY5253': FY5253(n=2, weekday=6, startingMonth=7, variation="last"),
'Easter': Easter(),
'Hour': Hour(1),
'Minute': Minute(1)}
return dict(series=series,
frame=frame,
index=index,
scalars=scalars,
mi=mi,
sp_series=dict(float=_create_sp_series(),
ts=_create_sp_tsseries()),
sp_frame=dict(float=_create_sp_frame()),
cat=cat,
timestamp=timestamp,
offsets=off)
def create_pickle_data():
data = create_data()
# Pre-0.14.1 versions generated non-unpicklable mixed-type frames and
# panels if their columns/items were non-unique.
if _loose_version < LooseVersion('0.14.1'):
del data['frame']['mixed_dup']
del data['panel']['mixed_dup']
if _loose_version < LooseVersion('0.17.0'):
del data['series']['period']
del data['scalars']['period']
return data
def _u(x):
return {u(k): _u(x[k]) for k in x} if isinstance(x, dict) else x
def create_msgpack_data():
data = create_data()
if _loose_version < LooseVersion('0.17.0'):
del data['frame']['mixed_dup']
del data['panel']['mixed_dup']
del data['frame']['dup']
del data['panel']['dup']
if _loose_version < LooseVersion('0.18.0'):
del data['series']['dt_tz']
del data['frame']['dt_mixed_tzs']
# Not supported
del data['sp_series']
del data['sp_frame']
del data['series']['cat']
del data['series']['period']
del data['frame']['cat_onecol']
del data['frame']['cat_and_float']
del data['scalars']['period']
if _loose_version < LooseVersion('0.23.0'):
del data['index']['interval']
del data['offsets']
return _u(data)
def platform_name():
return '_'.join([str(pandas.__version__), str(pl.machine()),
str(pl.system().lower()), str(pl.python_version())])
def write_legacy_pickles(output_dir):
# make sure we are < 0.13 compat (in py3)
try:
from pandas.compat import zip, cPickle as pickle # noqa
except ImportError:
import pickle
version = pandas.__version__
print("This script generates a storage file for the current arch, system, "
"and python version")
print(" pandas version: {0}".format(version))
print(" output dir : {0}".format(output_dir))
print(" storage format: pickle")
pth = '{0}.pickle'.format(platform_name())
fh = open(os.path.join(output_dir, pth), 'wb')
pickle.dump(create_pickle_data(), fh, pickle.HIGHEST_PROTOCOL)
fh.close()
print("created pickle file: %s" % pth)
def write_legacy_msgpack(output_dir, compress):
version = pandas.__version__
print("This script generates a storage file for the current arch, "
"system, and python version")
print(" pandas version: {0}".format(version))
print(" output dir : {0}".format(output_dir))
print(" storage format: msgpack")
pth = '{0}.msgpack'.format(platform_name())
to_msgpack(os.path.join(output_dir, pth), create_msgpack_data(),
compress=compress)
print("created msgpack file: %s" % pth)
def write_legacy_file():
# force our cwd to be the first searched
sys.path.insert(0, '.')
if not (3 <= len(sys.argv) <= 4):
exit("Specify output directory and storage type: generate_legacy_"
"storage_files.py <output_dir> <storage_type> "
"<msgpack_compress_type>")
output_dir = str(sys.argv[1])
storage_type = str(sys.argv[2])
try:
compress_type = str(sys.argv[3])
except IndexError:
compress_type = None
if storage_type == 'pickle':
write_legacy_pickles(output_dir=output_dir)
elif storage_type == 'msgpack':
write_legacy_msgpack(output_dir=output_dir, compress=compress_type)
else:
exit("storage_type must be one of {'pickle', 'msgpack'}")
if __name__ == '__main__':
write_legacy_file()
| bsd-3-clause |
tpsatish95/Youtube-Comedy-Comparison | Project/PairGuess.py | 1 | 2924 | __author__ = 'satish'
import numpy as np
from time import time
from sklearn.svm import SVR
from xml.dom import minidom
#### Main Path
p = "./"
import sys
sys.path.append(p + "Processor/")
import PreprocessClass
import pickle
def save_obj(obj, name):
    with open(name + '.pkl', 'wb') as f:
        pickle.dump(obj, f, protocol=2)
def load_obj(name):
    with open(name + '.pkl', 'rb') as f:
        return pickle.load(f)
class Model(object):
def __init__(self,path):
self.SentiModel = load_obj(path+"classifier")
self.ch2 = load_obj(path+"ch2Model")
self.vectorizer = load_obj(path+"vectorizer")
    def getFNF(self, message):
        # vectorize the text, apply the saved ch2 feature selector,
        # then return the classifier's predicted label
        vec = self.vectorizer.transform([message])
        Tvec = self.ch2.transform(vec)
        pred = self.SentiModel.predict(Tvec)
        return pred[0]
class PairGuess(object):
def __init__(self):
self.T = Model(p + "Models/Title/")
self.D = Model(p + "Models/Desc/")
self.C = Model(p + "Models/Comm/")
#Load SVR Classifier Model
self.svr = load_obj(p + "BaggedSVRClassifierModel")
# print("Models Loaded")
self.p = PreprocessClass.Preprocess()
def getTDC(self,Vid):
try:
doc = minidom.parse(p + "Data/Meta/"+Vid.strip()+".txt")
# Title from Meta
title = doc.getElementsByTagName("title")[0].firstChild.nodeValue
# Description
try:
mediaDescription = doc.getElementsByTagName("media:description")[0].firstChild.nodeValue
except:
mediaDescription = "NONE"
except:
title = "NONE"
mediaDescription = "NONE"
try:
com = minidom.parse(p + "Data/Comments/"+Vid.strip()+".txt")
# Comments
comment = [c.firstChild.nodeValue for c in com.getElementsByTagName("content")]
except:
comment = []
return [title,mediaDescription,comment]
def getProcessed(self,TDC):
return [self.p.process(TDC[0]),self.p.process(TDC[1]),[self.p.process(c) for c in TDC[2]]]
    def getVec(self, TDC):
        # TP/DP come from the title and description models; CP sums the
        # per-comment predictions. 0.5 is the default when a field is missing.
        TP = 0.5
        DP = 0.5
        CP = 0.5  # to denote no comments
if TDC[0] != "none":
TP = self.T.getFNF(TDC[0].encode("utf-8"))
if TDC[1] != "none":
DP = self.D.getFNF(TDC[1].encode("utf-8"))
COUNT = 0
if TDC[2] != []:
for com in TDC[2]:
COUNT += self.C.getFNF(com.encode("utf-8"))
CP = COUNT
return np.array([TP,DP,CP])
def getScore(self,vec):
return self.svr.predict(vec)[0]
def getLR(self,vid1,vid2):
s1 = self.getScore(self.getVec(self.getProcessed(self.getTDC(vid1))))
s2 = self.getScore(self.getVec(self.getProcessed(self.getTDC(vid2))))
if s1>s2:
return "left"
# elif s1 == s2:
# return "same"
else:
return "right"
| apache-2.0 |
dsm054/pandas | pandas/tests/series/test_rank.py | 1 | 19233 | # -*- coding: utf-8 -*-
from distutils.version import LooseVersion
from itertools import chain
import numpy as np
from numpy import nan
import pytest
from pandas._libs.algos import Infinity, NegInfinity
from pandas._libs.tslib import iNaT
import pandas.compat as compat
from pandas.compat import product
import pandas.util._test_decorators as td
from pandas import NaT, Series, Timestamp, date_range
from pandas.api.types import CategoricalDtype
from pandas.tests.series.common import TestData
import pandas.util.testing as tm
from pandas.util.testing import assert_series_equal
class TestSeriesRank(TestData):
s = Series([1, 3, 4, 2, nan, 2, 1, 5, nan, 3])
results = {
'average': np.array([1.5, 5.5, 7.0, 3.5, nan,
3.5, 1.5, 8.0, nan, 5.5]),
'min': np.array([1, 5, 7, 3, nan, 3, 1, 8, nan, 5]),
'max': np.array([2, 6, 7, 4, nan, 4, 2, 8, nan, 6]),
'first': np.array([1, 5, 7, 3, nan, 4, 2, 8, nan, 6]),
'dense': np.array([1, 3, 4, 2, nan, 2, 1, 5, nan, 3]),
}
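    # Worked example for the tie methods above: the two 1's in `s` occupy
    # ranks 1 and 2, so 'average' yields 1.5, 'min' 1, 'max' 2, 'first'
    # assigns 1 then 2 in order of appearance, and 'dense' gives both 1
    # (the next distinct value then gets 2).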
def test_rank(self):
pytest.importorskip('scipy.stats.special')
rankdata = pytest.importorskip('scipy.stats.rankdata')
self.ts[::2] = np.nan
self.ts[:10][::3] = 4.
ranks = self.ts.rank()
oranks = self.ts.astype('O').rank()
assert_series_equal(ranks, oranks)
mask = np.isnan(self.ts)
filled = self.ts.fillna(np.inf)
# rankdata returns a ndarray
exp = Series(rankdata(filled), index=filled.index, name='ts')
exp[mask] = np.nan
tm.assert_series_equal(ranks, exp)
iseries = Series(np.arange(5).repeat(2))
iranks = iseries.rank()
exp = iseries.astype(float).rank()
assert_series_equal(iranks, exp)
iseries = Series(np.arange(5)) + 1.0
exp = iseries / 5.0
iranks = iseries.rank(pct=True)
assert_series_equal(iranks, exp)
iseries = Series(np.repeat(1, 100))
exp = Series(np.repeat(0.505, 100))
iranks = iseries.rank(pct=True)
assert_series_equal(iranks, exp)
iseries[1] = np.nan
exp = Series(np.repeat(50.0 / 99.0, 100))
exp[1] = np.nan
iranks = iseries.rank(pct=True)
assert_series_equal(iranks, exp)
iseries = Series(np.arange(5)) + 1.0
iseries[4] = np.nan
exp = iseries / 4.0
iranks = iseries.rank(pct=True)
assert_series_equal(iranks, exp)
iseries = Series(np.repeat(np.nan, 100))
exp = iseries.copy()
iranks = iseries.rank(pct=True)
assert_series_equal(iranks, exp)
iseries = Series(np.arange(5)) + 1
iseries[4] = np.nan
exp = iseries / 4.0
iranks = iseries.rank(pct=True)
assert_series_equal(iranks, exp)
rng = date_range('1/1/1990', periods=5)
iseries = Series(np.arange(5), rng) + 1
iseries.iloc[4] = np.nan
exp = iseries / 4.0
iranks = iseries.rank(pct=True)
assert_series_equal(iranks, exp)
iseries = Series([1e-50, 1e-100, 1e-20, 1e-2, 1e-20 + 1e-30, 1e-1])
exp = Series([2, 1, 3, 5, 4, 6.0])
iranks = iseries.rank()
assert_series_equal(iranks, exp)
# GH 5968
iseries = Series(['3 day', '1 day 10m', '-2 day', NaT],
dtype='m8[ns]')
exp = Series([3, 2, 1, np.nan])
iranks = iseries.rank()
assert_series_equal(iranks, exp)
values = np.array(
[-50, -1, -1e-20, -1e-25, -1e-50, 0, 1e-40, 1e-20, 1e-10, 2, 40
], dtype='float64')
random_order = np.random.permutation(len(values))
iseries = Series(values[random_order])
exp = Series(random_order + 1.0, dtype='float64')
iranks = iseries.rank()
assert_series_equal(iranks, exp)
def test_rank_categorical(self):
# GH issue #15420 rank incorrectly orders ordered categories
# Test ascending/descending ranking for ordered categoricals
exp = Series([1., 2., 3., 4., 5., 6.])
exp_desc = Series([6., 5., 4., 3., 2., 1.])
ordered = Series(
['first', 'second', 'third', 'fourth', 'fifth', 'sixth']
).astype(CategoricalDtype(categories=['first', 'second', 'third',
'fourth', 'fifth', 'sixth'],
ordered=True))
assert_series_equal(ordered.rank(), exp)
assert_series_equal(ordered.rank(ascending=False), exp_desc)
# Unordered categoricals should be ranked as objects
unordered = Series(['first', 'second', 'third', 'fourth',
'fifth', 'sixth']).astype(
CategoricalDtype(categories=['first', 'second', 'third',
'fourth', 'fifth', 'sixth'],
ordered=False))
exp_unordered = Series([2., 4., 6., 3., 1., 5.])
res = unordered.rank()
assert_series_equal(res, exp_unordered)
unordered1 = Series(
[1, 2, 3, 4, 5, 6],
).astype(CategoricalDtype([1, 2, 3, 4, 5, 6], False))
exp_unordered1 = Series([1., 2., 3., 4., 5., 6.])
res1 = unordered1.rank()
assert_series_equal(res1, exp_unordered1)
# Test na_option for rank data
na_ser = Series(
['first', 'second', 'third', 'fourth', 'fifth', 'sixth', np.NaN]
).astype(CategoricalDtype(['first', 'second', 'third', 'fourth',
'fifth', 'sixth', 'seventh'], True))
exp_top = Series([2., 3., 4., 5., 6., 7., 1.])
exp_bot = Series([1., 2., 3., 4., 5., 6., 7.])
exp_keep = Series([1., 2., 3., 4., 5., 6., np.NaN])
assert_series_equal(na_ser.rank(na_option='top'), exp_top)
assert_series_equal(na_ser.rank(na_option='bottom'), exp_bot)
assert_series_equal(na_ser.rank(na_option='keep'), exp_keep)
# Test na_option for rank data with ascending False
exp_top = Series([7., 6., 5., 4., 3., 2., 1.])
exp_bot = Series([6., 5., 4., 3., 2., 1., 7.])
exp_keep = Series([6., 5., 4., 3., 2., 1., np.NaN])
assert_series_equal(
na_ser.rank(na_option='top', ascending=False),
exp_top
)
assert_series_equal(
na_ser.rank(na_option='bottom', ascending=False),
exp_bot
)
assert_series_equal(
na_ser.rank(na_option='keep', ascending=False),
exp_keep
)
# Test invalid values for na_option
msg = "na_option must be one of 'keep', 'top', or 'bottom'"
with pytest.raises(ValueError, match=msg):
na_ser.rank(na_option='bad', ascending=False)
# invalid type
with pytest.raises(ValueError, match=msg):
na_ser.rank(na_option=True, ascending=False)
# Test with pct=True
na_ser = Series(['first', 'second', 'third', 'fourth', np.NaN]).astype(
CategoricalDtype(['first', 'second', 'third', 'fourth'], True))
exp_top = Series([0.4, 0.6, 0.8, 1., 0.2])
exp_bot = Series([0.2, 0.4, 0.6, 0.8, 1.])
exp_keep = Series([0.25, 0.5, 0.75, 1., np.NaN])
assert_series_equal(na_ser.rank(na_option='top', pct=True), exp_top)
assert_series_equal(na_ser.rank(na_option='bottom', pct=True), exp_bot)
assert_series_equal(na_ser.rank(na_option='keep', pct=True), exp_keep)
def test_rank_signature(self):
s = Series([0, 1])
s.rank(method='average')
pytest.raises(ValueError, s.rank, 'average')
@pytest.mark.parametrize('contents,dtype', [
([-np.inf, -50, -1, -1e-20, -1e-25, -1e-50, 0, 1e-40, 1e-20, 1e-10,
2, 40, np.inf],
'float64'),
([-np.inf, -50, -1, -1e-20, -1e-25, -1e-45, 0, 1e-40, 1e-20, 1e-10,
2, 40, np.inf],
'float32'),
([np.iinfo(np.uint8).min, 1, 2, 100, np.iinfo(np.uint8).max],
'uint8'),
pytest.param([np.iinfo(np.int64).min, -100, 0, 1, 9999, 100000,
1e10, np.iinfo(np.int64).max],
'int64',
marks=pytest.mark.xfail(
reason="iNaT is equivalent to minimum value of dtype"
"int64 pending issue GH#16674",
strict=True)),
([NegInfinity(), '1', 'A', 'BA', 'Ba', 'C', Infinity()],
'object')
])
def test_rank_inf(self, contents, dtype):
dtype_na_map = {
'float64': np.nan,
'float32': np.nan,
'int64': iNaT,
'object': None
}
# Insert nans at random positions if underlying dtype has missing
# value. Then adjust the expected order by adding nans accordingly
# This is for testing whether rank calculation is affected
        # when values are intertwined with nan values.
values = np.array(contents, dtype=dtype)
exp_order = np.array(range(len(values)), dtype='float64') + 1.0
if dtype in dtype_na_map:
na_value = dtype_na_map[dtype]
nan_indices = np.random.choice(range(len(values)), 5)
values = np.insert(values, nan_indices, na_value)
exp_order = np.insert(exp_order, nan_indices, np.nan)
# shuffle the testing array and expected results in the same way
random_order = np.random.permutation(len(values))
iseries = Series(values[random_order])
exp = Series(exp_order[random_order], dtype='float64')
iranks = iseries.rank()
assert_series_equal(iranks, exp)
def test_rank_tie_methods(self):
s = self.s
def _check(s, expected, method='average'):
result = s.rank(method=method)
tm.assert_series_equal(result, Series(expected))
dtypes = [None, object]
disabled = {(object, 'first')}
results = self.results
for method, dtype in product(results, dtypes):
if (dtype, method) in disabled:
continue
series = s if dtype is None else s.astype(dtype)
_check(series, results[method], method=method)
@td.skip_if_no_scipy
@pytest.mark.parametrize('ascending', [True, False])
@pytest.mark.parametrize('method', ['average', 'min', 'max', 'first',
'dense'])
@pytest.mark.parametrize('na_option', ['top', 'bottom', 'keep'])
def test_rank_tie_methods_on_infs_nans(self, method, na_option, ascending):
dtypes = [('object', None, Infinity(), NegInfinity()),
('float64', np.nan, np.inf, -np.inf)]
chunk = 3
disabled = {('object', 'first')}
def _check(s, method, na_option, ascending):
exp_ranks = {
'average': ([2, 2, 2], [5, 5, 5], [8, 8, 8]),
'min': ([1, 1, 1], [4, 4, 4], [7, 7, 7]),
'max': ([3, 3, 3], [6, 6, 6], [9, 9, 9]),
'first': ([1, 2, 3], [4, 5, 6], [7, 8, 9]),
'dense': ([1, 1, 1], [2, 2, 2], [3, 3, 3])
}
ranks = exp_ranks[method]
if na_option == 'top':
order = [ranks[1], ranks[0], ranks[2]]
elif na_option == 'bottom':
order = [ranks[0], ranks[2], ranks[1]]
else:
order = [ranks[0], [np.nan] * chunk, ranks[1]]
expected = order if ascending else order[::-1]
expected = list(chain.from_iterable(expected))
result = s.rank(method=method, na_option=na_option,
ascending=ascending)
tm.assert_series_equal(result, Series(expected, dtype='float64'))
for dtype, na_value, pos_inf, neg_inf in dtypes:
in_arr = [neg_inf] * chunk + [na_value] * chunk + [pos_inf] * chunk
iseries = Series(in_arr, dtype=dtype)
if (dtype, method) in disabled:
continue
_check(iseries, method, na_option, ascending)
def test_rank_desc_mix_nans_infs(self):
# GH 19538
# check descending ranking when mix nans and infs
iseries = Series([1, np.nan, np.inf, -np.inf, 25])
result = iseries.rank(ascending=False)
exp = Series([3, np.nan, 1, 4, 2], dtype='float64')
tm.assert_series_equal(result, exp)
def test_rank_methods_series(self):
pytest.importorskip('scipy.stats.special')
rankdata = pytest.importorskip('scipy.stats.rankdata')
import scipy
xs = np.random.randn(9)
xs = np.concatenate([xs[i:] for i in range(0, 9, 2)]) # add duplicates
np.random.shuffle(xs)
index = [chr(ord('a') + i) for i in range(len(xs))]
for vals in [xs, xs + 1e6, xs * 1e-6]:
ts = Series(vals, index=index)
for m in ['average', 'min', 'max', 'first', 'dense']:
result = ts.rank(method=m)
sprank = rankdata(vals, m if m != 'first' else 'ordinal')
expected = Series(sprank, index=index)
if LooseVersion(scipy.__version__) >= LooseVersion('0.17.0'):
expected = expected.astype('float64')
tm.assert_series_equal(result, expected)
def test_rank_dense_method(self):
dtypes = ['O', 'f8', 'i8']
in_out = [([1], [1]),
([2], [1]),
([0], [1]),
([2, 2], [1, 1]),
([1, 2, 3], [1, 2, 3]),
([4, 2, 1], [3, 2, 1],),
([1, 1, 5, 5, 3], [1, 1, 3, 3, 2]),
([-5, -4, -3, -2, -1], [1, 2, 3, 4, 5])]
for ser, exp in in_out:
for dtype in dtypes:
s = Series(ser).astype(dtype)
result = s.rank(method='dense')
expected = Series(exp).astype(result.dtype)
assert_series_equal(result, expected)
def test_rank_descending(self):
dtypes = ['O', 'f8', 'i8']
for dtype, method in product(dtypes, self.results):
if 'i' in dtype:
s = self.s.dropna()
else:
s = self.s.astype(dtype)
res = s.rank(ascending=False)
expected = (s.max() - s).rank()
assert_series_equal(res, expected)
if method == 'first' and dtype == 'O':
continue
expected = (s.max() - s).rank(method=method)
res2 = s.rank(method=method, ascending=False)
assert_series_equal(res2, expected)
def test_rank_int(self):
s = self.s.dropna().astype('i8')
for method, res in compat.iteritems(self.results):
result = s.rank(method=method)
expected = Series(res).dropna()
expected.index = result.index
assert_series_equal(result, expected)
def test_rank_object_bug(self):
# GH 13445
# smoke tests
Series([np.nan] * 32).astype(object).rank(ascending=True)
Series([np.nan] * 32).astype(object).rank(ascending=False)
def test_rank_modify_inplace(self):
# GH 18521
# Check rank does not mutate series
s = Series([Timestamp('2017-01-05 10:20:27.569000'), NaT])
expected = s.copy()
s.rank()
result = s
assert_series_equal(result, expected)
# GH15630, pct should be on 100% basis when method='dense'
@pytest.mark.parametrize('dtype', ['O', 'f8', 'i8'])
@pytest.mark.parametrize('ser, exp', [
([1], [1.]),
([1, 2], [1. / 2, 2. / 2]),
([2, 2], [1., 1.]),
([1, 2, 3], [1. / 3, 2. / 3, 3. / 3]),
([1, 2, 2], [1. / 2, 2. / 2, 2. / 2]),
([4, 2, 1], [3. / 3, 2. / 3, 1. / 3],),
([1, 1, 5, 5, 3], [1. / 3, 1. / 3, 3. / 3, 3. / 3, 2. / 3]),
([1, 1, 3, 3, 5, 5], [1. / 3, 1. / 3, 2. / 3, 2. / 3, 3. / 3, 3. / 3]),
([-5, -4, -3, -2, -1], [1. / 5, 2. / 5, 3. / 5, 4. / 5, 5. / 5])])
def test_rank_dense_pct(dtype, ser, exp):
s = Series(ser).astype(dtype)
result = s.rank(method='dense', pct=True)
expected = Series(exp).astype(result.dtype)
assert_series_equal(result, expected)
@pytest.mark.parametrize('dtype', ['O', 'f8', 'i8'])
@pytest.mark.parametrize('ser, exp', [
([1], [1.]),
([1, 2], [1. / 2, 2. / 2]),
([2, 2], [1. / 2, 1. / 2]),
([1, 2, 3], [1. / 3, 2. / 3, 3. / 3]),
([1, 2, 2], [1. / 3, 2. / 3, 2. / 3]),
([4, 2, 1], [3. / 3, 2. / 3, 1. / 3],),
([1, 1, 5, 5, 3], [1. / 5, 1. / 5, 4. / 5, 4. / 5, 3. / 5]),
([1, 1, 3, 3, 5, 5], [1. / 6, 1. / 6, 3. / 6, 3. / 6, 5. / 6, 5. / 6]),
([-5, -4, -3, -2, -1], [1. / 5, 2. / 5, 3. / 5, 4. / 5, 5. / 5])])
def test_rank_min_pct(dtype, ser, exp):
s = Series(ser).astype(dtype)
result = s.rank(method='min', pct=True)
expected = Series(exp).astype(result.dtype)
assert_series_equal(result, expected)
@pytest.mark.parametrize('dtype', ['O', 'f8', 'i8'])
@pytest.mark.parametrize('ser, exp', [
([1], [1.]),
([1, 2], [1. / 2, 2. / 2]),
([2, 2], [1., 1.]),
([1, 2, 3], [1. / 3, 2. / 3, 3. / 3]),
([1, 2, 2], [1. / 3, 3. / 3, 3. / 3]),
([4, 2, 1], [3. / 3, 2. / 3, 1. / 3],),
([1, 1, 5, 5, 3], [2. / 5, 2. / 5, 5. / 5, 5. / 5, 3. / 5]),
([1, 1, 3, 3, 5, 5], [2. / 6, 2. / 6, 4. / 6, 4. / 6, 6. / 6, 6. / 6]),
([-5, -4, -3, -2, -1], [1. / 5, 2. / 5, 3. / 5, 4. / 5, 5. / 5])])
def test_rank_max_pct(dtype, ser, exp):
s = Series(ser).astype(dtype)
result = s.rank(method='max', pct=True)
expected = Series(exp).astype(result.dtype)
assert_series_equal(result, expected)
@pytest.mark.parametrize('dtype', ['O', 'f8', 'i8'])
@pytest.mark.parametrize('ser, exp', [
([1], [1.]),
([1, 2], [1. / 2, 2. / 2]),
([2, 2], [1.5 / 2, 1.5 / 2]),
([1, 2, 3], [1. / 3, 2. / 3, 3. / 3]),
([1, 2, 2], [1. / 3, 2.5 / 3, 2.5 / 3]),
([4, 2, 1], [3. / 3, 2. / 3, 1. / 3],),
([1, 1, 5, 5, 3], [1.5 / 5, 1.5 / 5, 4.5 / 5, 4.5 / 5, 3. / 5]),
([1, 1, 3, 3, 5, 5],
[1.5 / 6, 1.5 / 6, 3.5 / 6, 3.5 / 6, 5.5 / 6, 5.5 / 6]),
([-5, -4, -3, -2, -1], [1. / 5, 2. / 5, 3. / 5, 4. / 5, 5. / 5])])
def test_rank_average_pct(dtype, ser, exp):
s = Series(ser).astype(dtype)
result = s.rank(method='average', pct=True)
expected = Series(exp).astype(result.dtype)
assert_series_equal(result, expected)
@pytest.mark.parametrize('dtype', ['f8', 'i8'])
@pytest.mark.parametrize('ser, exp', [
([1], [1.]),
([1, 2], [1. / 2, 2. / 2]),
([2, 2], [1. / 2, 2. / 2.]),
([1, 2, 3], [1. / 3, 2. / 3, 3. / 3]),
([1, 2, 2], [1. / 3, 2. / 3, 3. / 3]),
([4, 2, 1], [3. / 3, 2. / 3, 1. / 3],),
([1, 1, 5, 5, 3], [1. / 5, 2. / 5, 4. / 5, 5. / 5, 3. / 5]),
([1, 1, 3, 3, 5, 5], [1. / 6, 2. / 6, 3. / 6, 4. / 6, 5. / 6, 6. / 6]),
([-5, -4, -3, -2, -1], [1. / 5, 2. / 5, 3. / 5, 4. / 5, 5. / 5])])
def test_rank_first_pct(dtype, ser, exp):
s = Series(ser).astype(dtype)
result = s.rank(method='first', pct=True)
expected = Series(exp).astype(result.dtype)
assert_series_equal(result, expected)
| bsd-3-clause |
openfisca/openfisca-qt | openfisca_qt/openFisca.py | 1 | 63097 | # -*- coding:utf-8 -*-
#
# This file is part of OpenFisca.
# OpenFisca is a socio-fiscal microsimulation software
# Copyright © 2011 Clément Schaff, Mahdi Ben Jelloul
# Licensed under the terms of the GPLv3 or later license
# (see openfisca/__init__.py for details)
# This file is inspired by Spyder, see openfisca/spyder.txt for more details
"""
OpenFisca
=========
Developed and maintained by Mahdi Ben Jelloul and Clément Schaff
"""
import os
import os.path as osp
import platform
import re
import sys
import openfisca_france
openfisca_france.init_country(qt = True)
import pkg_resources
# Keeping a reference to the original sys.exit before patching it
ORIGINAL_SYS_EXIT = sys.exit
from openfisca_qt.gui.utils.programs import is_module_installed
if is_module_installed('IPython.frontend.qt', '>=0.13'):
# Importing IPython will eventually set the QT_API environment variable
import IPython #@UnresolvedImport #@UnusedImport
if os.environ.get('QT_API', 'pyqt') == 'pyqt':
# If PyQt is the selected GUI toolkit (at this stage, only the
# bootstrap script has eventually set this option), switch to
# PyQt API #2 by simply importing the IPython qt module
os.environ['QT_API'] = 'pyqt'
try:
from IPython.external import qt #analysis:ignore
except ImportError:
# Avoid raising any error here: the spyderlib.requirements module
# will take care of it, in a user-friendly way (Tkinter message box
# if no GUI toolkit is installed)
pass
## Check requirements
from openfisca_qt.gui import requirements
#requirements.check_path() # TODO: Fix this
requirements.check_qt()
#
## Windows platforms only: support for hiding the attached console window
set_attached_console_visible = None
is_attached_console_visible = None
if os.name == 'nt':
from openfisca_qt.gui.utils.windows import (set_attached_console_visible,
is_attached_console_visible)
from openfisca_qt.gui.qt.QtGui import (QApplication, QMainWindow, QSplashScreen,
QPixmap, QMessageBox, QMenu, QColor, QShortcut,
QKeySequence, QDockWidget, QAction,
QDesktopServices)
from openfisca_qt.gui.qt.QtCore import SIGNAL, SLOT, QPoint, Qt, QSize, QByteArray, QUrl
from openfisca_qt.gui.qt.compat import (from_qvariant, getopenfilename,
getsavefilename)
# Avoid a "Cannot mix incompatible Qt library" error on Windows platforms
# when PySide is selected by the QT_API environment variable and when PyQt4
# is also installed (or any other Qt-based application prepending a directory
# containing incompatible Qt DLLs versions in PATH):
from openfisca_qt.gui.qt import QtSvg # analysis:ignore
# Local imports
from openfisca_qt.gui.utils import encoding, vcs, programs
try:
from openfisca_qt.utils.environ import WinUserEnvDialog
except ImportError:
WinUserEnvDialog = None # analysis:ignore
#from openfisca_qt.widgets.pathmanager import PathManager #TODO
from openfisca_qt.gui.spyder_widgets.status import MemoryStatus, CPUStatus
from openfisca_qt.plugins.general.configdialog import (ConfigDialog, MainConfigPage,
ColorSchemeConfigPage)
from openfisca_qt.plugins.general.shortcuts import ShortcutsConfigPage
try:
# Assuming Qt >= v4.4
from openfisca_qt.plugins.general.onlinehelp import OnlineHelp
except ImportError:
# Qt < v4.4
OnlineHelp = None # analysis:ignore
#from openfisca_core import model
#from openfisca_core.simulations import SurveySimulation, ScenarioSimulation
from openfisca_france.scenarios import Scenario
from openfisca_france.surveys import SurveyScenario
__version__ = pkg_resources.require("openfisca-core")[0].version
from openfisca_qt import __project_url__, __forum_url__, widgets
from openfisca_qt.plugins.general.Parametres import ParamWidget
from openfisca_qt.plugins.scenario.graph import ScenarioGraphWidget
from openfisca_qt.plugins.scenario.table import ScenarioTableWidget
from openfisca_qt.plugins.survey.survey_explorer import SurveyExplorerWidget
#from openfisca_qt.plugins.survey.aggregates import AggregatesWidget
from openfisca_qt.plugins.survey.distribution import DistributionWidget
#from openfisca_qt.plugins.survey.inequality import InequalityWidget
from openfisca_qt.plugins.survey.Calibration import CalibrationWidget
from openfisca_qt.gui.utils.qthelpers import (
add_actions,
create_action,
create_bookmark_action,
create_module_bookmark_actions,
create_program_action,
create_python_script_action,
DialogManager,
file_uri,
get_std_icon,
keybinding, qapplication,
)
from openfisca_qt.gui.baseconfig import (
_,
get_conf_path,
get_module_data_path,
get_module_source_path,
STDERR,
STDOUT,
)
from openfisca_qt.gui.config import get_icon, get_image_path, get_shortcut
from openfisca_qt.gui.config import CONF, EDIT_EXT
from openfisca_qt.otherplugins import get_openfiscaplugins_mods
from openfisca_qt.gui.utils.iofuncs import load_session, save_session, reset_session
from openfisca_qt.gui.userconfig import NoDefault, NoOptionError
from openfisca_qt.gui.utils import module_completion
TEMP_SESSION_PATH = get_conf_path('.temp.session.tar')
def get_python_doc_path():
"""
Return Python documentation path
(Windows: return the PythonXX.chm path if available)
"""
if os.name == 'nt':
doc_path = osp.join(sys.prefix, "Doc")
if not osp.isdir(doc_path):
return
python_chm = [path for path in os.listdir(doc_path)
if re.match(r"(?i)Python[0-9]{3}.chm", path)]
if python_chm:
return file_uri(osp.join(doc_path, python_chm[0]))
else:
vinf = sys.version_info
doc_path = '/usr/share/doc/python%d.%d/html' % (vinf[0], vinf[1])
python_doc = osp.join(doc_path, "index.html")
if osp.isfile(python_doc):
return file_uri(python_doc)
#==============================================================================
# Openfisca's main window widgets utilities
#==============================================================================
def get_focus_widget_properties():
"""
Get properties of focus widget
Returns tuple (widget, properties) where properties is a tuple of
booleans: (is_console, not_readonly, readwrite_editor)
"""
widget = QApplication.focusWidget()
try:
not_readonly = not widget.isReadOnly()
except:
not_readonly = False
console = False
readwrite_editor = not_readonly and not console
console = False
textedit_properties = (console, not_readonly, readwrite_editor)
return widget, textedit_properties
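# Hedged usage sketch of the helper above:
#     widget, (console, not_readonly, readwrite_editor) = \
#         get_focus_widget_properties()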
#TODO: Improve the stylesheet below for separator handles to be visible
# (in Qt, these handles are by default not visible on Windows!)
STYLESHEET="""
QSplitter::handle {
margin-left: 4px;
margin-right: 4px;
}
QSplitter::handle:horizontal {
width: 1px;
border-width: 0px;
background-color: lightgray;
}
QSplitter::handle:vertical {
border-top: 2px ridge lightgray;
border-bottom: 2px;
}
QMainWindow::separator:vertical {
margin-left: 1px;
margin-top: 25px;
margin-bottom: 25px;
border-left: 2px groove lightgray;
border-right: 1px;
}
QMainWindow::separator:horizontal {
margin-top: 1px;
margin-left: 5px;
margin-right: 5px;
border-top: 2px groove lightgray;
border-bottom: 2px;
}
"""
class MainWindow(QMainWindow):
"""
Openfisca main window
"""
DOCKOPTIONS = QMainWindow.AllowTabbedDocks|QMainWindow.AllowNestedDocks
openfisca_path = get_conf_path('.path')
def __init__(self, options=None):
QMainWindow.__init__(self)
qapp = QApplication.instance()
self.default_style = str(qapp.style().objectName())
self.dialog_manager = DialogManager()
self.init_workdir = options.working_directory
self.debug = options.debug
self.debug_print("Start of MainWindow constructor")
self.setStyleSheet(STYLESHEET)
# Shortcut management data
self.shortcut_data = []
# Loading Openfisca path
self.path = []
self.project_path = []
if osp.isfile(self.openfisca_path):
self.path, _x = encoding.readlines(self.openfisca_path)
self.path = [name for name in self.path if osp.isdir(name)]
self.remove_path_from_sys_path()
self.add_path_to_sys_path()
self.load_temp_session_action = create_action(self,
_("Reload last session"),
triggered=lambda:
self.load_session(TEMP_SESSION_PATH))
self.load_session_action = create_action(self,
_("Load session..."),
None, 'fileopen.png',
triggered=self.load_session,
tip=_("Load Openfisca session"))
self.save_session_action = create_action(self,
_("Save session and quit..."),
None, 'filesaveas.png',
triggered=self.save_session,
tip=_("Save current session "
"and quit application"))
# Plugins
self.scenario = None
self.survey_scenario = None
self.onlinehelp = None
self.parameters = None
self.test_case_graph = None
self.test_case_table = None
self.survey_explorer = None
self.aggregates = None
self.distribution = None
self.inequality = None
self.calibration = None
self.test_case_plugins = []
self.survey_plugins = []
self.thirdparty_plugins = []
# Preferences
self.general_prefs = [MainConfigPage, ShortcutsConfigPage,
ColorSchemeConfigPage]
self.prefs_index = None
self.prefs_dialog_size = None
# Actions
self.close_dockwidget_action = None
self.find_action = None
self.find_next_action = None
self.find_previous_action = None
self.replace_action = None
self.undo_action = None
self.redo_action = None
self.copy_action = None
self.cut_action = None
self.paste_action = None
self.delete_action = None
self.selectall_action = None
self.maximize_action = None
self.fullscreen_action = None
# Menu bars
self.file_menu = None
self.file_menu_actions = []
self.edit_menu = None
self.edit_menu_actions = []
self.run_menu = None
self.run_menu_actions = []
self.tools_menu = None
self.tools_menu_actions = []
self.external_tools_menu = None # We must keep a reference to this,
# otherwise the external tools menu is lost after leaving setup method
self.external_tools_menu_actions = []
self.view_menu = None
self.windows_toolbars_menu = None
self.help_menu = None
self.help_menu_actions = []
# Status bar widgets
self.mem_status = None
self.cpu_status = None
# Toolbars
self.main_toolbar = None
self.main_toolbar_actions = []
self.file_toolbar = None
self.file_toolbar_actions = []
self.test_case_toolbar = None
self.test_case_toolbar_actions = []
self.survey_toolbar = None
self.survey_toolbar_actions = []
# Set Window title and icon
title = "openFisca"
if self.debug:
title += " (DEBUG MODE)"
self.setWindowTitle(title)
icon_name = 'OpenFisca22.png' # needs an svg icon
self.setWindowIcon(get_icon(icon_name))
# Showing splash screen
pixmap = QPixmap(get_image_path('splash.png'), 'png')
self.splash = QSplashScreen(pixmap)
font = self.splash.font()
font.setPixelSize(10)
self.splash.setFont(font)
self.splash.show()
self.set_splash(_("Initializing..."))
if CONF.get('main', 'current_version', '') != __version__:
CONF.set('main', 'current_version', __version__)
# Execute here the actions to be performed only once after
# each update (there is nothing there for now, but it could
# be useful some day...)
# List of satellite widgets (registered in add_dockwidget):
self.widgetlist = []
# Flags used if closing() is called by the exit() shell command
self.already_closed = False
self.is_starting_up = True
self.floating_dockwidgets = []
self.window_size = None
self.window_position = None
self.state_before_maximizing = None
self.current_quick_layout = None
self.previous_layout_settings = None
self.last_plugin = None
self.fullscreen_flag = None # isFullscreen does not work as expected
# The following flag remember the maximized state even when
# the window is in fullscreen mode:
self.maximized_flag = None
# Session manager
self.next_session_name = None
self.save_session_name = None
self.apply_settings()
self.debug_print("End of MainWindow constructor")
def parameters_changed(self):
"""
Actions to perform after parameters are changed
"""
self.debug_print("Entering parameters_changed")
P, P_default = self.parameters.getParam(), self.parameters.getParam(defaut = True)
# TODO: include a reform here
self.reform = None
# self.scenario.set_param(P, P_default)
self.composition.set_reform(reform = False)
if not self.survey_explorer.get_option('bareme_only'):
self.survey_scenario.set_param(P, P_default)
self.survey_explorer.set_reform(reform = False)
self.survey_explorer.action_compute.setEnabled(True)
self.debug_print("Exiting parameters_changed")
def country_changed(self):
"""
Actions to perform after country is changed
"""
self.debug_print("Actions afer changing country : no action")
# self.main.register_test_case_widgets()
# self.main.register_survey_widgets()
# self.register_plugins()
def refresh_test_case_plugins(self):
"""
        Refresh test case plugins after computation
"""
self.test_case_graph.refresh_plugin()
self.test_case_table.refresh_plugin()
def refresh_survey_plugins(self):
"""
Refresh survey plugins after computation
"""
if CONF.get('survey', 'enable'):
for plugin in self.survey_plugins:
plugin.refresh_plugin()
def debug_print(self, message):
"""
Debug prints
"""
if self.debug:
print >>STDOUT, message
#---- Window setup
def create_toolbar(self, title, object_name, iconsize=24):
"""
Create and return toolbar with *title* and *object_name*
Parameters
----------
title : str
Title of the toolbar
        object_name : str
Object name
iconsize : int, default 24
Icon size
"""
toolbar = self.addToolBar(title)
toolbar.setObjectName(object_name)
toolbar.setIconSize( QSize(iconsize, iconsize) )
return toolbar
def setup(self):
"""
Setup main window
"""
self.debug_print("*** Start of MainWindow setup ***")
self.close_dockwidget_action = create_action(self,
_("Close current dockwidget"),
triggered=self.close_current_dockwidget,
context=Qt.ApplicationShortcut)
self.register_shortcut(self.close_dockwidget_action,
"_", "Close dockwidget", "Shift+Ctrl+F4")
# Maximize current plugin
self.maximize_action = create_action(self, '',
triggered=self.maximize_dockwidget)
self.register_shortcut(self.maximize_action, "_",
"Maximize dockwidget", "Ctrl+Alt+Shift+M")
self.__update_maximize_action()
# Fullscreen mode
self.fullscreen_action = create_action(self,
_("Fullscreen mode"),
triggered=self.toggle_fullscreen)
self.register_shortcut(self.fullscreen_action, "_",
"Fullscreen mode", "F11")
self.main_toolbar_actions = [self.maximize_action,
self.fullscreen_action, None]
# Main toolbar
self.main_toolbar = self.create_toolbar(_("Main toolbar"),
"main_toolbar")
# File menu/toolbar
self.file_menu = self.menuBar().addMenu(_("&File"))
self.connect(self.file_menu, SIGNAL("aboutToShow()"),
self.update_file_menu)
self.file_toolbar = self.create_toolbar(_("File toolbar"),
"file_toolbar")
# Run menu/toolbar
self.run_menu = self.menuBar().addMenu(_("&Run"))
# Tools menu
self.tools_menu = self.menuBar().addMenu(_("&Tools"))
# View menu
self.view_menu = self.menuBar().addMenu(_("&View"))
# Help menu
self.help_menu = self.menuBar().addMenu("?")
# Status bar
status = self.statusBar()
status.setObjectName("StatusBar")
status.showMessage(_("Welcome to Openfisca!"), 5000)
# Tools
prefs_action = create_action(self, _("Pre&ferences"),
icon='configure.png',
triggered=self.edit_preferences)
self.register_shortcut(prefs_action, "_", "Preferences",
"Ctrl+Alt+Shift+P")
self.tools_menu_actions = [prefs_action,]
self.main_toolbar_actions += [prefs_action,]
if WinUserEnvDialog is not None:
winenv_action = create_action(self,
_("Current user environment variables..."),
icon='win_env.png',
tip=_("Show and edit current user environment "
"variables in Windows registry "
"(i.e. for all sessions)"),
triggered=self.win_env)
self.tools_menu_actions.append(winenv_action)
# Populating file menu entries
quit_action = create_action(self, _("&Quit"),
icon = 'exit.png', tip=_("Quit"),
triggered = SLOT('close()')
)
self.register_shortcut(quit_action, "_", "Quit", "Ctrl+Q")
self.file_menu_actions += [self.load_temp_session_action,
self.load_session_action,
self.save_session_action,
None, quit_action, None]
# self.action_calibrate = create_action(self, u'Caler les poids', shortcut = 'CTRL+K', icon = 'scale22.png', triggered = self.calibrate)
# self.action_inflate = create_action(self, u'Inflater les montants', shortcut = 'CTRL+I', icon = 'scale22.png', triggered = self.inflate)
TaxBenefitSystem = openfisca_france.init_country() # TODO: change this
tax_benefit_system_class = TaxBenefitSystem
# Parameters widget
if CONF.get('parameters', 'enable'):
self.set_splash(_("Loading Parameters..."))
self.parameters = ParamWidget(self, tax_benefit_system_class)
self.parameters.register_plugin()
# Test case widgets
self.tax_benefit_system = tax_benefit_system = tax_benefit_system_class()
self.scenario = tax_benefit_system.new_scenario()
self.register_test_case_widgets()
# Survey Widgets
self.register_survey_widgets()
self.register_plugins()
# ? menu
about_action = create_action(self,
_("About %s...") % "openFisca",
icon=get_std_icon('MessageBoxInformation'),
triggered=self.about)
report_action = create_action(self,
_("Report issue..."),
icon=get_icon('bug.png'),
triggered=self.report_issue
)
# Openfisca documentation
doc_path = get_module_data_path('openfisca_qt', relpath="doc",
attr_name='DOCPATH')
# * Trying to find the chm doc
openfisca_doc = osp.join(doc_path, "OpensiscaDoc.chm")
if not osp.isfile(openfisca_doc):
openfisca_doc = osp.join(doc_path, os.pardir, os.pardir,
"OpensiscaDoc.chm")
# * Trying to find the html doc
if not osp.isfile(openfisca_doc):
openfisca_doc = osp.join(doc_path, "index.html")
if not osp.isfile(openfisca_doc): # development version
openfisca_doc = osp.join(get_module_source_path("openfisca_qt"),
"doc", "_build", "html", "index.html")
openfisca_doc = file_uri(openfisca_doc)
doc_action = create_bookmark_action(self, openfisca_doc,
_("Openfisca documentation"), shortcut="F1",
icon=get_std_icon('DialogHelpButton'))
self.help_menu_actions = [about_action, report_action, doc_action]
# Status bar widgets
self.mem_status = MemoryStatus(self, status)
self.cpu_status = CPUStatus(self, status)
self.apply_statusbar_settings()
# Third-party plugins
for mod in get_openfiscaplugins_mods(prefix='p_', extension='.py'):
try:
plugin = mod.PLUGIN_CLASS(self)
self.thirdparty_plugins.append(plugin)
plugin.register_plugin()
except AttributeError, error:
print >>STDERR, "%s: %s" % (mod, str(error))
# View menu
self.windows_toolbars_menu = QMenu(_("Windows and toolbars"), self)
self.connect(self.windows_toolbars_menu, SIGNAL("aboutToShow()"),
self.update_windows_toolbars_menu)
self.view_menu.addMenu(self.windows_toolbars_menu)
reset_layout_action = create_action(self, _("Reset window layout"),
triggered=self.reset_window_layout)
quick_layout_menu = QMenu(_("Custom window layouts"), self)
ql_actions = []
for index in range(1, 4):
if index > 0:
ql_actions += [None]
qli_act = create_action(self,
_("Switch to/from layout %d") % index,
triggered=lambda i=index:
self.quick_layout_switch(i))
self.register_shortcut(qli_act, "_",
"Switch to/from layout %d" % index,
"Shift+Alt+F%d" % index)
qlsi_act = create_action(self, _("Set layout %d") % index,
triggered=lambda i=index:
self.quick_layout_set(i))
self.register_shortcut(qlsi_act, "_",
"Set layout %d" % index,
"Ctrl+Shift+Alt+F%d" % index)
ql_actions += [qli_act, qlsi_act]
add_actions(quick_layout_menu, ql_actions)
add_actions(self.view_menu, (None, self.maximize_action,
self.fullscreen_action, None,
reset_layout_action, quick_layout_menu,
None, self.close_dockwidget_action))
# Filling out menu/toolbar entries:
add_actions(self.file_menu, self.file_menu_actions)
add_actions(self.run_menu, self.run_menu_actions)
# add_actions(self.interact_menu, self.interact_menu_actions)
add_actions(self.tools_menu, self.tools_menu_actions)
add_actions(self.help_menu, self.help_menu_actions)
add_actions(self.main_toolbar, self.main_toolbar_actions)
add_actions(self.file_toolbar, self.file_toolbar_actions)
# Apply all defined shortcuts (plugins + 3rd-party plugins)
self.apply_shortcuts()
# Emitting the signal notifying plugins that main window menu and
# toolbar actions are all defined:
self.emit(SIGNAL('all_actions_defined()'))
# Window set-up
self.debug_print("Setting up window...")
self.setup_layout(default=False)
self.splash.hide()
# Enabling tear off for all menus except help menu
for child in self.menuBar().children():
if isinstance(child, QMenu) and child != self.help_menu:
child.setTearOffEnabled(True)
self.debug_print("*** End of MainWindow setup ***")
self.is_starting_up = False
def register_plugins(self):
for entry_point in pkg_resources.iter_entry_points('openfisca.plugins'):
plugin_registration = entry_point.load()
plugin_registration(qt_main_window = self)
def register_test_case_widgets(self):
"""
Register test case widgets
"""
# Test case widgets
self.set_splash(_("Loading Test case composer ..."))
value = CONF.get('parameters', 'datesim')
from datetime import datetime
datesim = datetime.strptime(value, "%Y-%m-%d").date()
self.scenario.year = datesim.year
self.composition = widgets.CompositionWidget(self.scenario, parent=self)
self.composition.register_plugin()
# Scenario Graph widget
if CONF.get('composition', 'graph/enable'):
self.set_splash(_("Loading ScenarioGraph..."))
self.test_case_graph = ScenarioGraphWidget(self)
self.test_case_graph.register_plugin()
# Scenario Table widget
if CONF.get('composition', 'table/enable'):
self.set_splash(_("Loading ScenarioTable..."))
self.test_case_table = ScenarioTableWidget(self)
self.test_case_table.register_plugin()
if self.test_case_toolbar is not None:
self.test_case_toolbar.clear()
else:
self.test_case_toolbar = self.create_toolbar(
_("Survey toolbar"),
"survey_toolbar"
)
add_actions(self.test_case_toolbar, self.test_case_toolbar_actions)
self.test_case_plugins = [
self.composition, self.test_case_graph, self.test_case_table
]
self.splash.hide()
def register_survey_widgets(self):
"""
Registers enabled survey widgets
"""
self.survey_explorer = SurveyExplorerWidget(self)
self.set_splash(_("Loading SurveyExplorer..."))
self.survey_explorer.register_plugin()
self.survey_plugins = [self.survey_explorer]
if CONF.get('survey', 'bareme_only') is False:
self.debug_print("Register survey widgets")
self.survey_scenario = SurveyScenario()
self.survey_explorer.initialize()
self.survey_explorer.load_data()
self.survey_scenario.set_config()
self.survey_scenario.set_param()
if self.survey_scenario.input_table is None:
self.debug_print("No survey data, dont load survey plugins")
return
# Calibration widget
if CONF.get('calibration', 'enable'):
self.set_splash(_("Loading calibration widget ..."))
self.calibration = CalibrationWidget(self)
self.calibration.register_plugin()
self.survey_plugins += [self.calibration]
# # Aggregates widget
# if CONF.get('aggregates', 'enable'):
# self.set_splash(_("Loading aggregates widget ..."))
# self.aggregates = AggregatesWidget(self)
# self.aggregates.register_plugin()
# self.survey_plugins += [ self.aggregates]
# Distribution widget
if CONF.get('distribution', 'enable'):
self.set_splash(_("Loading distribution widget ..."))
self.distribution = DistributionWidget(self)
self.distribution.register_plugin()
self.survey_plugins += [self.distribution]
# # Inequality widget
# if CONF.get('inequality', 'enable'):
# self.set_splash(_("Loading inequality widget ..."))
# self.inequality = InequalityWidget(self)
# self.inequality.register_plugin()
# self.survey_plugins += [ self.inequality]
# Creates survey_toolbar if needed
if self.survey_toolbar is not None:
self.survey_toolbar.clear()
else:
self.survey_toolbar = self.create_toolbar(
_("Survey toolbar"),
"survey_toolbar",
)
add_actions(self.survey_toolbar, self.survey_toolbar_actions)
self.splash.hide()
def post_visible_setup(self):
"""
Actions to be performed only after the main window's `show` method
was triggered
"""
# [Workaround for Issue 880]
# QDockWidget objects are not painted if restored as floating
# windows, so we must dock them before showing the mainwindow,
# then set them again as floating windows here.
for widget in self.floating_dockwidgets:
widget.setFloating(True)
        # In Mac OS X 10.7 the app is not displayed after initialization
        # (this does not happen when started from the terminal), so we need
        # to resort to this hack to make it appear.
if sys.platform == 'darwin':
if 'Openfisca.app' in __file__:
import subprocess
idx = __file__.index('Openfisca.app')
app_path = __file__[:idx]
subprocess.call(['open', app_path + 'Openfisca.app'])
def load_window_settings(self, prefix, default=False, section='main'):
"""
Load window layout settings from userconfig-based configuration
with *prefix*, under *section*
default: if True, do not restore inner layout
"""
get_func = CONF.get_default if default else CONF.get
window_size = get_func(section, prefix + 'size')
prefs_dialog_size = get_func(section, prefix + 'prefs_dialog_size')
if default:
hexstate = None
else:
hexstate = get_func(section, prefix + 'state', None)
pos = get_func(section, prefix + 'position')
is_maximized = get_func(section, prefix + 'is_maximized')
is_fullscreen = get_func(section, prefix + 'is_fullscreen')
return hexstate, window_size, prefs_dialog_size, pos, is_maximized, is_fullscreen
def get_window_settings(self):
"""
Return current window settings
        Symmetric to the 'set_window_settings' setter
"""
size = self.window_size
# width, height = size.width(), size.height()
is_fullscreen = self.isFullScreen()
if is_fullscreen:
is_maximized = self.maximized_flag
else:
is_maximized = self.isMaximized()
pos = self.window_position
posx, posy = pos.x(), pos.y()
hexstate = str(self.saveState().toHex())
return hexstate, size, posx, posy, is_maximized, is_fullscreen
def set_window_settings(self, hexstate, window_size, prefs_dialog_size,
pos, is_maximized, is_fullscreen):
"""
Set window settings
        Symmetric to the 'get_window_settings' accessor
"""
self.setUpdatesEnabled(False)
self.window_size = QSize(window_size[0], window_size[1]) # width,height
self.prefs_dialog_size = QSize(prefs_dialog_size[0],
prefs_dialog_size[1]) # width,height
self.window_position = QPoint(pos[0], pos[1]) # x,y
self.setWindowState(Qt.WindowNoState)
self.resize(self.window_size)
self.move(self.window_position)
# Window layout
if hexstate:
self.restoreState( QByteArray().fromHex(str(hexstate)) )
# [Workaround for Issue 880]
# QDockWidget objects are not painted if restored as floating
# windows, so we must dock them before showing the mainwindow.
for widget in self.children():
if isinstance(widget, QDockWidget) and widget.isFloating():
self.floating_dockwidgets.append(widget)
widget.setFloating(False)
# Is fullscreen?
if is_fullscreen:
self.setWindowState(Qt.WindowFullScreen)
self.__update_fullscreen_action()
# Is maximized?
if is_fullscreen:
self.maximized_flag = is_maximized
elif is_maximized:
self.setWindowState(Qt.WindowMaximized)
self.setUpdatesEnabled(True)
def save_current_window_settings(self, prefix, section='main'):
"""
Save current window settings with *prefix* in
the userconfig-based configuration, under *section*
"""
win_size = self.window_size
prefs_size = self.prefs_dialog_size
CONF.set(section, prefix+'size', (win_size.width(), win_size.height()))
CONF.set(section, prefix+'prefs_dialog_size',
(prefs_size.width(), prefs_size.height()))
CONF.set(section, prefix+'is_maximized', self.isMaximized())
CONF.set(section, prefix+'is_fullscreen', self.isFullScreen())
pos = self.window_position
CONF.set(section, prefix+'position', (pos.x(), pos.y()))
        self.maximize_dockwidget(restore=True)  # Restore non-maximized layout
qba = self.saveState()
CONF.set(section, prefix+'state', str(qba.toHex()))
CONF.set(section, prefix+'statusbar',
not self.statusBar().isHidden())
def setup_layout(self, default=False, recompute_test_case = True, changed_country = False):
"""
Setup window layout
"""
        prefix = 'window' + '/'
(hexstate, window_size, prefs_dialog_size, pos, is_maximized,
is_fullscreen) = self.load_window_settings(prefix, default)
if hexstate is None or changed_country:
# First Openfisca execution:
# trying to set-up the dockwidget/toolbar positions to the best
# appearance possible
splitting = (
(self.test_case_graph, self.parameters, Qt.Horizontal),
(self.parameters, self.composition, Qt.Vertical),
)
for first, second, orientation in splitting:
if first is not None and second is not None:
self.splitDockWidget(first.dockwidget, second.dockwidget,
orientation)
for first, second in ((self.test_case_graph, self.test_case_table),
(self.test_case_table, self.aggregates),
(self.aggregates, self.distribution),
(self.distribution, self.inequality),
(self.inequality, self.survey_explorer),
(self.survey_explorer, self.calibration),
):
if first is not None and second is not None:
self.tabifyDockWidget(first.dockwidget, second.dockwidget)
for plugin in [self.onlinehelp, ]+self.thirdparty_plugins:
if plugin is not None:
plugin.dockwidget.close()
for plugin in self.test_case_plugins + self.survey_plugins:
if plugin is not None:
plugin.dockwidget.raise_()
if self.survey_explorer is not None:
if CONF.get('survey', 'bareme_only'):
self.survey_explorer.dockwidget.hide()
else:
self.survey_explorer.dockwidget.show()
if recompute_test_case is True:
self.composition.set_scenario(self.scenario)
self.composition.compute()
self.test_case_graph.dockwidget.raise_()
self.set_window_settings(hexstate,window_size, prefs_dialog_size, pos,
is_maximized, is_fullscreen)
def reset_window_layout(self):
"""
Reset window layout to default
"""
answer = QMessageBox.warning(self, _("Warning"),
_("Window layout will be reset to default settings: "
"this affects window position, size and dockwidgets.\n"
"Do you want to continue?"),
QMessageBox.Yes | QMessageBox.No)
if answer == QMessageBox.Yes:
self.setup_layout(default=True)
def quick_layout_switch(self, index):
"""
Switch to quick layout number *index*
"""
if self.current_quick_layout == index:
self.set_window_settings(*self.previous_layout_settings)
self.current_quick_layout = None
else:
try:
settings = self.load_window_settings('layout_%d/' % index,
section='quick_layouts')
except NoOptionError:
QMessageBox.critical(self, _("Warning"),
_("Quick switch layout #%d has not yet "
"been defined.") % index)
return
self.previous_layout_settings = self.get_window_settings()
self.set_window_settings(*settings)
self.current_quick_layout = index
def quick_layout_set(self, index):
"""
Save current window settings as quick layout number *index*
"""
self.save_current_window_settings('layout_%d/' % index,
section='quick_layouts')
def plugin_focus_changed(self):
"""
Focus has changed from one plugin to another
"""
pass
# self.update_edit_menu()
# self.update_search_menu()
def update_file_menu(self):
"""
Update file menu
"""
self.load_temp_session_action.setEnabled(osp.isfile(TEMP_SESSION_PATH))
# widget, textedit_properties = get_focus_widget_properties()
# for widget in self.widgetlist:
# if widget.isvisible:
# widget.get_plugin_actions()
# add_actions(self.file_menu, self.file_menu_actions)
def update_windows_toolbars_menu(self):
"""
Update windows&toolbars menu
"""
self.windows_toolbars_menu.clear()
popmenu = self.createPopupMenu()
add_actions(self.windows_toolbars_menu, popmenu.actions())
def set_splash(self, message):
"""
Set splash message
"""
if message:
self.debug_print(message)
self.splash.show()
self.splash.showMessage(message, Qt.AlignBottom | Qt.AlignCenter |
Qt.AlignAbsolute, QColor(Qt.white))
QApplication.processEvents()
def closeEvent(self, event):
"""
closeEvent reimplementation
"""
if self.closing(True):
event.accept()
else:
event.ignore()
def resizeEvent(self, event):
"""
Reimplement Qt method
"""
if not self.isMaximized() and not self.fullscreen_flag:
self.window_size = self.size()
QMainWindow.resizeEvent(self, event)
def moveEvent(self, event):
"""
Reimplement Qt method
"""
if not self.isMaximized() and not self.fullscreen_flag:
self.window_position = self.pos()
QMainWindow.moveEvent(self, event)
def closing(self, cancelable=False):
"""
Exit tasks
"""
if self.already_closed or self.is_starting_up:
return True
for widget in self.widgetlist:
if not widget.closing_plugin(cancelable):
return False
self.dialog_manager.close_all()
self.already_closed = True
return True
def add_dockwidget(self, child):
"""
Add QDockWidget and toggleViewAction
"""
self.debug_print('Adding dockwidget ' + str(child))
dockwidget, location = child.create_dockwidget()
if CONF.get('main', 'vertical_dockwidget_titlebars'):
dockwidget.setFeatures(dockwidget.features()|
QDockWidget.DockWidgetVerticalTitleBar)
self.addDockWidget(location, dockwidget)
self.widgetlist.append(child)
def close_current_dockwidget(self):
widget = QApplication.focusWidget()
for plugin in self.widgetlist:
if plugin.isAncestorOf(widget):
plugin.dockwidget.hide()
break
def __update_maximize_action(self):
if self.state_before_maximizing is None:
text = _("Maximize current plugin")
tip = _("Maximize current plugin to fit the whole "
"application window")
icon = "maximize.png"
else:
text = _("Restore current plugin")
tip = _("Restore current plugin to its original size and "
"position within the application window")
icon = "unmaximize.png"
self.maximize_action.setText(text)
self.maximize_action.setIcon(get_icon(icon))
self.maximize_action.setToolTip(tip)
def maximize_dockwidget(self, restore=False):
"""Shortcut: Ctrl+Alt+Shift+M
First call: maximize current dockwidget
Second call (or restore=True): restore original window layout"""
if self.state_before_maximizing is None:
if restore:
return
# No plugin is currently maximized: maximizing focus plugin
self.state_before_maximizing = self.saveState()
focus_widget = QApplication.focusWidget()
for plugin in self.widgetlist:
plugin.dockwidget.hide()
if plugin.isAncestorOf(focus_widget):
self.last_plugin = plugin
self.last_plugin.dockwidget.toggleViewAction().setDisabled(True)
self.setCentralWidget(self.last_plugin)
self.last_plugin.ismaximized = True
# Workaround to solve an issue with editor's outline explorer:
# (otherwise the whole plugin is hidden and so is the outline explorer
# and the latter won't be refreshed if not visible)
self.last_plugin.show()
self.last_plugin.visibility_changed(True)
else:
# Restore original layout (before maximizing current dockwidget)
self.last_plugin.dockwidget.setWidget(self.last_plugin)
self.last_plugin.dockwidget.toggleViewAction().setEnabled(True)
self.setCentralWidget(None)
self.last_plugin.ismaximized = False
self.restoreState(self.state_before_maximizing)
self.state_before_maximizing = None
self.last_plugin.get_focus_widget().setFocus()
self.__update_maximize_action()
def __update_fullscreen_action(self):
if self.isFullScreen():
icon = "window_nofullscreen.png"
else:
icon = "window_fullscreen.png"
self.fullscreen_action.setIcon(get_icon(icon))
def toggle_fullscreen(self):
if self.isFullScreen():
self.fullscreen_flag = False
self.showNormal()
if self.maximized_flag:
self.showMaximized()
else:
self.maximized_flag = self.isMaximized()
self.fullscreen_flag = True
self.showFullScreen()
self.__update_fullscreen_action()
def add_to_toolbar(self, toolbar, widget):
"""Add widget actions to toolbar"""
actions = widget.toolbar_actions
if actions is not None:
add_actions(toolbar, actions)
def about(self):
"""
About openFisca
"""
import openfisca_qt.gui.qt.QtCore
QMessageBox.about(self,
_("About %s") % "openFisca",
u''' <b>openFisca</b><sup>beta</sup> v %s
<p> %s
<p> Copyright © 2011 Clément Schaff, Mahdi Ben Jelloul
                          <p> All rights reserved
                          <p> License GPL version 3 or later
<p> Python %s - Qt %s - PyQt %s on %s'''
% (__version__, __project_url__, platform.python_version(),
openfisca_qt.gui.qt.QtCore.__version__, openfisca_qt.gui.qt.__version__, platform.system()))
# __project_url__, __forum_url__,
# platform.python_version(),
# openfisca_qt.gui.qt.QtCore.__version__,
# openfisca_qt.gui.qt.API_NAME,
# openfisca_qt.gui.qt.__version__,
# platform.system()) )
# """<b>%s %s</b> %s
def report_issue(self):
pass
# import urllib
# import spyderlib
# # Get Mercurial revision for development version
# revlink = ''
# spyderpath = spyderlib.__path__[0]
# if osp.isdir(osp.abspath(spyderpath)):
# full, short, branch = vcs.get_hg_revision(osp.dirname(spyderpath))
# if full:
# revlink = " (%s:r%s)" % (short, full)
# issue_template = """\
#Spyder Version: %s%s
#Python Version: %s
#Qt Version: %s, %s %s on %s
#
#What steps will reproduce the problem?
#1.
#2.
#3.
#
#What is the expected output? What do you see instead?
#
#
#Please provide any additional information below.
#""" % (__version__,
# revlink,
# platform.python_version(),
# openfisca_qt.gui.qt.QtCore.__version__,
# openfisca_qt.gui.qt.API_NAME,
# openfisca_qt.gui.qt.__version__,
# platform.system())
#
# url = QUrl("http://code.google.com/p/spyderlib/issues/entry")
# url.addEncodedQueryItem("comment", urllib.quote(issue_template))
# QDesktopServices.openUrl(url)
#---- Global callbacks (called from plugins)
# def get_current_editor_plugin(self):
# """Return editor plugin which has focus:
# console, extconsole, editor, inspector or historylog"""
# if self.light:
# return self.extconsole
# widget = QApplication.focusWidget()
# from openfisca_qt.gui.spyder_widgets.editor import TextEditBaseWidget
# from openfisca_qt.gui.spyder_widgets.shell import ShellBaseWidget
# if not isinstance(widget, (TextEditBaseWidget, ShellBaseWidget)):
# return
# for plugin in self.widgetlist:
# if plugin.isAncestorOf(widget):
# return plugin
# else:
# # External Editor window
# plugin = widget
# from openfisca_qt.gui.spyder_widgets.editor import EditorWidget
# while not isinstance(plugin, EditorWidget):
# plugin = plugin.parent()
# return plugin
# def find(self):
# """
# Global find callback
# """
# plugin = self.get_current_editor_plugin()
# if plugin is not None:
# plugin.find_widget.show()
# plugin.find_widget.search_text.setFocus()
# return plugin
#
# def find_next(self):
# """Global find next callback"""
# plugin = self.get_current_editor_plugin()
# if plugin is not None:
# plugin.find_widget.find_next()
#
# def find_previous(self):
# """Global find previous callback"""
# plugin = self.get_current_editor_plugin()
# if plugin is not None:
# plugin.find_widget.find_previous()
#
# def replace(self):
# """Global replace callback"""
# plugin = self.find()
# if plugin is not None:
# plugin.find_widget.show_replace()
#
# def global_callback(self):
# """Global callback"""
# widget = QApplication.focusWidget()
# action = self.sender()
# callback = from_qvariant(action.data(), unicode)
## from openfisca_qt.gui.spyder_widgets.editor import TextEditBaseWidget
## if isinstance(widget, TextEditBaseWidget):
# getattr(widget, callback)()
#
#---- PYTHONPATH management, etc.
def get_openfisca_pythonpath(self):
"""Return Openfisca PYTHONPATH"""
return self.path+self.project_path
def add_path_to_sys_path(self):
"""Add Openfisca path to sys.path"""
for path in reversed(self.get_openfisca_pythonpath()):
sys.path.insert(1, path)
def remove_path_from_sys_path(self):
"""Remove Openfisca path from sys.path"""
sys_path = sys.path
while sys_path[1] in self.get_openfisca_pythonpath():
sys_path.pop(1)
def path_manager_callback(self):
"""Openfisca path manager"""
self.remove_path_from_sys_path()
# project_pathlist = self.projectexplorer.get_pythonpath()
# dialog = PathManager(self, self.path, project_pathlist, sync=True)
# self.connect(dialog, SIGNAL('redirect_stdio(bool)'),
# self.redirect_internalshell_stdio)
# dialog.exec_()
self.add_path_to_sys_path()
encoding.writelines(self.path, self.openfisca_path) # Saving path
def pythonpath_changed(self):
"""Project Explorer PYTHONPATH contribution has changed"""
self.remove_path_from_sys_path()
self.project_path = self.projectexplorer.get_pythonpath()
self.add_path_to_sys_path()
def win_env(self):
"""Show Windows current user environment variables"""
self.dialog_manager.show(WinUserEnvDialog(self))
#---- Preferences
def apply_settings(self):
"""
Apply settings changed in 'Preferences' dialog box
"""
qapp = QApplication.instance()
qapp.setStyle(CONF.get('main', 'windows_style', self.default_style))
default = self.DOCKOPTIONS
if CONF.get('main', 'vertical_tabs'):
default = default|QMainWindow.VerticalTabs
if CONF.get('main', 'animated_docks'):
default = default|QMainWindow.AnimatedDocks
self.setDockOptions(default)
for child in self.widgetlist:
features = child.FEATURES
if CONF.get('main', 'vertical_dockwidget_titlebars'):
features = features|QDockWidget.DockWidgetVerticalTitleBar
child.dockwidget.setFeatures(features)
child.update_margins()
self.apply_statusbar_settings()
def apply_statusbar_settings(self):
"""Update status bar widgets settings"""
for widget, name in ((self.mem_status, 'memory_usage'),
(self.cpu_status, 'cpu_usage')):
if widget is not None:
widget.setVisible(CONF.get('main', '%s/enable' % name))
widget.set_interval(CONF.get('main', '%s/timeout' % name))
def edit_preferences(self):
"""
Edit openFisca preferences
"""
dlg = ConfigDialog(self)
self.connect(dlg, SIGNAL("size_change(QSize)"),
lambda s: self.set_prefs_size(s))
if self.prefs_dialog_size is not None:
dlg.resize(self.prefs_dialog_size)
for PrefPageClass in self.general_prefs:
widget = PrefPageClass(dlg, main=self)
widget.initialize()
dlg.add_page(widget)
for plugin in [self.onlinehelp, self.parameters] + self.survey_plugins + self.test_case_plugins + self.thirdparty_plugins:
if plugin is not None:
print plugin
widget = plugin.create_configwidget(dlg)
if widget is not None:
dlg.add_page(widget)
if self.prefs_index is not None:
dlg.set_current_index(self.prefs_index)
dlg.show()
dlg.check_all_settings()
self.connect(dlg.pages_widget, SIGNAL("currentChanged(int)"),
self.__preference_page_changed)
dlg.exec_()
def __preference_page_changed(self, index):
"""
Preference page index has changed
"""
self.prefs_index = index
def set_prefs_size(self, size):
"""
Save preferences dialog size
"""
self.prefs_dialog_size = size
#---- Shortcuts
def register_shortcut(self, qaction_or_qshortcut, context, name,
default=NoDefault):
"""
Register QAction or QShortcut to openFisca main application,
with shortcut (context, name, default)
"""
self.shortcut_data.append( (qaction_or_qshortcut,
context, name, default) )
self.apply_shortcuts()
def apply_shortcuts(self):
"""
Apply shortcuts settings to all widgets/plugins
"""
toberemoved = []
for index, (qobject, context, name,
default) in enumerate(self.shortcut_data):
keyseq = QKeySequence( get_shortcut(context, name, default) )
try:
if isinstance(qobject, QAction):
qobject.setShortcut(keyseq)
elif isinstance(qobject, QShortcut):
qobject.setKey(keyseq)
except RuntimeError:
# Object has been deleted
toberemoved.append(index)
for index in sorted(toberemoved, reverse=True):
self.shortcut_data.pop(index)
#---- Sessions
def load_session(self, filename=None):
"""Load session"""
if filename is None:
filename, _selfilter = getopenfilename(self, _("Open session"),
os.getcwdu(), _("Openfisca sessions")+" (*.session.tar)")
if not filename:
return
if self.close():
self.next_session_name = filename
def save_session(self):
"""
Save session and quit application
"""
filename, _selfilter = getsavefilename(self, _("Save session"),
os.getcwdu(), _("openFisca sessions")+" (*.session.tar)")
if filename:
if self.close():
self.save_session_name = filename
def get_options():
"""
    Parse command line options and return them
"""
import optparse
parser = optparse.OptionParser(usage="ope,fisca [options]")
parser.add_option('--session', dest="startup_session", default='',
help="Startup session")
parser.add_option('--defaults', dest="reset_to_defaults",
action='store_true', default=False,
help="Reset to configuration settings to defaults")
parser.add_option('--reset', dest="reset_session",
action='store_true', default=False,
help="Remove all configuration files!")
parser.add_option('--optimize', dest="optimize",
action='store_true', default=False,
help="Optimize Openfisca bytecode (this may require "
"administrative privileges)")
parser.add_option('-w', '--workdir', dest="working_directory", default=None,
help="Default working directory")
parser.add_option('-d', '--debug', dest="debug", action='store_true',
default=False,
help="Debug mode (stds are not redirected)")
options, _args = parser.parse_args()
return options
def initialize():
"""Initialize Qt, patching sys.exit and eventually setting up ETS"""
app = qapplication()
#----Monkey patching PyQt4.QtGui.QApplication
class FakeQApplication(QApplication):
"""Spyder's fake QApplication"""
def __init__(self, args):
self = app # analysis:ignore
@staticmethod
def exec_():
"""Do nothing because the Qt mainloop is already running"""
pass
from openfisca_qt.gui.qt import QtGui
QtGui.QApplication = FakeQApplication
#----Monkey patching sys.exit
def fake_sys_exit(arg=[]):
pass
sys.exit = fake_sys_exit
# Removing arguments from sys.argv as in standard Python interpreter
sys.argv = ['']
# Selecting Qt4 backend for Enthought Tool Suite (if installed)
try:
from enthought.etsconfig.api import ETSConfig
ETSConfig.toolkit = 'qt4'
except ImportError:
pass
return app
class Spy(object):
"""
Inspect openfisca internals
"""
def __init__(self, app, window):
self.app = app
self.window = window
def run_spyder(app, options):
"""
Create and show Openfisca's main window
Patch matplotlib for figure integration
Start QApplication event loop
"""
# Main window
main = MainWindow(options)
try:
main.setup()
except BaseException:
# if main.console is not None:
# try:
# main.console.shell.exit_interpreter()
# except BaseException:
# pass
raise
main.show()
main.post_visible_setup()
app.exec_()
return main
def __remove_temp_session():
if osp.isfile(TEMP_SESSION_PATH):
os.remove(TEMP_SESSION_PATH)
def main():
"""
Session manager
"""
__remove_temp_session()
# **** Collect command line options ****
# Note regarding Options:
# It's important to collect options before monkey patching sys.exit,
# otherwise, optparse won't be able to exit if --help option is passed
options = get_options()
app = initialize()
if options.reset_session:
# <!> Remove all configuration files!
reset_session()
return
elif options.reset_to_defaults:
# Reset openFisca settings to defaults
CONF.reset_to_defaults(save=True)
return
elif options.optimize:
        # Optimize the whole Openfisca source code directory
# import spyderlib TODO: test
# programs.run_python_script(module="compileall",
# args=[spyderlib.__path__[0]], p_args=['-O'])
return
options.debug = True
if CONF.get('main', 'crash', False):
CONF.set('main', 'crash', False)
QMessageBox.information(None, "openFisca",
u"openFisca crashed during last session.<br><br>"
u"If openFisca does not start at all and <u>before submitting a "
u"bug report</u>, please try to reset settings to defaults by "
u"running openFisca with the command line option '--reset':<br>"
u"<span style=\'color: #555555\'><b>python openFisca --reset"
u"</b></span><br><br>"
u"<span style=\'color: #ff5555\'><b>Warning:</b></span> "
u"this command will remove all your openFisca configuration files "
u"located in '%s').<br><br>"
u"If restoring the default settings does not help, please take "
u"the time to search for <a href=\"%s\">known bugs</a> or "
u"<a href=\"%s\">discussions</a> matching your situation before "
u"eventually creating a new issue <a href=\"%s\">here</a>. "
u"Your feedback will always be greatly appreciated."
u"" % (get_conf_path(), __project_url__,
__forum_url__, __project_url__))
next_session_name = options.startup_session
while isinstance(next_session_name, basestring):
if next_session_name:
error_message = load_session(next_session_name)
if next_session_name == TEMP_SESSION_PATH:
__remove_temp_session()
if error_message is None:
CONF.load_from_ini()
else:
print error_message
QMessageBox.critical(None, "Load session",
u"<b>Unable to load '%s'</b>"
u"<br><br>Error message:<br>%s"
% (osp.basename(next_session_name),
error_message))
mainwindow = None
try:
mainwindow = run_spyder(app, options)
except BaseException:
CONF.set('main', 'crash', True)
import traceback
traceback.print_exc(file=STDERR)
traceback.print_exc(file=open('openfisca_crash.log', 'wb'))
if mainwindow is None:
        # An exception occurred
return
next_session_name = mainwindow.next_session_name
save_session_name = mainwindow.save_session_name
if next_session_name is not None:
#-- Loading session
# Saving current session in a temporary file
# but only if we are not currently trying to reopen it!
if next_session_name != TEMP_SESSION_PATH:
save_session_name = TEMP_SESSION_PATH
if save_session_name:
#-- Saving session
error_message = save_session(save_session_name)
if error_message is not None:
QMessageBox.critical(None, "Save session",
u"<b>Unable to save '%s'</b>"
u"<br><br>Error message:<br>%s"
% (osp.basename(save_session_name),
error_message))
ORIGINAL_SYS_EXIT()
if __name__ == "__main__":
main()
| agpl-3.0 |
jgowans/directionFinder_backend | bin/calibrate_time_domain.py | 1 | 7095 | #!/usr/bin/env python
from directionFinder_backend.correlator import Correlator
import scipy.signal as signal
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1.inset_locator import zoomed_inset_axes
from mpl_toolkits.axes_grid1.inset_locator import mark_inset
import logging
import time
import timeit
import json
import datetime
from colorlog import ColoredFormatter
def test_cross_first_vs_resample_first():
c.upsample_factor = 100
c.subsignal_length_max = 2**19
c.time_domain_padding = 100
sig_lengths = [np.int(x) for x in np.linspace(12000, 16000, 20)]
x_first_times = []
resample_first_times = []
for sig_len in sig_lengths:
c.subsignal_length_max = sig_len
x_first_times.append(
timeit.timeit(
stmt = "c.do_time_domain_cross_correlation_cross_first()",
setup = "from __main__ import c",
number = 1))
resample_first_times.append(
timeit.timeit(
stmt = "c.do_time_domain_cross_correlation_resample_first()",
setup = "from __main__ import c",
number = 1))
print("{l} -> {t}".format(l = sig_len, t = resample_first_times[-1]))
fig = plt.figure()
ax1 = fig.gca()
ax1.plot(sig_lengths, x_first_times, 'b', label='x first')
ax1.plot(sig_lengths, resample_first_times, 'r', label="resample first")
#ax2 = ax1.twinx()
#ax2.plot(sig_lengths, resample_first_times, 'r', label="resample first")
ax1.legend()
#ax2.legend()
plt.show()
def plot_interpolated_vs_non_interpolated():
""" This does not work anymore due to the interface in
Correlator being changed. Keeping for pyplot reference
"""
(x_first, x_first_time), (x_first_upped, x_first_time_upped) = c.do_time_domain_cross_correlation_cross_first()
# normalise:
x_first_upped = x_first_upped/(max(x_first))
x_first = x_first/max(x_first)
x_first_time *= 1e6
x_first_time_upped *= 1e6
    #print(np.argmax(c.time_domain_correlations[1]))
    #plt.plot(c.time_domain_correlation_time, c.time_domain_correlations[0], marker='.')
    ax = plt.gca()
    #ax.plot(up_first_time, up_first, marker='.', color='b')  # up_first is not provided by the current Correlator interface
    #ax.plot(x_first_time_upped, x_first_upped, color='b', linewidth=2, marker='.', markersize=10, label="upsampled")
    ax.plot(x_first_time_upped, x_first_upped, color='b', linewidth=2, label="upsampled")
ax.plot(x_first_time, x_first, marker='.', color='r', linewidth=2, markersize=15, label="Raw")
xy = (x_first_time[np.argmax(x_first)-1], x_first[np.argmax(x_first)-1])
print(xy)
ax.annotate('higher', xy=xy,
xytext=(0.1, 0.4), textcoords='axes fraction',
arrowprops=dict(facecolor='black', shrink=0.09, width=2),
horizontalalignment='right', verticalalignment='top',)
xy = (x_first_time[np.argmax(x_first)+1], x_first[np.argmax(x_first)+1])
ax.annotate('lower', xy=xy,
xytext=(0.6, 0.3), textcoords='axes fraction',
arrowprops=dict(facecolor='black', shrink=0.09, width=2),
horizontalalignment='right', verticalalignment='top',)
ax.set_title("Comparison of raw time domain cross correlation with upsampled version")
ax.set_xlabel("Time shift (us)")
ax.set_ylabel("Cross correlation value (normalised)")
ax.legend()
plt.show()
def do_calibration(c, write = False):
c.upsample_factor = 1000
c.subsignal_length_max = 2**19
c.time_domain_padding = 1000
c.do_time_domain_cross_correlation()
logger.info(c.time_domain_cross_correlations_peaks)
logger.info("Step: {}".format(c.time_domain_correlations_times[(0,1)][1] - c.time_domain_correlations_times[(0,1)][0]))
offsets = {}
for baseline, correlation in c.time_domain_correlations_values.items():
baseline_str = "{a}x{b}".format(a = baseline[0], b = baseline[1])
offsets[baseline_str] = c.time_domain_cross_correlations_peaks[baseline]
offsets["metadata"] = {}
offsets["metadata"]["created"] = datetime.datetime.utcnow().isoformat("T")
offsets_json = json.dumps(offsets, indent=2)
if write == True:
with open('time_domain_calibration.json', 'w') as f:
f.write(offsets_json)
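# Editor's sketch (hypothetical usage, not part of the original script): the
# JSON file written by do_calibration() can be read back to inspect the
# per-baseline time offsets before applying them via
# Correlator.apply_time_domain_calibration (shown commented out in __main__).
def print_calibration_offsets(filename='time_domain_calibration.json'):
    """Print the baseline offsets stored by do_calibration."""
    with open(filename) as f:
        offsets = json.load(f)
    for baseline, offset in offsets.items():
        if baseline == "metadata":
            continue
        print("baseline {b}: offset {o} s".format(b=baseline, o=offset))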
def plot_calibration(c, insert = True):
fig = plt.figure()
ax = plt.gca()
for baseline, correlation in c.time_domain_correlations_values.items():
correlation_max_val = correlation[np.argmax(correlation)]
correlation_max_time = c.time_domain_correlations_times[baseline][np.argmax(correlation)]
lines = ax.plot(
c.time_domain_correlations_times[baseline] * 1e9,
correlation / correlation_max_val,
label = baseline)
#ax.plot([correlation_max_time, correlation_max_time], [0, correlation_max_val], color = lines[0].get_color())
ax.set_ylim(top=1.2)
ax.xaxis.set_ticks(np.arange(
-200,
200,
2))
if insert == True:
#axins = zoomed_inset_axes(ax, 5, loc=1)
axins = zoomed_inset_axes(ax, 9, loc=1)
for baseline, correlation in c.time_domain_correlations_values.items():
correlation_max_val = correlation[np.argmax(correlation)]
correlation_max_time = c.time_domain_correlations_times[baseline][np.argmax(correlation)]
lines = axins.plot(
c.time_domain_correlations_times[baseline] * 1e9,
correlation / correlation_max_val,
label = baseline,
linewidth=2)
#axins.plot([correlation_max_time, correlation_max_time], [0, correlation_max_val], color = lines[0].get_color())
#axins.set_xlim(-0.4, 2.9)
axins.set_xlim(-0.4, 0.4)
#axins.set_ylim(0.90, 1.04)
axins.set_ylim(0.96, 1.03)
#axins.xaxis.set_ticks(np.arange(-0.4, 2.9, 0.4))
axins.xaxis.set_ticks(np.arange(-0.4, 0.4, 0.2))
mark_inset(ax, axins, loc1=2, loc2=3, fc='none', ec='0.5')
plt.xticks(visible=True)
plt.yticks(visible=False)
ax.set_title("Time domain cross correlations with broad band noise\n arriving through full RF chain AFTER calibration")
ax.set_xlabel("Time delay (ns)")
ax.set_ylabel("Cross correlation value (normalised)")
ax.legend(loc=2)
#ax.legend()
plt.show()
if __name__ == '__main__':
logger = logging.getLogger('main')
handler = logging.StreamHandler()
colored_formatter = ColoredFormatter("%(log_color)s%(asctime)s%(levelname)s:%(name)s:%(message)s")
handler.setFormatter(colored_formatter)
logger.addHandler(handler)
logger.setLevel(logging.DEBUG)
c = Correlator(logger = logger.getChild('correlator'))
#c.apply_time_domain_calibration("./time_domain_calibration.json")
#c.apply_cable_length_calibrations('../config/cable_length_calibration.json')
c.fetch_time_domain_snapshot(force=True)
do_calibration(c, write = False)
plot_calibration(c, insert = True)
| mit |
dustinvtran/bayesrl | bayesrl/environments/pomdpgw.py | 1 | 11259 | import numpy as np
from ..utils import check_random_state
# Maze state is represented as a 2-element NumPy array: (Y, X). Increasing Y is South.
# Possible actions, expressed as (delta-y, delta-x).
maze_actions = {
'N': np.array([-1, 0]),
'S': np.array([1, 0]),
'E': np.array([0, 1]),
'W': np.array([0, -1]),
}
def parse_topology(topology):
return np.array([list(row) for row in topology])
class Maze(object):
"""
Simple wrapper around a NumPy 2D array to handle flattened indexing and staying in bounds.
"""
def __init__(self, topology, true_obs_prob=.8, easy_obs_model=True):
self.topology = parse_topology(topology)
self.flat_topology = self.topology.ravel()
self.shape = self.topology.shape
self.true_obs_prob = true_obs_prob
self.easy_obs_model = easy_obs_model
#If the observation model is easy, the agent can observe which directions have walls
#If the observation model is not easy, the agent only observes how many of its four neighbors are walls.
self.num_observations = 16 if easy_obs_model else 5
def in_bounds_flat(self, position):
return 0 <= position < np.product(self.shape)
def in_bounds_unflat(self, position):
return 0 <= position[0] < self.shape[0] and 0 <= position[1] < self.shape[1]
def get_flat(self, position):
if not self.in_bounds_flat(position):
raise IndexError("Position out of bounds: {}".format(position))
return self.flat_topology[position]
def get_unflat(self, position):
if not self.in_bounds_unflat(position):
raise IndexError("Position out of bounds: {}".format(position))
return self.topology[tuple(position)]
def flatten_index(self, index_tuple):
return np.ravel_multi_index(index_tuple, self.shape)
def unflatten_index(self, flattened_index):
return np.unravel_index(flattened_index, self.shape)
def flat_positions_containing(self, x):
return list(np.nonzero(self.flat_topology == x)[0])
def flat_positions_not_containing(self, x):
return list(np.nonzero(self.flat_topology != x)[0])
def get_inbound_index(self, index_tuple):
x = min(max(index_tuple[0],0),self.shape[0]-1)
y = min(max(index_tuple[1],0),self.shape[1]-1)
return x, y
def true_observation(self, index_tuple):
it = index_tuple
if type(it) == np.int64:
it = self.unflatten_index(it)
neighbors = [(it[0]+1,it[1]),
(it[0]-1,it[1]),
(it[0],it[1]+1),
(it[0],it[1]-1)]
neighbors = [n for n in neighbors if self.in_bounds_unflat(n)]
if_wall = [self.get_unflat(n)=='#' for n in neighbors]
        if self.easy_obs_model:
            # Easy model: observe which of the four neighbors are walls
            # (16 possible observations, matching num_observations above).
            obs = sum(np.array([8, 4, 2, 1]) * if_wall)
        else:
            # Harder model: observe only how many neighbors are walls (0-4).
            obs = sum(if_wall)
return obs
def obs_distribution(self, index_tuple):
if type(index_tuple) == int:
index_tuple = self.unflatten_index(index_tuple)
other_obs_prob = (1-self.true_obs_prob)/(self.num_observations-1)
obs_distribution = [other_obs_prob] * self.num_observations
true_obs = self.true_observation(index_tuple)
obs_distribution[true_obs] = self.true_obs_prob
return obs_distribution
def get_all_obs_distribution(self):
return [self.obs_distribution((x,y)) for x in range(self.shape[0]) for y in range(self.shape[1])]
def observation(self, index_tuple):
if type(index_tuple) == int:
index_tuple = self.unflatten_index(index_tuple)
obs_distribution = self.obs_distribution(index_tuple)
obs = np.random.multinomial(1, obs_distribution)
return obs.tolist().index(1)
def __str__(self):
return '\n'.join(''.join(row) for row in self.topology.tolist())
def __repr__(self):
return 'Maze({})'.format(repr(self.topology.tolist()))
def move_avoiding_walls(maze, position, action):
"""
Return the new position after moving, and the event that happened ('hit-wall' or 'moved').
Works with the position and action as a (row, column) array.
"""
# Compute new position
new_position = position + action
# Compute collisions with walls, including implicit walls at the ends of the world.
if not maze.in_bounds_unflat(new_position) or maze.get_unflat(new_position) == '#':
return position, 'hit-wall'
return new_position, 'moved'
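def _maze_indexing_demo():
    """
    Editor's sketch (not part of the original module): illustrates flat vs.
    unflat indexing on Maze and the wall-avoiding move helper, using the
    'trivial' topology from the `samples` dict defined near the bottom of
    this file.
    """
    maze = Maze(samples['trivial'])
    start_flat = maze.flat_positions_containing('o')[0]   # flattened index of 'o'
    start = np.array(maze.unflatten_index(start_flat))    # (row, col) position
    new_position, event = move_avoiding_walls(maze, start, maze_actions['S'])
    print("start {0} -> after 'S': {1} ({2})".format(
        tuple(start), tuple(new_position), event))
    return new_position, event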
class GridWorld(object):
"""
A simple task in a maze: get to the goal.
Parameters
----------
maze : list of strings or lists
maze topology (see below)
rewards: dict of string to number. default: {'*': 10}.
Rewards obtained by being in a maze grid with the specified contents,
or experiencing the specified event (either 'hit-wall' or 'moved'). The
contributions of content reward and event reward are summed. For
example, you might specify a cost for moving by passing
rewards={'*': 10, 'moved': -1}.
terminal_markers: sequence of chars, default '*'
A grid cell containing any of these markers will be considered a
"terminal" state.
action_error_prob: float
With this probability, the requested action is ignored and a random
action is chosen instead.
random_state: None, int, or RandomState object
For repeatable experiments, you can pass a random state here. See
http://scikit-learn.org/stable/modules/generated/sklearn.utils.check_random_state.html
Notes
-----
Maze topology is expressed textually. Key:
'#': wall
'.': open (really, anything that's not '#')
'*': goal
'o': origin
"""
def __init__(self, maze, rewards={'*': 10}, terminal_markers='*',
action_error_prob=0, random_state=None, directions="NSEW", pomdp=False):
self.maze = Maze(maze) if not isinstance(maze, Maze) else maze
self.rewards = rewards
self.terminal_markers = terminal_markers
self.action_error_prob = action_error_prob
self.random_state = check_random_state(random_state)
self.actions = [maze_actions[direction] for direction in directions]
self.num_actions = len(self.actions)
self.state = None
self.reset()
self.num_states = self.maze.shape[0] * self.maze.shape[1]
self.pomdp = pomdp
def __repr__(self):
return 'GridWorld(maze={maze!r}, rewards={rewards}, terminal_markers={terminal_markers}, action_error_prob={action_error_prob})'.format(**self.__dict__)
def reset(self):
"""
Reset the position to a starting position (an 'o'), chosen at random.
"""
options = self.maze.flat_positions_containing('o')
self.state = options[self.random_state.choice(len(options))]
def is_terminal(self, state):
"""Check if the given state is a terminal state."""
return self.maze.get_flat(state) in self.terminal_markers
def observe(self):
"""
Return the current state as an integer.
The state is the index into the flattened maze.
"""
o = self.maze.observation(self.state) if self.pomdp else self.state
return o
def perform_action(self, action_idx):
"""Perform an action (specified by index), yielding a new state and reward."""
# In the absorbing end state, nothing does anything.
if self.is_terminal(self.state):
return self.observe(), 0
if self.action_error_prob and self.random_state.rand() < self.action_error_prob:
action_idx = self.random_state.choice(self.num_actions)
action = self.actions[action_idx]
new_state_tuple, result = move_avoiding_walls(self.maze, self.maze.unflatten_index(self.state), action)
self.state = self.maze.flatten_index(new_state_tuple)
reward = self.rewards.get(self.maze.get_flat(self.state), 0) + self.rewards.get(result, 0)
return self.observe(), reward
def as_mdp(self):
transition_probabilities = np.zeros((self.num_states, self.num_actions, self.num_states))
rewards = np.zeros((self.num_states, self.num_actions, self.num_states))
action_rewards = np.zeros((self.num_states, self.num_actions))
destination_rewards = np.zeros(self.num_states)
for state in range(self.num_states):
destination_rewards[state] = self.rewards.get(self.maze.get_flat(state), 0)
is_terminal_state = np.zeros(self.num_states, dtype=np.bool)
for state in range(self.num_states):
if self.is_terminal(state):
is_terminal_state[state] = True
transition_probabilities[state, :, state] = 1.
else:
for action in range(self.num_actions):
new_state_tuple, result = move_avoiding_walls(self.maze, self.maze.unflatten_index(state), self.actions[action])
new_state = self.maze.flatten_index(new_state_tuple)
transition_probabilities[state, action, new_state] = 1.
action_rewards[state, action] = self.rewards.get(result, 0)
# Now account for action noise.
transitions_given_random_action = transition_probabilities.mean(axis=1, keepdims=True)
transition_probabilities *= (1 - self.action_error_prob)
transition_probabilities += self.action_error_prob * transitions_given_random_action
rewards_given_random_action = action_rewards.mean(axis=1, keepdims=True)
action_rewards = (1 - self.action_error_prob) * action_rewards + self.action_error_prob * rewards_given_random_action
rewards = action_rewards[:, :, None] + destination_rewards[None, None, :]
rewards[is_terminal_state] = 0
return transition_probabilities, rewards
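    def _value_iteration_sketch(self, discount=0.95, iterations=100):
        """
        Editor's sketch (not part of the original class): one way to consume
        the (transition_probabilities, rewards) arrays built by as_mdp(),
        using plain value iteration. The discount factor and iteration count
        are illustrative defaults, not tuned values.
        """
        T, R = self.as_mdp()
        expected_rewards = (T * R).sum(axis=2)            # E[r | state, action]
        values = np.zeros(self.num_states)
        policy = np.zeros(self.num_states, dtype=int)
        for _ in range(iterations):
            q = expected_rewards + discount * T.dot(values)   # shape (num_states, num_actions)
            values = q.max(axis=1)
            policy = q.argmax(axis=1)
        return values, policy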
def get_max_reward(self):
transition_probabilities, rewards = self.as_mdp()
return rewards.max()
### Old API, where terminal states were None.
def observe_old(self):
return None if self.is_terminal(self.state) else self.state
def perform_action_old(self, action_idx):
new_state, reward = self.perform_action(action_idx)
if self.is_terminal(new_state):
return None, reward
else:
return new_state, reward
samples = {
'trivial': [
'###',
'#o#',
'#.#',
'#*#',
'###'],
'larger': [
'#########',
'#..#....#',
'#..#..#.#',
'#..#..#.#',
'#..#.##.#',
'#....*#.#',
'#######.#',
'#o......#',
'#########']
}
def construct_cliff_task(width, height, goal_reward=50, move_reward=-1, cliff_reward=-100, **kw):
"""
Construct a 'cliff' task, a GridWorld with a "cliff" between the start and
goal. Falling off the cliff gives a large negative reward and ends the
episode.
Any other parameters, like action_error_prob, are passed on to the
GridWorld constructor.
"""
maze = ['.' * width] * (height - 1) # middle empty region
maze.append('o' + 'X' * (width - 2) + '*') # bottom goal row
rewards = {
'*': goal_reward,
'moved': move_reward,
'hit-wall': move_reward,
'X': cliff_reward
}
return GridWorld(maze, rewards=rewards, terminal_markers='*X', **kw)
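def _random_policy_demo(steps=25):
    """
    Editor's sketch (not part of the original module): drives the 'larger'
    sample GridWorld with uniformly random actions to show the
    perform_action()/is_terminal() loop. The step count, action error
    probability and seed are arbitrary choices.
    """
    task = GridWorld(samples['larger'], action_error_prob=0.1, random_state=0)
    total_reward = 0
    for _ in range(steps):
        action = task.random_state.choice(task.num_actions)
        state, reward = task.perform_action(action)
        total_reward += reward
        if task.is_terminal(state):
            break
    return total_reward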
| mit |
mfjb/scikit-learn | examples/svm/plot_svm_scale_c.py | 223 | 5375 | """
==============================================
Scaling the regularization parameter for SVCs
==============================================
The following example illustrates the effect of scaling the
regularization parameter when using :ref:`svm` for
:ref:`classification <svm_classification>`.
For SVC classification, we are interested in a risk minimization for the
equation:
.. math::
C \sum_{i=1, n} \mathcal{L} (f(x_i), y_i) + \Omega (w)
where
- :math:`C` is used to set the amount of regularization
- :math:`\mathcal{L}` is a `loss` function of our samples
and our model parameters.
- :math:`\Omega` is a `penalty` function of our model parameters
If we consider the loss function to be the individual error per
sample, then the data-fit term, or the sum of the error for each sample, will
increase as we add more samples. The penalization term, however, will not
increase.
When using, for example, :ref:`cross validation <cross_validation>`, to
set the amount of regularization with `C`, there will be a
different amount of samples between the main problem and the smaller problems
within the folds of the cross validation.
Since our loss function is dependent on the amount of samples, the latter
will influence the selected value of `C`.
The question that arises is `How do we optimally adjust C to
account for the different amount of training samples?`
The figures below are used to illustrate the effect of scaling our
`C` to compensate for the change in the number of samples, in the
case of using an `l1` penalty, as well as the `l2` penalty.
l1-penalty case
-----------------
In the `l1` case, theory says that prediction consistency
(i.e. that under given hypothesis, the estimator
learned predicts as well as a model knowing the true distribution)
is not possible because of the bias of the `l1`. It does say, however,
that model consistency, in terms of finding the right set of non-zero
parameters as well as their signs, can be achieved by scaling
`C1`.
l2-penalty case
-----------------
The theory says that in order to achieve prediction consistency, the
penalty parameter should be kept constant
as the number of samples grow.
Simulations
------------
The two figures below plot the values of `C` on the `x-axis` and the
corresponding cross-validation scores on the `y-axis`, for several different
fractions of a generated data-set.
In the `l1` penalty case, the cross-validation-error correlates best with
the test-error, when scaling our `C` with the number of samples, `n`,
which can be seen in the first figure.
For the `l2` penalty case, the best result comes from the case where `C`
is not scaled.
.. topic:: Note:
Two separate datasets are used for the two different plots. The reason
behind this is the `l1` case works better on sparse data, while `l2`
is better suited to the non-sparse case.
"""
print(__doc__)
# Author: Andreas Mueller <[email protected]>
# Jaques Grobler <[email protected]>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.svm import LinearSVC
from sklearn.cross_validation import ShuffleSplit
from sklearn.grid_search import GridSearchCV
from sklearn.utils import check_random_state
from sklearn import datasets
rnd = check_random_state(1)
# set up dataset
n_samples = 100
n_features = 300
# l1 data (only 5 informative features)
X_1, y_1 = datasets.make_classification(n_samples=n_samples,
n_features=n_features, n_informative=5,
random_state=1)
# l2 data: non sparse, but less features
y_2 = np.sign(.5 - rnd.rand(n_samples))
X_2 = rnd.randn(n_samples, n_features // 5) + y_2[:, np.newaxis]
X_2 += 5 * rnd.randn(n_samples, n_features // 5)
clf_sets = [(LinearSVC(penalty='l1', loss='squared_hinge', dual=False,
tol=1e-3),
np.logspace(-2.3, -1.3, 10), X_1, y_1),
(LinearSVC(penalty='l2', loss='squared_hinge', dual=True,
tol=1e-4),
np.logspace(-4.5, -2, 10), X_2, y_2)]
colors = ['b', 'g', 'r', 'c']
for fignum, (clf, cs, X, y) in enumerate(clf_sets):
# set up the plot for each regressor
plt.figure(fignum, figsize=(9, 10))
for k, train_size in enumerate(np.linspace(0.3, 0.7, 3)[::-1]):
param_grid = dict(C=cs)
# To get nice curve, we need a large number of iterations to
# reduce the variance
grid = GridSearchCV(clf, refit=False, param_grid=param_grid,
cv=ShuffleSplit(n=n_samples, train_size=train_size,
n_iter=250, random_state=1))
grid.fit(X, y)
scores = [x[1] for x in grid.grid_scores_]
scales = [(1, 'No scaling'),
((n_samples * train_size), '1/n_samples'),
]
for subplotnum, (scaler, name) in enumerate(scales):
plt.subplot(2, 1, subplotnum + 1)
plt.xlabel('C')
plt.ylabel('CV Score')
grid_cs = cs * float(scaler) # scale the C's
plt.semilogx(grid_cs, scores, label="fraction %.2f" %
train_size)
plt.title('scaling=%s, penalty=%s, loss=%s' %
(name, clf.penalty, clf.loss))
plt.legend(loc="best")
plt.show()
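# Editor's sketch (not part of the original example): the "1/n_samples"
# strategy compared above amounts to rescaling the candidate C grid by the
# size of the training fold, so the per-sample penalty weight stays constant
# as the fold size changes.
def scale_c_grid(cs, n_train):
    """Return the candidate C values rescaled for a training fold of size n_train."""
    return np.asarray(cs) * float(n_train)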
| bsd-3-clause |
briandrawert/GillesPy | gillespy/gillespy.py | 2 | 52338 | """
A simple toolkit for creating and simulating discrete stochastic models in
python.
This serves primarily as a python wrapper for the C-based solvers within
StochKit2. The gillespy.Model class provides nearly all of the functionality
present in this project.
This version is updated (4/2017) to contain documentation in a more reasonable
format. This does not necessarily mean it is perfect, but it is certainly an
improvement over the original.
"""
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
from collections import OrderedDict
import scipy as sp
import numpy as np
import matplotlib.pyplot as plt
import tempfile
import uuid
import subprocess
import types
import random
try:
import lxml.etree as etree
no_pretty_print = False
except:
import xml.etree.ElementTree as etree
import xml.dom.minidom
import re
no_pretty_print = True
try:
import scipy.io as spio
isSCIPY = True
except:
pass
import os
import sys
try:
import shutil
import numpy
except:
pass
import pdb
def import_SBML(filename, name=None, gillespy_model=None):
"""
SBML to GillesPy model converter. NOTE: non-mass-action rates
in terms of concentrations may not be converted for population
simulation. Use caution when importing SBML.
Attributes
----------
filename : str
Path to the SBML file for conversion.
name : str
Name of the resulting model.
gillespy_model : gillespy.Model
If desired, the SBML model may be added to an existing GillesPy model.
"""
try:
from .SBMLimport import convert
except ImportError:
raise ImportError('SBML conversion not imported successfully')
return convert(filename, modelName = name, gillespy_model = gillespy_model)
class Model(object):
"""
Representation of a well mixed biochemical model. Contains reactions,
parameters, species.
Attributes
----------
name : str
The name of the model, or an annotation describing it.
population : bool
The type of model being described. A discrete stochastic model is a
population model (True), a deterministic model is a concentration model
(False). Automatic conversion from population to concentration models
may be used, by setting the volume parameter.
volume : float
The volume of the system matters when converting to from population to
concentration form. This will also set a parameter "vol" for use in
custom (i.e. non-mass-action) propensity functions.
tspan : numpy ndarray
The timepoints at which the model should be simulated. If None, a
default timespan is added. May be set later, see Model.timespan
annotation : str (optional)
Optional further description of model
"""
def __init__(self, name="", population=True, volume=1.0, tspan=None, annotation="model"):
""" Create an empty model. """
# The name that the model is referenced by (should be a String)
self.name = name
self.annotation = annotation
# Dictionaries with Species, Reactions and Parameter objects.
# Species, Reaction and Paramter names are used as keys.
self.listOfParameters = OrderedDict()
self.listOfSpecies = OrderedDict()
self.listOfReactions = OrderedDict()
# This defines the unit system at work for all numbers in the model
# It should be a logical error to leave this undefined, subclasses
# should set it
if population == True:
self.units = "population"
else:
self.units = "concentration"
if volume != 1.0:
raise Warning("Concentration models account for volume implicitly, explicit volume definition is not required. Note: concentration models may only be simulated deterministically.")
self.volume = volume
# Dict that holds flattended parameters and species for
# evaluation of expressions in the scope of the model.
self.namespace = OrderedDict([])
if tspan is None:
self.timespan(numpy.linspace(0,20,401))
else: self.timespan(tspan)
def serialize(self):
""" Serializes the Model object to valid StochML. """
self.resolve_parameters()
doc = StochMLDocument().from_model(self)
return doc.to_string()
def update_namespace(self):
""" Create a dict with flattened parameter and species objects. """
self.namespace = OrderedDict([])
for param in self.listOfParameters:
self.namespace[param]=self.listOfParameters[param].value
# Dictionary of expressions that can be evaluated in the scope of this
# model.
self.expressions = {}
def get_species(self, sname):
"""
Returns a species object by name.
Attributes
----------
sname : str
Name of the species object to be returned.
"""
return self.listOfSpecies[sname]
def get_all_species(self):
"""
Returns a dict of all species in the model, of the form:
{name : species object}
"""
return self.listOfSpecies
def add_species(self, obj):
"""
Adds a species, or list of species to the model.
Attributes
----------
obj : Species, or list of Species
The species or list of species to be added to the model object.
"""
if isinstance(obj, Species):
if obj.name in self.listOfSpecies:
raise ModelError("Can't add species. A species with that \
name alredy exisits.")
self.listOfSpecies[obj.name] = obj;
else: # obj is a list of species
for S in obj:
if S.name in self.listOfSpecies:
raise ModelError("Can't add species. A species with that \
name alredy exisits.")
self.listOfSpecies[S.name] = S;
return obj
def delete_species(self, obj):
"""
Removes a species object by name.
Attributes
----------
sname : str
Name of the species object to be removed.
"""
self.listOfSpecies.pop(obj)
def delete_all_species(self):
"""
Removes all species from the model object.
"""
self.listOfSpecies.clear()
def set_units(self, units):
"""
Sets the units of the model to either "population" or "concentration"
Attributes
----------
units : str
Either "population" or "concentration"
"""
if units.lower() == 'concentration' or units.lower() == 'population':
self.units = units.lower()
else:
raise ModelError("units must be either concentration or \
population (case insensitive)")
def get_parameter(self, pname):
"""
Returns a parameter object by name.
Attributes
----------
pname : str
Name of the parameter object to be returned.
"""
try:
return self.listOfParameters[pname]
except:
raise ModelError("No parameter named "+pname)
def get_all_parameters(self):
"""
Returns a dict of all parameters in the model, of the form:
{name : parameter object}
"""
return self.listOfParameters
def add_parameter(self,params):
"""
Adds a parameter, or list of parameters to the model.
Attributes
----------
obj : Parameter, or list of Parameters
The parameter or list of parameters to be added to the model object.
"""
# TODO, make sure that you don't overwrite an existing parameter??
if isinstance(params, list):
for p in params:
self.listOfParameters[p.name] = p
else:
if isinstance(params, Parameter):
self.listOfParameters[params.name] = params
else:
raise Exception("params should be of type `Parameter` and is instead of type {}".format(type(params)))
return params
def delete_parameter(self, obj):
"""
Removes a parameter object by name.
Attributes
----------
obj : str
Name of the parameter object to be removed.
"""
self.listOfParameters.pop(obj)
def set_parameter(self, pname, expression):
"""
Set the value of an existing paramter "pname" to "expression".
Attributes
----------
pname : str
Name of the parameter whose value will be set.
expression : str
*String* that may be executed in C, describing the value of the
parameter. May reference other parameters by name. (e.g. "k1*4")
"""
p = self.listOfParameters[pname]
p.expression = expression
p.evaluate()
def resolve_parameters(self):
""" Internal function:
attempt to resolve all parameter expressions to scalar floats.
        This method must be called before exporting the model. """
self.update_namespace()
for param in self.listOfParameters:
try:
self.listOfParameters[param].evaluate(self.namespace)
except:
raise ParameterError("Could not resolve Parameter expression "
+ param + "to a scalar value.")
def delete_all_parameters(self):
""" Deletes all parameters from model. """
self.listOfParameters.clear()
def add_reaction(self,reacs):
"""
Adds a reaction, or list of reactions to the model.
Attributes
----------
obj : Reaction, or list of Reactions
The reaction or list of reaction objects to be added to the model
object.
"""
# TODO, make sure that you cannot overwrite an existing reaction
if isinstance(reacs, list):
for r in reacs:
self.listOfReactions[r.name] = r
elif isinstance(reacs, dict):
self.listOfReactions = reacs
elif isinstance(reacs, Reaction):
self.listOfReactions[reacs.name] = reacs
else:
raise Exception("reacs should be a list, dict or Reaction and is instead a {}".format(type(reacs)))
return reacs
def timespan(self, tspan):
"""
Set the time span of simulation. StochKit does not support non-uniform
timespans.
Attributes
----------
tspan : numpy ndarray
Evenly-spaced list of times at which to sample the species
populations during the simulation.
"""
items = numpy.diff(tspan)
items = [round(x, 10) for x in items]
isuniform = (len(set(items)) == 1)
if isuniform:
self.tspan = tspan
else:
raise InvalidModelError("StochKit only supports uniform timespans")
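# Illustrative sketch: StochKit requires a uniformly spaced timespan, so
# numpy.linspace / numpy.arange are the natural way to build one, e.g.
#
#     import numpy
#     model.timespan(numpy.linspace(0, 100, 101))   # t = 0, 1, ..., 100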
def get_reaction(self, rname):
return self.listOfReactions[rname]
def get_all_reactions(self):
return self.listOfReactions
def delete_reaction(self, obj):
self.listOfReactions.pop(obj)
def delete_all_reactions(self):
self.listOfReactions.clear()
def run(self, number_of_trajectories=1, seed=None,
solver=None, stochkit_home=None, debug=False, show_labels=True):
"""
Function calling simulation of the model. There are a number of
parameters to be set here.
Attributes
----------
number_of_trajectories : int
The number of times to sample the chemical master equation. Each
trajectory will be returned at the end of the simulation.
Optional, defaults to 1.
seed : int
The random seed for the simulation. Optional, defaults to None.
solver : gillespy.GillesPySolver
The solver by which to simulate the model. This solver object may
be initialized separately to specify an algorithm. Optional,
defaults to the StochKit SSA solver.
stochkit_home : str
Path to stochkit. This is set automatically upon installation, but
may be overwritten if desired.
debug : bool (False)
Set to True to provide additional debug information about the
simulation.
show_labels : bool (True)
Use names of species as index of result object rather than position numbers.
"""
if solver is not None:
if issubclass(solver, GillesPySolver):
return solver.run(self, t=self.tspan[-1],
increment=self.tspan[-1]-self.tspan[-2],
seed=seed,
number_of_trajectories=number_of_trajectories,
stochkit_home=stochkit_home, debug=debug,
show_labels=show_labels)
else:
raise SimulationError(
"argument 'solver' to run() must be"+
" a subclass of GillesPySolver")
else:
return StochKitSolver.run(self,t=self.tspan[-1],
increment=self.tspan[-1]-self.tspan[-2], seed=seed,
number_of_trajectories=number_of_trajectories,
stochkit_home=stochkit_home, debug=debug,
show_labels=show_labels)
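# Illustrative sketch of driving a simulation (the model is hypothetical):
#
#     results = model.run(number_of_trajectories=10, seed=1234)
#     trajectory = results[0]   # with show_labels=True: dict keyed by column label
#
#     raw = model.run(show_labels=False)[0]   # plain numpy array of columns
#
# Passing an explicit solver is equivalent, e.g.
# model.run(solver=StochKitODESolver) for a deterministic ODE solution.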
class Species(object):
"""
Chemical species. Can be added to Model object to interact with other
species or time.
Attributes
----------
name : str
The name by which this species will be called in reactions and within
the model.
initial_value : int >= 0
Initial population of this species. If this is not provided as an int,
it will be cast to an int (via numpy.int) when the species is created.
"""
def __init__(self, name="", initial_value=0):
# A species has a name (string) and an initial value (positive integer)
self.name = name
self.initial_value = np.int(initial_value)
assert self.initial_value >= 0, "A species initial value has to \
be non-negative."
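# Illustrative sketch (species names are hypothetical):
#
#     A = Species(name='A', initial_value=100)
#     B = Species(name='B', initial_value=0)
#     model.add_species([A, B])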
class Parameter():
"""
A parameter can be given as an expression (function) or directly
as a value (scalar). If given an expression, it should be
understood as evaluable in the namespace of a parent Model.
Attributes
----------
name : str
The name by which this parameter is called or referenced in reactions.
expression : str
String for a function calculating parameter values. Should be evaluable
in namespace of Model.
value : float
Value of a parameter if it is not dependent on other Model entities.
"""
def __init__(self, name="", expression=None, value=None):
self.name = name
# We allow expression to be passed in as a non-string type. Invalid strings
# will be caught below. It is perfectly fine to give a scalar value as the expression.
# This can then be evaluated in an empty namespace to the scalar value.
self.expression = expression
if expression is not None:
self.expression = str(expression)
self.value = value
# self.value is allowed to be None, but not self.expression. self.value
# might not be evaluable in the namespace of this parameter, but defined
# in the context of a model or reaction.
if self.expression is None:
raise TypeError("A Parameter requires an expression.")
if self.value is None:
self.evaluate()
def evaluate(self, namespace={}):
"""
Evaluate the expression and return the (scalar) value in the given
namespace.
Attributes
----------
namespace : dict (optional)
The namespace in which to test evaluation of the parameter, if it
involves other parameters, etc.
"""
try:
self.value = (float(eval(self.expression, namespace)))
except:
self.value = None
def set_expression(self, expression):
"""
Sets the expression for a parameter.
"""
self.expression = expression
# We allow expression to be passed in as a non-string type. Invalid
# strings will be caught below. It is perfectly fine to give a scalar
# value as the expression. This can then be evaluated in an empty
# namespace to the scalar value.
if expression is not None:
self.expression = str(expression)
if self.expression is None:
raise TypeError("A Parameter requires an expression.")
self.evaluate()
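# Illustrative sketch: a Parameter can hold a plain scalar or an expression
# evaluated against other parameters (names below are hypothetical):
#
#     k1 = Parameter(name='k1', expression=0.0017)   # scalar, value set immediately
#     k2 = Parameter(name='k2', expression='k1*2')   # resolved later, e.g. via
#     k2.evaluate({'k1': 0.0017})                    # an explicit namespace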
class Reaction():
"""
Models a single reaction. A reaction has its own dicts of species
(reactants and products) and parameters. The reaction's propensity
function needs to be evaluable (and result in a non-negative scalar
value) in the namespace defined by the union of those dicts.
Attributes
----------
name : str
The name by which the reaction is called.
reactants : dict
The reactants that are consumed in the reaction, with stoichiometry. An
example would be {R1 : 1, R2 : 2} if the reaction consumes one of R1 and
two of R2, where R1 and R2 are Species objects.
products : dict
The species that are created by the reaction event, with stoichiometry.
Same format as reactants.
propensity_function : str
The custom propensity function for the reaction. Must be evaluable in the
namespace of the reaction using C operations.
massaction : bool
The switch to use a mass-action reaction. If set to True, a rate value
is required.
rate : float
The rate of the mass-action reaction. Take care to note the units...
annotation : str
An optional note about the reaction.
Notes
----------
For a species that is NOT consumed in the reaction but is part of a mass
action reaction, add it as both a reactant and a product.
Mass-action reactions must also have a rate term added. Note that the rate
must be scaled by the volume prior to being added for unit consistency.
"""
def __init__(self, name = "", reactants = {}, products = {},
propensity_function = None, massaction = False,
rate=None, annotation=None):
"""
Initializes the reaction using short-hand notation.
"""
# Metadata
self.name = name
self.annotation = ""
if rate is None and propensity_function is None:
raise ReactionError("You must specify either a mass-action rate or"+
" a propensity function")
# We might use this flag in the future to automatically generate
# the propensity function if set to True.
if propensity_function is not None:
self.massaction = False
else:
self.massaction = True
self.propensity_function = propensity_function
if self.propensity_function is not None and self.massaction:
errmsg = ("Reaction "+self.name +" You cannot set the propensity "+
"type to mass-action and simultaneously set a propensity function."
)
raise ReactionError(errmsg)
self.reactants = {}
for r in reactants:
if isinstance(r, Species):
self.reactants[r.name] = reactants[r]
else:
self.reactants[r] = reactants[r]
self.products = {}
for p in products:
if isinstance(p, Species):
self.products[p.name] = products[p]
else:
self.products[p] = products[p]
if self.massaction:
self.type = "mass-action"
if rate is None:
raise ReactionError("Reaction : A mass-action propensity has\
to have a rate.")
self.marate = rate
self.create_mass_action()
else:
self.type = "customized"
def create_mass_action(self):
"""
Initializes the mass action propensity function given
self.reactants and a single parameter value.
"""
# We support zeroth, first and second order propensities only.
# There is no theoretical justification for higher order propensities.
# Users can still create such propensities if they really want to,
# but should then use a custom propensity.
total_stoch=0
for r in self.reactants:
total_stoch+=self.reactants[r]
if total_stoch>2:
raise ReactionError("Reaction: A mass-action reaction cannot \
involve more than two reactant molecules in total.")
# Case EmptySet -> Y
propensity_function = self.marate.name
# There are only three ways to get 'total_stoch==2':
for r in self.reactants:
# Case 1: 2X -> Y
if self.reactants[r] == 2:
propensity_function = ("0.5*" +propensity_function+
"*"+r+"*("+r+"-1)/vol")
else:
# Case 2: X -> Y, or X1 + X2 -> Y
propensity_function += "*"+r
# Set the volume dependency based on order.
order = len(self.reactants)
if order == 2:
propensity_function += "/vol"
elif order == 0:
propensity_function += "*vol"
self.propensity_function = propensity_function
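# For reference, the propensity strings generated above (the rate name 'k'
# is hypothetical; 'vol' is the model volume):
#
#     EmptySet -> Y  :  "k*vol"
#     X        -> Y  :  "k*X"
#     2X       -> Y  :  "0.5*k*X*(X-1)/vol"
#     X1 + X2  -> Y  :  "k*X1*X2/vol"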
def setType(self, rxntype):
"""
Sets reaction type to either "mass-action" or "customized"
Attributes
----------
rxntype : str
Either "mass-action" or "customized"
"""
if rxntype.lower() not in {'mass-action','customized'}:
raise ReactionError("Invalid reaction type.")
self.type = rxntype.lower()
self.massaction = False if self.type == 'customized' else True
def addReactant(self, S, stoichiometry):
"""
Adds a reactant to the reaction (species that is consumed)
Attributes
----------
S : gillespy.Species
Reactant to add to this reaction.
stoichiometry : int
The stoichiometry of the given reactant.
"""
if stoichiometry <= 0:
raise ReactionError("Reaction Stoichiometry must be a \
positive integer.")
self.reactants[S.name]=stoichiometry
print(self.reactants)
def addProduct(self, S, stoichiometry):
"""
Adds a product to the reaction (species that is created)
Attributes
----------
S : gillespy.Species
Product to add to this reaction.
stoichiometry : int
The stoichiometry of the given product.
"""
self.products[S.name]=stoichiometry
def Annotate(self, annotation):
"""
Adds a note to the reaction
Attributes
----------
annotation : str
An optional note about the reaction.
"""
self.annotation = annotation
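# Illustrative sketch of building reactions (all names are hypothetical;
# A, B, C are Species and k_bind is a Parameter already added to the model):
#
#     bind = Reaction(name='bind', reactants={A: 1, B: 1},
#                     products={C: 1}, rate=k_bind)
#
#     burst = Reaction(name='burst', reactants={C: 1}, products={A: 2},
#                      propensity_function='k_bind*C*C/(10+C)')
#
#     model.add_reaction([bind, burst])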
# Module exceptions
class ModelError(Exception):
pass
class SpeciesError(ModelError):
pass
class ReactionError(ModelError):
pass
class ParameterError(ModelError):
pass
class SimuliationError(Exception):
pass
class StochMLDocument():
""" Serialization and deserialization of a Model to/from
the native StochKit2 XML format. """
def __init__(self):
# The root element
self.document = etree.Element("Model")
self.annotation = None
@classmethod
def from_model(cls,model):
"""
Creates a StochKit XML document from an existing Model object.
This method assumes that all the parameters in the model are already
resolved to scalar floats (see Model.resolve_parameters).
Note, this method is intended to be used internally by the model's
'serialize' function, which performs additional operations and
tests on the model prior to writing out the XML file. You should NOT
do:
document = StochMLDocument.from_model(model)
print(document.to_string())
You SHOULD do
print(model.serialize())
"""
# Description
md = cls()
d = etree.Element('Description')
#
if model.units.lower() == "concentration":
d.set('units', model.units.lower())
d.text = model.annotation
md.document.append(d)
# Number of Reactions
nr = etree.Element('NumberOfReactions')
nr.text = str(len(model.listOfReactions))
md.document.append(nr)
# Number of Species
ns = etree.Element('NumberOfSpecies')
ns.text = str(len(model.listOfSpecies))
md.document.append(ns)
# Species
spec = etree.Element('SpeciesList')
for sname in model.listOfSpecies:
spec.append(md.species_to_element(model.listOfSpecies[sname]))
md.document.append(spec)
# Parameters
params = etree.Element('ParametersList')
for pname in model.listOfParameters:
params.append(md.parameter_to_element(
model.listOfParameters[pname]))
params.append(md.parameter_to_element(Parameter(name='vol', expression=model.volume)))
md.document.append(params)
# Reactions
reacs = etree.Element('ReactionsList')
for rname in model.listOfReactions:
reacs.append(md.reaction_to_element(model.listOfReactions[rname], model.volume))
md.document.append(reacs)
return md
@classmethod
def from_file(cls, filepath):
""" Initializes the document from an existing native StochKit XML
file read from disk. """
tree = etree.parse(filepath)
root = tree.getroot()
md = cls()
md.document = root
return md
@classmethod
def from_string(cls, string):
""" Initializes the document from a string containing native
StochKit XML. """
root = etree.fromstring(string)
md = cls()
md.document = root
return md
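# Illustrative round trip between a Model and StochKit XML (the file name
# is hypothetical):
#
#     xml_text = model.serialize()                      # Model -> StochKit XML
#     doc = StochMLDocument.from_file('my_model.xml')   # XML file -> document
#     model2 = doc.to_model('MyModel')                  # document -> Model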
def to_model(self,name):
""" Instantiates a Model object from a StochMLDocument. """
# Empty model
model = Model(name=name)
root = self.document
# Try to set name from document
if model.name == "":
name = root.find('Name')
if name is None or name.text is None:
raise Exception("Model should have a name.")
else:
model.name = name.text
# Set annotation
ann = root.find('Description')
if ann is not None:
units = ann.get('units')
if units:
units = units.strip().lower()
if units == "concentration":
model.units = "concentration"
elif units == "population":
model.units = "population"
else: # Default
model.units = "population"
if ann.text is None:
model.annotation = ""
else:
model.annotation = ann.text
# Set units
units = root.find('Units')
if units is not None:
if units.text.strip().lower() == "concentration":
model.units = "concentration"
elif units.text.strip().lower() == "population":
model.units = "population"
else: # Default
model.units = "population"
# Create parameters
for px in root.iter('Parameter'):
name = px.find('Id').text
expr = px.find('Expression').text
if name.lower() == 'volume':
model.volume = expr
else:
p = Parameter(name,expression=expr)
# Try to evaluate the expression in the empty namespace
# (if the expr is a scalar value)
p.evaluate()
model.add_parameter(p)
# Create species
for spec in root.iter('Species'):
name = spec.find('Id').text
val = spec.find('InitialPopulation').text
s = Species(name,initial_value = float(val))
model.add_species([s])
# The namespace_propensity for evaluating the propensity function
# for reactions must contain all the species and parameters.
namespace_propensity = OrderedDict()
all_species = model.get_all_species()
all_parameters = model.get_all_parameters()
for param in all_species:
namespace_propensity[param] = all_species[param].initial_value
for param in all_parameters:
namespace_propensity[param] = all_parameters[param].value
# Create reactions
for reac in root.iter('Reaction'):
try:
name = reac.find('Id').text
except:
raise InvalidStochMLError("Reaction has no name.")
reaction = Reaction(name=name,reactants={},products={})
# Type may be 'mass-action','customized'
try:
type = reac.find('Type').text
except:
raise InvalidStochMLError("No reaction type specified.")
reactants = reac.find('Reactants')
try:
for ss in reactants.iter('SpeciesReference'):
specname = ss.get('id')
# The stochiometry should be an integer value, but some
# existing StochKit models have them as floats. This is
# why we need the slightly odd conversion below.
stoch = int(float(ss.get('stoichiometry')))
# Select a reference to species with name specname
sref = model.listOfSpecies[specname]
try:
# The sref list should only contain one element if
# the XML file is valid.
reaction.reactants[specname] = stoch
except Exception as e:
raise StochMLImportError(e)
except:
# Yes, this is correct. 'reactants' can be None
pass
products = reac.find('Products')
try:
for ss in products.iter('SpeciesReference'):
specname = ss.get('id')
stoch = int(float(ss.get('stoichiometry')))
sref = model.listOfSpecies[specname]
try:
# The sref list should only contain one element if
# the XML file is valid.
reaction.products[specname] = stoch
except Exception as e:
raise StochMLImportError(e)
except:
# Yes, this is correct. 'products' can be None
pass
if type == 'mass-action':
reaction.massaction = True
reaction.type = 'mass-action'
# If it is mass-action, a parameter reference is needed.
# This has to be a reference to a species instance. We
# explicitly disallow a scalar value to be passed as the
# parameter.
try:
ratename=reac.find('Rate').text
try:
reaction.marate = model.listOfParameters[ratename]
except KeyError as k:
# No parameter name is given. This is a valid use case
# in StochKit. We generate a name for the parameter,
# and create a new parameter instance. The parameter's
# value should now be found in 'ratename'.
generated_rate_name = "Reaction_" + name + \
"_rate_constant"
p = Parameter(name=generated_rate_name,
expression=ratename)
# Try to evaluate the parameter to set its value
p.evaluate()
model.add_parameter(p)
reaction.marate = model.listOfParameters[
generated_rate_name]
reaction.create_mass_action()
except Exception as e:
raise
elif type == 'customized':
try:
propfunc = reac.find('PropensityFunction').text
except Exception as e:
raise InvalidStochMLError("Found a customized " +
"propensity function, but no expression was given."+e)
reaction.propensity_function = propfunc
else:
raise InvalidStochMLError(
"Unsupported or no reaction type given for reaction" + name)
model.add_reaction(reaction)
return model
def to_string(self):
""" Returns the document as a string. """
try:
return etree.tostring(self.document, pretty_print=True)
except:
# Hack to print pretty xml without pretty-print
# (requires the lxml module).
doc = etree.tostring(self.document)
xmldoc = xml.dom.minidom.parseString(doc)
uglyXml = xmldoc.toprettyxml(indent=' ')
text_re = re.compile(r">\n\s+([^<>\s].*?)\n\s+</", re.DOTALL)
prettyXml = text_re.sub(r">\g<1></", uglyXml)
return prettyXml
def species_to_element(self,S):
e = etree.Element('Species')
idElement = etree.Element('Id')
idElement.text = S.name
e.append(idElement)
if hasattr(S, 'description'):
descriptionElement = etree.Element('Description')
descriptionElement.text = S.description
e.append(descriptionElement)
initialPopulationElement = etree.Element('InitialPopulation')
initialPopulationElement.text = str(S.initial_value)
e.append(initialPopulationElement)
return e
def parameter_to_element(self,P):
e = etree.Element('Parameter')
idElement = etree.Element('Id')
idElement.text = P.name
e.append(idElement)
expressionElement = etree.Element('Expression')
expressionElement.text = str(P.value)
e.append(expressionElement)
return e
def reaction_to_element(self,R, model_volume):
e = etree.Element('Reaction')
idElement = etree.Element('Id')
idElement.text = R.name
e.append(idElement)
descriptionElement = etree.Element('Description')
descriptionElement.text = self.annotation
e.append(descriptionElement)
# StochKit2 wants a rate for mass-action propensities
if R.massaction and model_volume == 1.0:
rateElement = etree.Element('Rate')
# A mass-action reaction should only have one parameter
rateElement.text = R.marate.name
typeElement = etree.Element('Type')
typeElement.text = 'mass-action'
e.append(typeElement)
e.append(rateElement)
else:
typeElement = etree.Element('Type')
typeElement.text = 'customized'
e.append(typeElement)
functionElement = etree.Element('PropensityFunction')
functionElement.text = R.propensity_function
e.append(functionElement)
reactants = etree.Element('Reactants')
for reactant, stoichiometry in list(R.reactants.items()):
srElement = etree.Element('SpeciesReference')
srElement.set('id', reactant)
srElement.set('stoichiometry', str(stoichiometry))
reactants.append(srElement)
e.append(reactants)
products = etree.Element('Products')
for product, stoichiometry in list(R.products.items()):
srElement = etree.Element('SpeciesReference')
srElement.set('id', product)
srElement.set('stoichiometry', str(stoichiometry))
products.append(srElement)
e.append(products)
return e
class GillesPySolver():
"""
Abstract class for a solver. This is generally called from within a
gillespy Model through the Model.run function. Returns simulation
trajectories.
Attributes
----------
model : gillespy.Model
The model on which the solver will operate.
t : float
The end time of the solver.
number_of_trajectories : int
The number of times to sample the chemical master equation. Each
trajectory will be returned at the end of the simulation.
increment : float
The time step of the solution.
seed : int
The random seed for the simulation. Defaults to None.
stochkit_home : str
Path to stochkit. This is set automatically upon installation, but
may be overwritten if desired.
algorithm : str
The solver by which to simulate the model. 'ssa' or 'tau_leaping'
are the available options. If 'ssa' is chosen, StochKit will choose
from the available ssa options.
job_id : str
If given, this will be the name of the solver run. Usually not set.
extra_args : str
Any extra arguments for the stochkit solver. See StochKit2
documentation for details.
debug : bool (False)
Set to True to provide additional debug information about the
simulation.
show_labels : bool (True)
Use names of species as index of result object rather than position numbers.
"""
def run(self, model, t=20, number_of_trajectories=1,
increment=0.05, seed=None, stochkit_home=None, algorithm=None,
job_id=None, extra_args='', debug=False, show_labels=False):
"""
Call out and run the solver. Collect the results.
"""
if algorithm is None:
raise SimuliationError("No algorithm selected")
# We write all StochKit input and output files to a temporary folder
prefix_basedir = tempfile.mkdtemp()
prefix_outdir = os.path.join(prefix_basedir, 'output')
os.mkdir(prefix_outdir)
if job_id is None:
job_id = str(uuid.uuid4())
# Write a temporary StochKit2 input file.
if isinstance(model, Model):
outfile = os.path.join(prefix_basedir,
"temp_input_"+job_id+".xml")
mfhandle = open(outfile, 'w')
#document = StochMLDocument.from_model(model)
# If the model is a Model instance, we serialize it to XML,
# and if it is an XML file, we just make a copy.
if isinstance(model, Model):
document = model.serialize()
mfhandle.write(document)
mfhandle.close()
elif isinstance(model, str):
outfile = model
# Assemble argument list for StochKit
ensemblename = job_id
directories = os.listdir(prefix_outdir)
outdir = prefix_outdir+'/'+ensemblename
# Algorithm, SSA or Tau-leaping?
executable = None
if stochkit_home is not None:
if os.path.isfile(os.path.join(stochkit_home, algorithm)):
executable = os.path.join(stochkit_home, algorithm)
else:
raise SimuliationError("stochkit executable '{0}' not found \
at stochkit_home={1}".format(algorithm, stochkit_home))
elif os.environ.get('STOCHKIT_HOME') is not None:
if os.path.isfile(os.path.join(os.environ.get('STOCHKIT_HOME'),
algorithm)):
executable = os.path.join(os.environ.get('STOCHKIT_HOME'),
algorithm)
if executable is None:
# try to find the executable in the path
if os.environ.get('PATH') is not None:
for dir in os.environ.get('PATH').split(':'):
if os.path.isfile(os.path.join(dir, algorithm)):
executable = os.path.join(dir, algorithm)
break
if executable is None:
raise SimulationError("stochkit executable '{0}' not found. \
Make sure it is in your path, or set the STOCHKIT_HOME environment \
variable.".format(algorithm))
# Assemble the argument list
args = ''
args += '--model '
args += outfile
args += ' --out-dir '+outdir
args += ' -t '
args += str(t)
if increment is None:
increment = t/20.0
num_output_points = str(int(float(t/increment)))
args += ' -i ' + num_output_points
if ensemblename in directories:
print('Ensemble '+ensemblename+' already existed, using --force.')
args+=' --force'
# If we are using local mode, shell out and run StochKit
# (SSA or Tau-leaping or ODE)
cmd = executable+' '+args+' '+extra_args
if debug:
print("cmd: {0}".format(cmd))
# Execute
try:
#print "CMD: {0}".format(cmd)
handle = subprocess.Popen(cmd, stdout = subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
return_code = handle.wait()
except OSError as e:
raise SimuliationError("Solver execution failed: \
{0}\n{1}".format(cmd, e))
try:
stderr = handle.stderr.read()
except Exception as e:
stderr = 'Error reading stderr: {0}'.format(e)
try:
stdout = handle.stdout.read()
except Exception as e:
stdout = 'Error reading stdout: {0}'.format(e)
if return_code != 0:
#print stdout
#print stderr
raise SimuliationError("Solver execution failed: \
'{0}' output: {1}{2}".format(cmd,stdout,stderr))
# Get data using solver specific function
try:
if show_labels:
labels, trajectories = self.get_trajectories(outdir, debug=debug, show_labels=True)
else:
trajectories = self.get_trajectories(outdir, debug=debug, show_labels=False)
except Exception as e:
fname = os.path.join(prefix_basedir,'temp_input_{0}_generated_code'.format(ensemblename),'compile-log.txt')
if os.path.isfile(fname):
with open(fname) as f:
cerr = f.read()
raise SimulationError("Error compiling custom propensities: {0}\n{1}\n".format(fname,cerr))
fname = os.path.join(prefix_outdir,ensemblename,'log.txt')
if os.path.isfile(fname):
with open(fname) as f:
cerr = f.read()
raise SimulationError("Error running simulation: {0}\n{1}\n".format(fname,cerr))
raise SimulationError("Error using solver.get_trajectories('{0}'): {1}".format(outdir, e))
if len(trajectories) == 0:
#print stdout
#print stderr
raise SimuliationError("Solver execution failed: \
'{0}' output: {1}{2}".format(cmd,stdout,stderr))
# Clean up
if debug:
print("prefix_basedir={0}".format(prefix_basedir))
print("STDOUT: {0}".format(stdout))
print("STDERR: {0}".format(stderr))
else:
shutil.rmtree(prefix_basedir)
# Return data
if show_labels:
results2 = []
for r in trajectories:
ret = {}
for n,l in enumerate(labels):
ret[l] = r[:,n]
results2.append(ret)
return results2
else:
return trajectories
class StochKitSolver(GillesPySolver):
"""
Abstract class for StochKit solver derived from the GillesPySolver class.
This is generally used to set up the solver.
Attributes
----------
model : gillespy.Model
The model on which the solver will operate.
t : float
The end time of the solver.
number_of_trajectories : int
The number of times to sample the chemical master equation. Each
trajectory will be returned at the end of the simulation.
increment : float
The time step of the solution.
seed : int
The random seed for the simulation. Defaults to None.
stochkit_home : str
Path to stochkit. This is set automatically upon installation, but
may be overwritten if desired.
algorithm : str
The solver by which to simulate the model. 'ssa' or 'tau_leaping'
are the available options. If 'ssa' is chosen, StochKit will choose
from the available ssa options.
job_id : str
If given, this will be the name of the solver run. Usually not set.
method : str
The specific SSA to call. NOT YET FUNCTIONAL.
debug : bool (False)
Set to True to provide additional debug information about the
simulation.
"""
@classmethod
def run(cls, model, t=20, number_of_trajectories=1,
increment=0.05, seed=None, stochkit_home=None, algorithm='ssa',
job_id=None, method=None,debug=False, show_labels=False):
# all this is specific to StochKit
if model.units == "concentration":
raise SimuliationError("StochKit can only simulate population "+
"models, please convert to population-based model for "+
"stochastic simulation. Use solver = StochKitODESolver "+
"instead to simulate a concentration model deterministically.")
if seed is None:
seed = random.randint(0, 2147483647)
# StochKit breaks for long ints
if seed.bit_length()>=32:
seed = seed & ((1<<32)-1)
if seed > (1 << 31) -1:
seed -= 1 << 32
# Only use one processor per StochKit job.
args = ' -p 1'
# We keep all the trajectories by default.
args += ' --keep-trajectories'
args += ' --label'
args += ' --seed '
args += str(seed)
realizations = number_of_trajectories
args += ' --realizations '
args += str(realizations)
if method is not None: #This only works for StochKit 2.1
args += ' --method ' + str(method)
self = StochKitSolver()
return GillesPySolver.run(self, model,t, number_of_trajectories,
increment, seed, stochkit_home,
algorithm,
job_id, extra_args=args, debug=debug,
show_labels=show_labels)
def get_trajectories(self, outdir, debug=False, show_labels=False):
# Collect all the output data
trajectories = []
files = os.listdir(outdir + '/trajectories')
labels = []
if show_labels:
with open(outdir + '/trajectories/trajectory0.txt', 'r') as f:
first_line= f.readline()
labels = first_line.split()
for filename in files:
if 'trajectory' in filename:
trajectories.append(numpy.loadtxt(outdir + '/trajectories/' +
filename, skiprows=1))
else:
raise SimuliationError("Couldn't identify file '{0}' found in \
output folder".format(filename))
if show_labels:
return (labels, trajectories)
else:
return trajectories
class StochKitODESolver(GillesPySolver):
"""
Abstract class for StochKit solver derived from the GillesPySolver class.
This is generally used to set up the solver.
Attributes
----------
model : gillespy.Model
The model on which the solver will operate.
t : float
The end time of the solver.
number_of_trajectories : int
The number of times to sample the chemical master equation. Each
trajectory will be returned at the end of the simulation.
increment : float
The time step of the solution.
seed : int
The random seed for the simulation. Defaults to None.
stochkit_home : str
Path to stochkit. This is set automatically upon installation, but
may be overwritten if desired.
algorithm : str
Already set to 'stochkit_ode.py'
job_id : str
If given, this will be the name of the solver run. Usually not set.
debug : bool (False)
Set to True to provide additional debug information about the
simulation.
"""
@classmethod
def run(cls, model, t=20, number_of_trajectories=1,
increment=0.05, seed=None, stochkit_home=None,
algorithm='stochkit_ode.py',
job_id=None, debug=False, show_labels=False):
self = StochKitODESolver()
return GillesPySolver.run(self,model,t, number_of_trajectories,
increment, seed, stochkit_home,
algorithm,
job_id, debug=debug,
show_labels=show_labels)
def get_trajectories(self, outdir, debug=False, show_labels=False):
if debug:
print("StochKitODESolver.get_trajectories(outdir={0}".format(outdir))
# Collect all the output data
trajectories = []
with open(outdir + '/output.txt') as fd:
fd.readline()
headers = fd.readline()
fd.readline()
data = []
data.append([float(x) for x in fd.readline().split()])
fd.readline()
for line in fd:
data.append([float(x) for x in line.split()])
trajectories.append(numpy.array(data))
if show_labels:
return (headers.split(), trajectories)
else:
return trajectories
# Exceptions
class StochMLImportError(Exception):
pass
class InvalidStochMLError(Exception):
pass
class InvalidModelError(Exception):
pass
class SimulationError(Exception):
pass
| gpl-3.0 |
jswanljung/iris | lib/iris/tests/__init__.py | 1 | 39426 | # (C) British Crown Copyright 2010 - 2016, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
"""
Provides testing capabilities and customisations specific to Iris.
.. note:: This module needs to control the matplotlib backend, so it
**must** be imported before ``matplotlib.pyplot``.
The primary class for this module is :class:`IrisTest`.
By default, this module sets the matplotlib backend to "agg". But when
this module is imported it checks ``sys.argv`` for the flag "-d". If
found, it is removed from ``sys.argv`` and the matplotlib backend is
switched to "tkagg" to allow the interactive visual inspection of
graphical test results.
"""
from __future__ import (absolute_import, division, print_function)
from six.moves import (filter, input, map, range, zip) # noqa
import six
import codecs
import collections
import contextlib
import difflib
import filecmp
import functools
import gzip
import hashlib
import inspect
import json
import io
import logging
import os
import os.path
import shutil
import subprocess
import sys
import unittest
import warnings
import xml.dom.minidom
import zlib
try:
from unittest import mock
except ImportError:
import mock
import filelock
import numpy as np
import numpy.ma as ma
import iris.cube
import iris.config
import iris.util
# Test for availability of matplotlib.
# (And remove matplotlib as an iris.tests dependency.)
try:
import matplotlib
matplotlib.use('agg')
import matplotlib.testing.compare as mcompare
import matplotlib.pyplot as plt
except ImportError:
MPL_AVAILABLE = False
else:
MPL_AVAILABLE = True
try:
from osgeo import gdal
except ImportError:
GDAL_AVAILABLE = False
else:
GDAL_AVAILABLE = True
try:
import iris_grib
GRIB_AVAILABLE = True
from iris_grib.message import GribMessage
except ImportError:
try:
import gribapi
GRIB_AVAILABLE = True
from iris.fileformats.grib.message import GribMessage
except ImportError:
GRIB_AVAILABLE = False
try:
import iris_sample_data
except ImportError:
SAMPLE_DATA_AVAILABLE = False
else:
SAMPLE_DATA_AVAILABLE = True
try:
import nc_time_axis
NC_TIME_AXIS_AVAILABLE = True
except ImportError:
NC_TIME_AXIS_AVAILABLE = False
#: Basepath for test results.
_RESULT_PATH = os.path.join(os.path.dirname(__file__), 'results')
if '--data-files-used' in sys.argv:
sys.argv.remove('--data-files-used')
fname = '/var/tmp/all_iris_test_resource_paths.txt'
print('saving list of files used by tests to %s' % fname)
_EXPORT_DATAPATHS_FILE = open(fname, 'w')
else:
_EXPORT_DATAPATHS_FILE = None
if '--create-missing' in sys.argv:
sys.argv.remove('--create-missing')
print('Allowing creation of missing test results.')
os.environ['IRIS_TEST_CREATE_MISSING'] = 'true'
# A shared logger for use by unit tests
logger = logging.getLogger('tests')
# Whether to display matplotlib output to the screen.
_DISPLAY_FIGURES = False
if (MPL_AVAILABLE and '-d' in sys.argv):
sys.argv.remove('-d')
plt.switch_backend('tkagg')
_DISPLAY_FIGURES = True
_DEFAULT_IMAGE_TOLERANCE = 10.0
def main():
"""A wrapper for unittest.main() which adds iris.test specific options to the help (-h) output."""
if '-h' in sys.argv or '--help' in sys.argv:
stdout = sys.stdout
buff = io.StringIO()
# NB. unittest.main() raises an exception after it's shown the help text
try:
sys.stdout = buff
unittest.main()
finally:
sys.stdout = stdout
lines = buff.getvalue().split('\n')
lines.insert(9, 'Iris-specific options:')
lines.insert(10, ' -d Display matplotlib figures (uses tkagg).')
lines.insert(11, ' NOTE: To compare results of failing tests, ')
lines.insert(12, ' use idiff.py instead')
lines.insert(13, ' --data-files-used Save a list of files used to a temporary file')
lines.insert(
14, ' --create-missing Create missing test results')
print('\n'.join(lines))
else:
unittest.main()
def get_data_path(relative_path):
"""
Return the absolute path to a data file when given the relative path
as a string, or sequence of strings.
"""
if not isinstance(relative_path, six.string_types):
relative_path = os.path.join(*relative_path)
data_path = os.path.join(iris.config.TEST_DATA_DIR, relative_path)
if _EXPORT_DATAPATHS_FILE is not None:
_EXPORT_DATAPATHS_FILE.write(data_path + '\n')
if isinstance(data_path, six.string_types) and not os.path.exists(data_path):
# if the file is gzipped, ungzip it and return the path of the ungzipped
# file.
gzipped_fname = data_path + '.gz'
if os.path.exists(gzipped_fname):
with gzip.open(gzipped_fname, 'rb') as gz_fh:
try:
with open(data_path, 'wb') as fh:
fh.writelines(gz_fh)
except IOError:
# Put ungzipped data file in a temporary path, since we
# can't write to the original path (maybe it is owned by
# the system.)
_, ext = os.path.splitext(data_path)
data_path = iris.util.create_temp_filename(suffix=ext)
with open(data_path, 'wb') as fh:
fh.writelines(gz_fh)
return data_path
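# Illustrative sketch (the relative path below is hypothetical): both forms
# resolve against iris.config.TEST_DATA_DIR, and a matching '<name>.gz' file
# is transparently decompressed if the plain file is absent:
#
#     path = get_data_path('PP/simple_pp/global.pp')
#     path = get_data_path(['PP', 'simple_pp', 'global.pp'])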
class IrisTest(unittest.TestCase):
"""A subclass of unittest.TestCase which provides Iris specific testing functionality."""
_assertion_counts = collections.defaultdict(int)
@classmethod
def setUpClass(cls):
# Ensure that the CF profile if turned-off for testing.
iris.site_configuration['cf_profile'] = None
def _assert_str_same(self, reference_str, test_str, reference_filename, type_comparison_name='Strings'):
if reference_str != test_str:
diff = ''.join(difflib.unified_diff(reference_str.splitlines(1), test_str.splitlines(1),
'Reference', 'Test result', '', '', 0))
self.fail("%s do not match: %s\n%s" % (type_comparison_name, reference_filename, diff))
@staticmethod
def get_result_path(relative_path):
"""
Returns the absolute path to a result file when given the relative path
as a string, or sequence of strings.
"""
if not isinstance(relative_path, six.string_types):
relative_path = os.path.join(*relative_path)
return os.path.abspath(os.path.join(_RESULT_PATH, relative_path))
def result_path(self, basename=None, ext=''):
"""
Return the full path to a test result, generated from the \
calling file, class and, optionally, method.
Optional kwargs :
* basename - File basename. If omitted, this is \
generated from the calling method.
* ext - Appended file extension.
"""
if ext and not ext.startswith('.'):
ext = '.' + ext
# Generate the folder name from the calling file name.
path = os.path.abspath(inspect.getfile(self.__class__))
path = os.path.splitext(path)[0]
sub_path = path.rsplit('iris', 1)[1].split('tests', 1)[1][1:]
# Generate the file name from the calling function name?
if basename is None:
stack = inspect.stack()
for frame in stack[1:]:
if 'test_' in frame[3]:
basename = frame[3].replace('test_', '')
break
filename = basename + ext
result = os.path.join(self.get_result_path(''),
sub_path.replace('test_', ''),
self.__class__.__name__.replace('Test_', ''),
filename)
return result
def assertCMLApproxData(self, cubes, reference_filename=None, *args,
**kwargs):
# passes args and kwargs on to approx equal
if isinstance(cubes, iris.cube.Cube):
cubes = [cubes]
if reference_filename is None:
reference_filename = self.result_path(None, 'cml')
reference_filename = [self.get_result_path(reference_filename)]
for i, cube in enumerate(cubes):
fname = list(reference_filename)
# don't want the ".cml" for the numpy data file
if fname[-1].endswith(".cml"):
fname[-1] = fname[-1][:-4]
fname[-1] += '.data.%d.npy' % i
self.assertCubeDataAlmostEqual(cube, fname, *args, **kwargs)
self.assertCML(cubes, reference_filename, checksum=False)
def assertCDL(self, netcdf_filename, reference_filename=None, flags='-h'):
"""
Test that the CDL for the given netCDF file matches the contents
of the reference file.
If the environment variable IRIS_TEST_CREATE_MISSING is
non-empty, the reference file is created if it doesn't exist.
Args:
* netcdf_filename:
The path to the netCDF file.
Kwargs:
* reference_filename:
The relative path (relative to the test results directory).
If omitted, the result is generated from the calling
method's name, class, and module using
:meth:`iris.tests.IrisTest.result_path`.
* flags:
Command-line flags for `ncdump`, as either a whitespace
separated string or an iterable. Defaults to '-h'.
"""
if reference_filename is None:
reference_path = self.result_path(None, 'cdl')
else:
reference_path = self.get_result_path(reference_filename)
# Convert the netCDF file to CDL file format.
cdl_filename = iris.util.create_temp_filename(suffix='.cdl')
if flags is None:
flags = []
elif isinstance(flags, six.string_types):
flags = flags.split()
else:
flags = list(map(str, flags))
with open(cdl_filename, 'w') as cdl_file:
subprocess.check_call(['ncdump'] + flags + [netcdf_filename],
stderr=cdl_file, stdout=cdl_file)
# Ingest the CDL for comparison, excluding first line.
with open(cdl_filename, 'r') as cdl_file:
lines = cdl_file.readlines()[1:]
# Sort the dimensions (except for the first, which can be unlimited).
# This gives consistent CDL across different platforms.
sort_key = lambda line: ('UNLIMITED' not in line, line)
dimension_lines = slice(lines.index('dimensions:\n') + 1,
lines.index('variables:\n'))
lines[dimension_lines] = sorted(lines[dimension_lines], key=sort_key)
cdl = ''.join(lines)
os.remove(cdl_filename)
self._check_same(cdl, reference_path, type_comparison_name='CDL')
def assertCML(self, cubes, reference_filename=None, checksum=True):
"""
Test that the CML for the given cubes matches the contents of
the reference file.
If the environment variable IRIS_TEST_CREATE_MISSING is
non-empty, the reference file is created if it doesn't exist.
Args:
* cubes:
Either a Cube or a sequence of Cubes.
Kwargs:
* reference_filename:
The relative path (relative to the test results directory).
If omitted, the result is generated from the calling
method's name, class, and module using
:meth:`iris.tests.IrisTest.result_path`.
* checksum:
When True, causes the CML to include a checksum for each
Cube's data. Defaults to True.
"""
if isinstance(cubes, iris.cube.Cube):
cubes = [cubes]
if reference_filename is None:
reference_filename = self.result_path(None, 'cml')
if isinstance(cubes, (list, tuple)):
xml = iris.cube.CubeList(cubes).xml(checksum=checksum, order=False,
byteorder=False)
else:
xml = cubes.xml(checksum=checksum, order=False, byteorder=False)
reference_path = self.get_result_path(reference_filename)
self._check_same(xml, reference_path)
def assertTextFile(self, source_filename, reference_filename, desc="text file"):
"""Check if two text files are the same, printing any diffs."""
with open(source_filename) as source_file:
source_text = source_file.readlines()
with open(reference_filename) as reference_file:
reference_text = reference_file.readlines()
if reference_text != source_text:
diff = ''.join(difflib.unified_diff(reference_text, source_text, 'Reference', 'Test result', '', '', 0))
self.fail("%s does not match reference file: %s\n%s" % (desc, reference_filename, diff))
def assertCubeDataAlmostEqual(self, cube, reference_filename, *args, **kwargs):
reference_path = self.get_result_path(reference_filename)
if self._check_reference_file(reference_path):
kwargs.setdefault('err_msg', 'Reference file %s' % reference_path)
result = np.load(reference_path)
if isinstance(result, np.lib.npyio.NpzFile):
self.assertIsInstance(cube.data, ma.MaskedArray, 'Cube data was not a masked array.')
# Avoid comparing any non-initialised array data.
data = cube.data.filled()
np.testing.assert_array_almost_equal(data, result['data'],
*args, **kwargs)
np.testing.assert_array_equal(cube.data.mask, result['mask'])
else:
np.testing.assert_array_almost_equal(cube.data, result, *args, **kwargs)
else:
self._ensure_folder(reference_path)
logger.warning('Creating result file: %s', reference_path)
if isinstance(cube.data, ma.MaskedArray):
# Avoid recording any non-initialised array data.
data = cube.data.filled()
with open(reference_path, 'wb') as reference_file:
np.savez(reference_file, data=data, mask=cube.data.mask)
else:
with open(reference_path, 'wb') as reference_file:
np.save(reference_file, cube.data)
def assertFilesEqual(self, test_filename, reference_filename):
reference_path = self.get_result_path(reference_filename)
if self._check_reference_file(reference_path):
fmt = 'test file {!r} does not match reference {!r}.'
self.assertTrue(filecmp.cmp(test_filename, reference_path),
fmt.format(test_filename, reference_path))
else:
self._ensure_folder(reference_path)
logger.warning('Creating result file: %s', reference_path)
shutil.copy(test_filename, reference_path)
def assertString(self, string, reference_filename=None):
"""
Test that `string` matches the contents of the reference file.
If the environment variable IRIS_TEST_CREATE_MISSING is
non-empty, the reference file is created if it doesn't exist.
Args:
* string:
The string to check.
Kwargs:
* reference_filename:
The relative path (relative to the test results directory).
If omitted, the result is generated from the calling
method's name, class, and module using
:meth:`iris.tests.IrisTest.result_path`.
"""
if reference_filename is None:
reference_path = self.result_path(None, 'txt')
else:
reference_path = self.get_result_path(reference_filename)
self._check_same(string, reference_path,
type_comparison_name='Strings')
def assertRepr(self, obj, reference_filename):
self.assertString(repr(obj), reference_filename)
def _check_same(self, item, reference_path, type_comparison_name='CML'):
if self._check_reference_file(reference_path):
with open(reference_path, 'rb') as reference_fh:
reference = ''.join(part.decode('utf-8')
for part in reference_fh.readlines())
self._assert_str_same(reference, item, reference_path,
type_comparison_name)
else:
self._ensure_folder(reference_path)
logger.warning('Creating result file: %s', reference_path)
with open(reference_path, 'wb') as reference_fh:
reference_fh.writelines(
part.encode('utf-8')
for part in item)
def assertXMLElement(self, obj, reference_filename):
"""
Calls the xml_element method given obj and asserts the result is the same as the test file.
"""
doc = xml.dom.minidom.Document()
doc.appendChild(obj.xml_element(doc))
pretty_xml = doc.toprettyxml(indent=" ")
reference_path = self.get_result_path(reference_filename)
self._check_same(pretty_xml, reference_path,
type_comparison_name='XML')
def assertArrayEqual(self, a, b, err_msg=''):
np.testing.assert_array_equal(a, b, err_msg=err_msg)
def _assertMaskedArray(self, assertion, a, b, strict, **kwargs):
# Define helper function to extract unmasked values as a 1d
# array.
def unmasked_data_as_1d_array(array):
if array.ndim == 0:
if array.mask:
data = np.array([])
else:
data = np.array([array.data])
else:
data = array.data[~ma.getmaskarray(array)]
return data
# Compare masks. This will also check that the array shapes
# match, which is not tested when comparing unmasked values if
# strict is False.
a_mask, b_mask = ma.getmaskarray(a), ma.getmaskarray(b)
np.testing.assert_array_equal(a_mask, b_mask)
if strict:
assertion(a.data, b.data, **kwargs)
else:
assertion(unmasked_data_as_1d_array(a),
unmasked_data_as_1d_array(b),
**kwargs)
def assertMaskedArrayEqual(self, a, b, strict=False):
"""
Check that masked arrays are equal. This requires the
unmasked values and masks to be identical.
Args:
* a, b (array-like):
Two arrays to compare.
Kwargs:
* strict (bool):
If True, perform a complete mask and data array equality check.
If False (default), the data array equality considers only unmasked
elements.
"""
self._assertMaskedArray(np.testing.assert_array_equal, a, b, strict)
def assertArrayAlmostEqual(self, a, b, decimal=6):
np.testing.assert_array_almost_equal(a, b, decimal=decimal)
def assertMaskedArrayAlmostEqual(self, a, b, decimal=6, strict=False):
"""
Check that masked arrays are almost equal. This requires the
masks to be identical, and the unmasked values to be almost
equal.
Args:
* a, b (array-like):
Two arrays to compare.
Kwargs:
* strict (bool):
If True, perform a complete mask and data array equality check.
If False (default), the data array equality considers only unmasked
elements.
* decimal (int):
Equality tolerance level for
:meth:`numpy.testing.assert_array_almost_equal`, with the meaning
'abs(desired-actual) < 0.5 * 10**(-decimal)'
"""
self._assertMaskedArray(np.testing.assert_array_almost_equal, a, b,
strict, decimal=decimal)
def assertArrayAllClose(self, a, b, rtol=1.0e-7, atol=0.0, **kwargs):
"""
Check arrays are equal, within given relative + absolute tolerances.
Args:
* a, b (array-like):
Two arrays to compare.
Kwargs:
* rtol, atol (float):
Relative and absolute tolerances to apply.
Any additional kwargs are passed to numpy.testing.assert_allclose.
Performs pointwise toleranced comparison, and raises an assertion if
the two are not equal 'near enough'.
For full details see underlying routine numpy.testing.assert_allclose.
"""
np.testing.assert_allclose(a, b, rtol=rtol, atol=atol, **kwargs)
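# Illustrative sketch (inside a test method of an IrisTest subclass): values
# differing only within the given tolerances compare equal, e.g.
#
#     self.assertArrayAllClose([1.0, 2.0], [1.0, 2.0 + 1e-9])        # passes
#     self.assertArrayAllClose([1.0, 2.0], [1.0, 2.1], rtol=0.1)     # passes
#     self.assertArrayAllClose([1.0, 2.0], [1.0, 2.1])               # fails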
@contextlib.contextmanager
def temp_filename(self, suffix=''):
filename = iris.util.create_temp_filename(suffix)
try:
yield filename
finally:
os.remove(filename)
def file_checksum(self, file_path):
"""
Generate checksum from file.
"""
with open(file_path, "rb") as in_file:
return zlib.crc32(in_file.read())
def _unique_id(self):
"""
Returns the unique ID for the current assertion.
The ID is composed of two parts: a unique ID for the current test
(which is itself composed of the module, class, and test names), and
a sequential counter (specific to the current test) that is incremented
on each call.
For example, calls from a "test_tx" routine followed by a "test_ty"
routine might result in::
test_plot.TestContourf.test_tx.0
test_plot.TestContourf.test_tx.1
test_plot.TestContourf.test_tx.2
test_plot.TestContourf.test_ty.0
"""
# Obtain a consistent ID for the current test.
# NB. unittest.TestCase.id() returns different values depending on
# whether the test has been run explicitly, or via test discovery.
# For example:
# python tests/test_plot.py => '__main__.TestContourf.test_tx'
# ird -t => 'iris.tests.test_plot.TestContourf.test_tx'
bits = self.id().split('.')
if bits[0] == '__main__':
floc = sys.modules['__main__'].__file__
path, file_name = os.path.split(os.path.abspath(floc))
bits[0] = os.path.splitext(file_name)[0]
folder, location = os.path.split(path)
bits = [location] + bits
while location not in ['iris', 'example_tests']:
folder, location = os.path.split(folder)
bits = [location] + bits
test_id = '.'.join(bits)
# Derive the sequential assertion ID within the test
assertion_id = self._assertion_counts[test_id]
self._assertion_counts[test_id] += 1
return test_id + '.' + str(assertion_id)
def _check_reference_file(self, reference_path):
reference_exists = os.path.isfile(reference_path)
if not (reference_exists or
os.environ.get('IRIS_TEST_CREATE_MISSING')):
msg = 'Missing test result: {}'.format(reference_path)
raise AssertionError(msg)
return reference_exists
def _ensure_folder(self, path):
dir_path = os.path.dirname(path)
if not os.path.exists(dir_path):
logger.warning('Creating folder: %s', dir_path)
os.makedirs(dir_path)
def _assert_graphic(self):
"""
Check the hash of the current matplotlib figure matches the expected
image hash for the current graphic test.
To create missing image test results, set the IRIS_TEST_CREATE_MISSING
environment variable before running the tests. This will result in new
and appropriately "<hash>.png" image files being generated in the image
output directory, and the imagerepo.json file being updated.
"""
dev_mode = os.environ.get('IRIS_TEST_CREATE_MISSING')
unique_id = self._unique_id()
repo_fname = os.path.join(os.path.dirname(__file__),
'results', 'imagerepo.json')
with open(repo_fname, 'rb') as fi:
repo = json.load(codecs.getreader('utf-8')(fi))
try:
#: The path where the images generated by the tests should go.
image_output_directory = os.path.join(os.path.dirname(__file__),
'result_image_comparison')
if not os.access(image_output_directory, os.W_OK):
if not os.access(os.getcwd(), os.W_OK):
raise IOError('Write access to a local disk is required '
'to run image tests. Run the tests from a '
'current working directory you have write '
'access to, in order to avoid this issue.')
else:
image_output_directory = os.path.join(
os.getcwd(), 'iris_image_test_output')
result_fname = os.path.join(image_output_directory,
'result-' + unique_id + '.png')
if not os.path.isdir(os.path.dirname(result_fname)):
# Handle race-condition where the directories are
# created sometime between the check above and the
# creation attempt below.
try:
os.makedirs(os.path.dirname(result_fname))
except OSError as err:
# Don't care about "File exists"
if err.errno != 17:
raise
def _save_figure_hash():
plt.gcf().savefig(result_fname)
# Determine the test result image hash using sha1.
with open(result_fname, 'rb') as fi:
sha1 = hashlib.sha1(fi.read())
return sha1
def _create_missing():
fname = sha1.hexdigest() + '.png'
base_uri = ('https://scitools.github.io/test-images-scitools/'
'image_files/{}')
uri = base_uri.format(fname)
hash_fname = os.path.join(image_output_directory, fname)
uris = repo.setdefault(unique_id, [])
uris.append(uri)
print('Creating image file: {}'.format(hash_fname))
os.rename(result_fname, hash_fname)
msg = 'Creating imagerepo entry: {} -> {}'
print(msg.format(unique_id, uri))
with open(repo_fname, 'wb') as fo:
json.dump(repo, codecs.getwriter('utf-8')(fo), indent=4,
sort_keys=True)
sha1 = _save_figure_hash()
if unique_id not in repo:
if dev_mode:
_create_missing()
else:
emsg = 'Missing image test result: {}.'
raise ValueError(emsg.format(unique_id))
else:
uris = repo[unique_id]
# Cherry-pick the registered expected hashes from the
# test case uri/s.
expected = [os.path.splitext(os.path.basename(uri))[0]
for uri in uris]
if sha1.hexdigest() not in expected:
# This can be an accidental failure, unusual, but it occurs
# https://github.com/SciTools/iris/issues/2195
# retry once, in case it passes second time round.
sha1 = _save_figure_hash()
if sha1.hexdigest() not in expected:
if dev_mode:
_create_missing()
else:
emsg = 'Actual SHA1 {} not in expected {} for test {}.'
emsg = emsg.format(sha1.hexdigest(), expected,
unique_id)
if _DISPLAY_FIGURES:
print('Image comparison would have failed. '
'Message: %s' % emsg)
else:
raise ValueError('Image comparison failed. '
'Message: {}'.format(emsg))
else:
# There is no difference between the actual and expected
# result, so remove the actual result file.
os.remove(result_fname)
if _DISPLAY_FIGURES:
plt.show()
finally:
plt.close()
def check_graphic(self, tol=None):
"""
Checks that the image hash for the current matplotlib figure matches
the expected image hash for the current test.
"""
fname = os.path.join(os.path.dirname(__file__),
'results', 'imagerepo.lock')
lock = filelock.FileLock(fname)
# The imagerepo.json file is a critical resource, so ensure thread
# safe read/write behaviour via platform independent file locking.
with lock.acquire(timeout=600):
self._assert_graphic()
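# Illustrative sketch of a graphics test (the plotted cube and plot call are
# hypothetical):
#
#     @tests.skip_plot
#     class TestMyPlot(tests.GraphicsTest):
#         def test_contourf(self):
#             iris.plot.contourf(cube)
#             self.check_graphic()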
def _remove_testcase_patches(self):
"""Helper to remove per-testcase patches installed by :meth:`patch`."""
# Remove all patches made, ignoring errors.
for p in self.testcase_patches:
p.stop()
# Reset per-test patch control variable.
self.testcase_patches.clear()
def patch(self, *args, **kwargs):
"""
Install a mock.patch, to be removed after the current test.
The patch is created with mock.patch(*args, **kwargs).
Returns:
The substitute object returned by patch.start().
For example::
mock_call = self.patch('module.Class.call', return_value=1)
module_Class_instance.call(3, 4)
self.assertEqual(mock_call.call_args_list, [mock.call(3, 4)])
"""
# Make the new patch and start it.
patch = mock.patch(*args, **kwargs)
start_result = patch.start()
# Create the per-testcases control variable if it does not exist.
# NOTE: this mimics a setUp method, but continues to work when a
# subclass defines its own setUp.
if not hasattr(self, 'testcase_patches'):
self.testcase_patches = {}
# When installing the first patch, schedule remove-all at cleanup.
if not self.testcase_patches:
self.addCleanup(self._remove_testcase_patches)
# Record the new patch and start object for reference.
self.testcase_patches[patch] = start_result
# Return patch replacement object.
return start_result
def assertArrayShapeStats(self, result, shape, mean, std_dev):
"""
Assert that the result, a cube, has the provided shape and that the
mean and standard deviation of the data array are also as provided.
Thus build confidence that a cube processing operation, such as a
cube.regrid, has maintained its behaviour.
"""
self.assertEqual(result.shape, shape)
self.assertAlmostEqual(result.data.mean(), mean, places=5)
self.assertAlmostEqual(result.data.std(), std_dev, places=5)
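# Illustrative sketch (the expected shape and statistics are hypothetical):
#
#     result = cube.regrid(target_cube, iris.analysis.Linear())
#     self.assertArrayShapeStats(result, (73, 96), 280.35, 15.02)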
get_result_path = IrisTest.get_result_path
class GraphicsTest(IrisTest):
def setUp(self):
# Make sure we have no unclosed plots from previous tests before
# generating this one.
if MPL_AVAILABLE:
plt.close('all')
def tearDown(self):
# If a plotting test bombs out it can leave the current figure
# in an odd state, so we make sure it's been disposed of.
if MPL_AVAILABLE:
plt.close('all')
class TestGribMessage(IrisTest):
def assertGribMessageContents(self, filename, contents):
"""
Evaluate whether all messages in a GRIB2 file contain the provided
contents.
* filename (string)
The path on disk of an existing GRIB file
* contents
An iterable of GRIB message keys and expected values.
"""
messages = GribMessage.messages_from_filename(filename)
for message in messages:
for element in contents:
section, key, val = element
self.assertEqual(message.sections[section][key], val)
def assertGribMessageDifference(self, filename1, filename2, diffs,
skip_keys=(), skip_sections=()):
"""
Evaluate that the two messages only differ in the ways specified.
* filename[0|1] (string)
The path on disk of existing GRIB files
* diffs
A dictionary of GRIB message keys and expected diff values:
{key: (m1val, m2val),...} .
* skip_keys
An iterable of key names to ignore during comparison.
* skip_sections
An iterable of section numbers to ignore during comparison.
"""
messages1 = list(GribMessage.messages_from_filename(filename1))
messages2 = list(GribMessage.messages_from_filename(filename2))
self.assertEqual(len(messages1), len(messages2))
for m1, m2 in zip(messages1, messages2):
m1_sect = set(m1.sections.keys())
m2_sect = set(m2.sections.keys())
for missing_section in (m1_sect ^ m2_sect):
what = ('introduced'
if missing_section in m1_sect else 'removed')
# Assert that an introduced section is in the diffs.
self.assertIn(missing_section, skip_sections,
msg='Section {} {}'.format(missing_section,
what))
for section in (m1_sect & m2_sect):
# For each section, check that the differences are
# known diffs.
m1_keys = set(m1.sections[section]._keys)
m2_keys = set(m2.sections[section]._keys)
difference = m1_keys ^ m2_keys
unexpected_differences = difference - set(skip_keys)
if unexpected_differences:
self.fail("There were keys in section {} which \n"
"weren't in both messages and which weren't "
"skipped.\n{}"
"".format(section,
', '.join(unexpected_differences)))
keys_to_compare = m1_keys & m2_keys - set(skip_keys)
for key in keys_to_compare:
m1_value = m1.sections[section][key]
m2_value = m2.sections[section][key]
msg = '{} {} != {}'
if key not in diffs:
# We have a key which we expect to be the same for
# both messages.
if isinstance(m1_value, np.ndarray):
# A large tolerance appears to be required for
# gribapi 1.12, but not for 1.14.
self.assertArrayAlmostEqual(m1_value, m2_value,
decimal=2)
else:
self.assertEqual(m1_value, m2_value,
msg=msg.format(key, m1_value,
m2_value))
else:
# We have a key which we expect to be different
# for each message.
self.assertEqual(m1_value, diffs[key][0],
msg=msg.format(key, m1_value,
diffs[key][0]))
self.assertEqual(m2_value, diffs[key][1],
msg=msg.format(key, m2_value,
diffs[key][1]))
def skip_data(fn):
"""
Decorator to choose whether to run tests, based on the availability of
external data.
Example usage:
@skip_data
class MyDataTests(tests.IrisTest):
...
"""
no_data = (not iris.config.TEST_DATA_DIR
or not os.path.isdir(iris.config.TEST_DATA_DIR)
or os.environ.get('IRIS_TEST_NO_DATA'))
skip = unittest.skipIf(
condition=no_data,
reason='Test(s) require external data.')
return skip(fn)
def skip_gdal(fn):
"""
Decorator to choose whether to run tests, based on the availability of the
GDAL library.
Example usage:
@skip_gdal
class MyGeoTiffTests(test.IrisTest):
...
"""
skip = unittest.skipIf(
condition=not GDAL_AVAILABLE,
reason="Test requires 'gdal'.")
return skip(fn)
def skip_plot(fn):
"""
Decorator to choose whether to run tests, based on the availability of the
matplotlib library.
Example usage:
@skip_plot
class MyPlotTests(test.GraphicsTest):
...
"""
skip = unittest.skipIf(
condition=not MPL_AVAILABLE,
reason='Graphics tests require the matplotlib library.')
return skip(fn)
skip_grib = unittest.skipIf(not GRIB_AVAILABLE, 'Test(s) require "gribapi", '
'which is not available.')
skip_sample_data = unittest.skipIf(not SAMPLE_DATA_AVAILABLE,
('Test(s) require "iris_sample_data", '
'which is not available.'))
skip_nc_time_axis = unittest.skipIf(
not NC_TIME_AXIS_AVAILABLE,
'Test(s) require "nc_time_axis", which is not available.')
def no_warnings(func):
"""
Provides a decorator to ensure that there are no warnings raised
within the test, otherwise the test will fail.
"""
@functools.wraps(func)
def wrapped(self, *args, **kwargs):
with mock.patch('warnings.warn') as warn:
result = func(self, *args, **kwargs)
self.assertEqual(0, warn.call_count,
('Got unexpected warnings.'
' \n{}'.format(warn.call_args_list)))
return result
return wrapped
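# --- Editorial sketch (not part of the original module) ----------------------
# A hypothetical use of the `no_warnings` decorator defined above; the test
# class name and the cube-loading call are illustrative only:
#
#     class TestQuietLoad(IrisTest):
#         @no_warnings
#         def test_load_emits_no_warnings(self):
#             iris.load_cube('some_file.nc')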
| lgpl-3.0 |
chappers/sklearn-recipes | streaming_take2/ogfs_classifier.py | 3 | 9060 | import sklearn
from sklearn.datasets import make_regression, make_classification
from sklearn.linear_model import SGDRegressor, SGDClassifier
import pandas as pd
import numpy as np
import SPEC
from scipy import stats
from sklearn.metrics.pairwise import rbf_kernel
from sklearn.mixture import BayesianGaussianMixture
def similarity_within_class(X, y):
return SPEC.similarity_classification(X, y)
def similarity_between_class(X, y):
"""
    Calculates the between-class affinity matrix for X (data) and y (labels);
    note that it only considers the labels.
"""
y_series = pd.Series(y)
y_val = y_series.value_counts(normalize=True)
n_inv = 1.0/len(set(y))
y_size = len(y)
sim_matrix = np.zeros((len(y), len(y)))
for s_i in range(y_size):
for s_j in range(y_size):
sim_matrix[s_i, s_j] = n_inv - y_val[y[s_i]] if y[s_i] == y[s_j] else n_inv
return sim_matrix
def convert_to_deciles(y, n=10, gmm=False):
"""
By default converts to deciles, can be changed based on choice of n.
"""
if gmm:
# this is experimental
bgm = BayesianGaussianMixture(n_components=10)
bgm.fit(y.reshape(-1, 1))
return bgm.predict(y.reshape(-1, 1))
return np.array(pd.cut(y, n, labels=range(n)))
def spec_supervised(X, y, is_classification=True):
if not is_classification:
y = convert_to_deciles(y, 10, gmm=False)
# sample X if it is too big...
instances_count = X.shape[0]
if instances_count > 1000:
idx = np.random.randint(instances_count, size=1000)
X = X[idx, :]
y = y[idx]
W_w = similarity_within_class(X, y)
W_b = similarity_between_class(X, y)
s_w = SPEC.spec(**{'X': X, 'y': y, 'style':0, 'mode': 'raw', 'W': W_w})
s_b = SPEC.spec(**{'X': X, 'y': y, 'style':0, 'mode': 'raw', 'W': W_b})
return s_b, s_w
def evaluate_feats1(s_b, s_w, highest_best=True):
curr_u1 = []
curr_u2 = []
my_feats = []
prev_score = None
X = s_b/s_w
eval_order = np.argsort(X).flatten()
if highest_best:
eval_order = eval_order[::-1]
for idx in list(eval_order):
if prev_score is None:
curr_u1.append(s_b[idx])
curr_u2.append(s_w[idx])
my_feats.append(idx)
else:
test_u1 = curr_u1[:]
test_u2 = curr_u2[:]
test_u1.append(s_b[idx])
test_u2.append(s_w[idx])
score = ((np.sum(test_u1)/np.sum(test_u2)) - prev_score)
if score > 0.001:
my_feats.append(idx)
curr_u1.append(s_b[idx])
curr_u2.append(s_w[idx])
prev_score = np.sum(curr_u1)/np.sum(curr_u2)
return list(my_feats)
def evaluate_feats2(X, alpha=0.05, highest_best=True):
"""
    X is the raw scores
    alpha is the level of significance
    This version uses a t-test to decide when to stop accepting features.
Returns: set of indices indicating selected features.
"""
eval_order = np.argsort(X)
if highest_best:
eval_order = eval_order[::-1]
selected_feats = []
selected_idx = []
for idx in eval_order:
if len(selected_feats) == 0:
selected_feats.append(X[idx])
selected_idx.append(idx)
continue
# now continue on and decide what to do
mu = np.mean(selected_feats)
sigma = np.std(selected_feats)
U = len(selected_feats)
if sigma == 0.0 and U > 1:
return selected_idx
elif sigma == 0.0:
selected_feats.append(X[idx])
selected_idx.append(idx)
continue
# otherwise compute score for T test.
t_stat = (mu - X[idx])/(sigma/np.sqrt(U))
t_alpha = stats.t.pdf(t_stat, U)
if t_alpha <= alpha:
selected_feats.append(X[idx])
selected_idx.append(idx)
else:
return selected_idx
return selected_idx
def evaluate_feats(s_b, s_w, alpha=0.05):
set1 = evaluate_feats1(s_b,s_w)
set2 = evaluate_feats2(s_b/s_w, alpha)
return list(set(set1 + set2))
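# --- Editorial sketch (not part of the original module) ----------------------
# Hypothetical end-to-end use of the helpers above on one labelled batch
# (X: 2-D numpy array of features, y: 1-D label array); `selected` holds the
# column indices kept by the spectral scoring plus t-test screening:
#
#     s_b, s_w = spec_supervised(X, y, is_classification=True)
#     selected = evaluate_feats(s_b, s_w, alpha=0.05)
#     X_reduced = X[:, selected]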
import pandas
class OGFSClassifier(SGDClassifier):
def __init__(self, loss="log", penalty='l2', alpha=0.0001, l1_ratio=0.15,
fit_intercept=True, max_iter=None, tol=None, shuffle=True,
verbose=0, epsilon=0.1, n_jobs=1,
random_state=None, learning_rate="optimal", eta0=0.0,
power_t=0.5, class_weight=None, warm_start=False,
average=False, n_iter=None,
intragroup_alpha=0.05, intergroup_thres=None):
super(OGFSClassifier, self).__init__(
loss=loss, penalty=penalty, alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=fit_intercept, max_iter=max_iter, tol=tol,
shuffle=shuffle, verbose=verbose, epsilon=epsilon, n_jobs=n_jobs,
random_state=random_state, learning_rate=learning_rate, eta0=eta0,
power_t=power_t, class_weight=class_weight, warm_start=warm_start,
average=average, n_iter=n_iter)
"""
intragroup_alpha : the alpha level of t-test used to determine significance
intergroup_thres : the threshold for lasso to remove redundancy
"""
self.coef_info = {'cols': [], 'coef':[], 'excluded_cols': []}
self.seen_cols = []
self.base_shape = None
self.intragroup_alpha = intragroup_alpha
self.intergroup_thres = intergroup_thres if intergroup_thres is not None else epsilon
def add_column_exclusion(self, cols):
self.coef_info['excluded_cols'] = self.coef_info['excluded_cols'] + cols
def _fit_columns(self, X_, return_x=True, transform_only=False):
"""
        Method to filter out "unselected" columns. The goal of this
        method is to drop any uninformative columns.
        Selection is based on the column labels only.
        If return_x is False, it is intended to return only the boolean mask.
"""
X = X_[X_.columns.difference(self.coef_info['excluded_cols'])]
# order the columns correctly...
col_order = self.coef_info['cols'] + list([x for x in X.columns if x not in self.coef_info['cols']])
X = X[col_order]
return X
def _reg_penalty(self, X):
col_coef = [(col, coef) for col, coef in zip(X.columns.tolist(), self.coef_.flatten()) if np.abs(coef) >= self.intergroup_thres]
self.coef_info['cols'] = [x for x, _ in col_coef]
self.coef_info['coef'] = [x for _, x in col_coef]
self.coef_info['excluded_cols'] = [x for x in self.seen_cols if x not in self.coef_info['cols']]
self.coef_ = np.array(self.coef_info['coef']).reshape(1, -1)
def _spectral_sel(self, X_, y):
"""
        Online group feature selection step used during (partial) fitting:
        performs spectral analysis on the incoming feature set and then
        expands the coefficient listing with the selected columns.
"""
X = np.array(X_)
s_b, s_w = spec_supervised(X, y, True)
col_sel = X_.columns[evaluate_feats(s_b, s_w)]
sel_cols = list(self.coef_info['cols']) + list(col_sel)
# update removed columns
self.coef_info['excluded_cols'] = [col for col in self.seen_cols if col not in sel_cols]
def fit(self, X, y, coef_init=None, intercept_init=None,
sample_weight=None):
X_ = X.copy()
self.coef_info = {'cols': [], 'coef':[], 'excluded_cols': []}
self.seen_cols = list(set(self.seen_cols + X.columns.tolist()))
self.base_shape = None
# TODO: add the spectral selection here
self._spectral_sel(X, y)
X = self._fit_columns(X)
super(OGFSClassifier, self).fit(X, y, coef_init=coef_init, intercept_init=intercept_init,
sample_weight=sample_weight)
self._reg_penalty(X)
return self
def partial_fit(self, X, y, sample_weight=None):
X_ = X.copy()
self.seen_cols = list(set(self.seen_cols + X.columns.tolist()))
X = X[X.columns.difference(self.coef_info['excluded_cols'])]
# TODO: add the spectral selection here
# it should only consider "unseen"
self._spectral_sel(X[X.columns.difference(self.coef_info['cols'])], y)
X = self._fit_columns(X)
# now update coefficients
n_samples, n_features = X.shape
coef_list = np.zeros(n_features, dtype=np.float64, order="C")
coef_list[:len(self.coef_info['coef'])] = self.coef_info['coef']
self.coef_ = np.array(coef_list).reshape(1, -1)
super(OGFSClassifier, self).partial_fit(X, y, sample_weight=None)
self._reg_penalty(X)
return self
def predict(self, X):
X = self._fit_columns(X, transform_only=True)
return super(OGFSClassifier, self).predict(X)
def predict_proba(self, X):
X = self._fit_columns(X, transform_only=True)
return super(OGFSClassifier, self).predict_proba(X)
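# --- Editorial sketch (not part of the original module) ----------------------
# Hypothetical streaming usage of OGFSClassifier; the DataFrames and column
# names below are made up for illustration. `fit` expects a pandas DataFrame
# (column labels are tracked in `coef_info`), and later batches may introduce
# new columns that `partial_fit` screens with the spectral selection step:
#
#     clf = OGFSClassifier()
#     clf.fit(pd.DataFrame(X0, columns=['f0', 'f1', 'f2']), y0)
#     clf.partial_fit(pd.DataFrame(X1, columns=['f0', 'f1', 'f2', 'f3']), y1)
#     preds = clf.predict(pd.DataFrame(X2, columns=['f0', 'f1', 'f2', 'f3']))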
| mit |
ErBa508/data-science-from-scratch | code/gradient_descent.py | 53 | 5895 | from __future__ import division
from collections import Counter
from linear_algebra import distance, vector_subtract, scalar_multiply
import math, random
def sum_of_squares(v):
"""computes the sum of squared elements in v"""
return sum(v_i ** 2 for v_i in v)
def difference_quotient(f, x, h):
return (f(x + h) - f(x)) / h
def plot_estimated_derivative():
def square(x):
return x * x
def derivative(x):
return 2 * x
derivative_estimate = lambda x: difference_quotient(square, x, h=0.00001)
# plot to show they're basically the same
import matplotlib.pyplot as plt
x = range(-10,10)
plt.plot(x, map(derivative, x), 'rx') # red x
plt.plot(x, map(derivative_estimate, x), 'b+') # blue +
plt.show() # purple *, hopefully
def partial_difference_quotient(f, v, i, h):
# add h to just the i-th element of v
w = [v_j + (h if j == i else 0)
for j, v_j in enumerate(v)]
return (f(w) - f(v)) / h
def estimate_gradient(f, v, h=0.00001):
return [partial_difference_quotient(f, v, i, h)
for i, _ in enumerate(v)]
def step(v, direction, step_size):
"""move step_size in the direction from v"""
return [v_i + step_size * direction_i
for v_i, direction_i in zip(v, direction)]
def sum_of_squares_gradient(v):
return [2 * v_i for v_i in v]
def safe(f):
"""define a new function that wraps f and return it"""
def safe_f(*args, **kwargs):
try:
return f(*args, **kwargs)
except:
return float('inf') # this means "infinity" in Python
return safe_f
#
#
# minimize / maximize batch
#
#
def minimize_batch(target_fn, gradient_fn, theta_0, tolerance=0.000001):
"""use gradient descent to find theta that minimizes target function"""
step_sizes = [100, 10, 1, 0.1, 0.01, 0.001, 0.0001, 0.00001]
theta = theta_0 # set theta to initial value
target_fn = safe(target_fn) # safe version of target_fn
value = target_fn(theta) # value we're minimizing
while True:
gradient = gradient_fn(theta)
next_thetas = [step(theta, gradient, -step_size)
for step_size in step_sizes]
# choose the one that minimizes the error function
next_theta = min(next_thetas, key=target_fn)
next_value = target_fn(next_theta)
# stop if we're "converging"
if abs(value - next_value) < tolerance:
return theta
else:
theta, value = next_theta, next_value
def negate(f):
"""return a function that for any input x returns -f(x)"""
return lambda *args, **kwargs: -f(*args, **kwargs)
def negate_all(f):
"""the same when f returns a list of numbers"""
return lambda *args, **kwargs: [-y for y in f(*args, **kwargs)]
def maximize_batch(target_fn, gradient_fn, theta_0, tolerance=0.000001):
return minimize_batch(negate(target_fn),
negate_all(gradient_fn),
theta_0,
tolerance)
#
# minimize / maximize stochastic
#
def in_random_order(data):
"""generator that returns the elements of data in random order"""
indexes = [i for i, _ in enumerate(data)] # create a list of indexes
random.shuffle(indexes) # shuffle them
for i in indexes: # return the data in that order
yield data[i]
def minimize_stochastic(target_fn, gradient_fn, x, y, theta_0, alpha_0=0.01):
data = zip(x, y)
theta = theta_0 # initial guess
alpha = alpha_0 # initial step size
min_theta, min_value = None, float("inf") # the minimum so far
iterations_with_no_improvement = 0
# if we ever go 100 iterations with no improvement, stop
while iterations_with_no_improvement < 100:
value = sum( target_fn(x_i, y_i, theta) for x_i, y_i in data )
if value < min_value:
# if we've found a new minimum, remember it
# and go back to the original step size
min_theta, min_value = theta, value
iterations_with_no_improvement = 0
alpha = alpha_0
else:
# otherwise we're not improving, so try shrinking the step size
iterations_with_no_improvement += 1
alpha *= 0.9
# and take a gradient step for each of the data points
for x_i, y_i in in_random_order(data):
gradient_i = gradient_fn(x_i, y_i, theta)
theta = vector_subtract(theta, scalar_multiply(alpha, gradient_i))
return min_theta
def maximize_stochastic(target_fn, gradient_fn, x, y, theta_0, alpha_0=0.01):
return minimize_stochastic(negate(target_fn),
negate_all(gradient_fn),
x, y, theta_0, alpha_0)
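# --- Editorial sketch (not part of the original module) ----------------------
# Hypothetical wiring of minimize_stochastic for least-squares line fitting;
# the per-point error and gradient functions below are assumptions, not
# defined anywhere in this file, and xs/ys stand for your own data:
#
#     def squared_error(x_i, y_i, theta):
#         beta, alpha = theta
#         return (y_i - (beta * x_i + alpha)) ** 2
#
#     def squared_error_gradient(x_i, y_i, theta):
#         beta, alpha = theta
#         err = y_i - (beta * x_i + alpha)
#         return [-2 * err * x_i, -2 * err]   # d/dbeta, d/dalpha
#
#     theta = minimize_stochastic(squared_error, squared_error_gradient,
#                                 xs, ys, theta_0=[1.0, 0.0])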
if __name__ == "__main__":
print "using the gradient"
v = [random.randint(-10,10) for i in range(3)]
tolerance = 0.0000001
while True:
#print v, sum_of_squares(v)
gradient = sum_of_squares_gradient(v) # compute the gradient at v
next_v = step(v, gradient, -0.01) # take a negative gradient step
if distance(next_v, v) < tolerance: # stop if we're converging
break
v = next_v # continue if we're not
print "minimum v", v
print "minimum value", sum_of_squares(v)
print
print "using minimize_batch"
v = [random.randint(-10,10) for i in range(3)]
v = minimize_batch(sum_of_squares, sum_of_squares_gradient, v)
print "minimum v", v
print "minimum value", sum_of_squares(v)
| unlicense |
gviejo/ThalamusPhysio | python/main_make_ISOMAP_HD_parallel.py | 1 | 4902 | #!/usr/bin/env python
'''
File name: main_ripp_mod.py
Author: Guillaume Viejo
Date created: 16/08/2017
Python Version: 3.5.2
'''
import sys
import numpy as np
import pandas as pd
import scipy.io
from functions import *
# from pylab import *
# import ipyparallel
from multiprocessing import Pool
import os
import neuroseries as nts
from time import time
from pylab import *
from sklearn.manifold import Isomap
from mpl_toolkits.mplot3d import Axes3D
import _pickle as cPickle
####################################################################################################################
# FUNCTIONS
####################################################################################################################
dview = Pool(4)
data_directory = '/mnt/DataGuillaume/MergedData/'
datasets = np.loadtxt(data_directory+'datasets_ThalHpc.list', delimiter = '\n', dtype = str, comments = '#')
for session in datasets[4:]:
hd_info = scipy.io.loadmat(data_directory+session+'/Analysis/HDCells.mat')['hdCellStats'][:,-1]
if np.sum(hd_info == 1)>10:
generalinfo = scipy.io.loadmat(data_directory+session+'/Analysis/GeneralInfo.mat')
shankStructure = loadShankStructure(generalinfo)
if len(generalinfo['channelStructure'][0][0][1][0]) == 2:
hpc_channel = generalinfo['channelStructure'][0][0][1][0][1][0][0] - 1
else:
hpc_channel = generalinfo['channelStructure'][0][0][1][0][0][0][0] - 1
spikes,shank = loadSpikeData(data_directory+session+'/Analysis/SpikeData.mat', shankStructure['thalamus'])
n_channel,fs, shank_to_channel = loadXML(data_directory+session+"/"+session.split("/")[1]+'.xml')
wake_ep = loadEpoch(data_directory+session, 'wake')
sleep_ep = loadEpoch(data_directory+session, 'sleep')
sws_ep = loadEpoch(data_directory+session, 'sws')
rem_ep = loadEpoch(data_directory+session, 'rem')
sleep_ep = sleep_ep.merge_close_intervals(threshold=1.e3)
sws_ep = sleep_ep.intersect(sws_ep)
rem_ep = sleep_ep.intersect(rem_ep)
rip_ep,rip_tsd = loadRipples(data_directory+session)
rip_ep = sws_ep.intersect(rip_ep)
rip_tsd = rip_tsd.restrict(sws_ep)
speed = loadSpeed(data_directory+session+'/Analysis/linspeed.mat').restrict(wake_ep)
hd_info = scipy.io.loadmat(data_directory+session+'/Analysis/HDCells.mat')['hdCellStats'][:,-1]
hd_info_neuron = np.array([hd_info[n] for n in spikes.keys()])
spikes = {k:spikes[k] for k in np.where(hd_info_neuron==1)[0] if k not in []}
neurons = np.sort(list(spikes.keys()))
print(session, len(neurons))
bin_size = 30
# left_bound = np.arange(-1000-bin_size/2, 1000 - bin_size/4,bin_size/4) # 75% overlap
left_bound = np.arange(-1000 - bin_size/3/2, 1000 - bin_size/2, bin_size/2) # 50% overlap
# left_bound = np.arange(-1000-bin_size+3*bin_size/4, 1000 - 3*bin_size/4,3*bin_size/4) # 25% overlap
obins = np.vstack((left_bound, left_bound+bin_size)).T
times = obins[:,0]+(np.diff(obins)/2).flatten()
# sys.exit()
# cutting times between -500 to 500
times = times[np.logical_and(times>=-500, times<=500)]
# datatosave = {'times':times, 'swr':{}, 'rnd':{}, 'bin_size':bin_size}
datatosave = {'times':times, 'imaps':{}, 'bin_size':bin_size}
n_ex = 150
n_rip = len(rip_tsd)
n_loop = n_rip//n_ex
idx = np.random.randint(0, n_loop, n_rip)
####################################################################################################################
# WAKE
####################################################################################################################
bin_size_wake = 400
wake_ep = wake_ep.intersect(nts.IntervalSet(start=wake_ep.loc[0,'start'], end = wake_ep.loc[0,'start']+15*60*1e6))
bins = np.arange(wake_ep.as_units('ms').start.iloc[0], wake_ep.as_units('ms').end.iloc[-1]+bin_size_wake, bin_size_wake)
spike_counts = pd.DataFrame(index = bins[0:-1]+np.diff(bins)/2, columns = neurons)
for i in neurons:
spks = spikes[i].as_units('ms').index.values
spike_counts[i], _ = np.histogram(spks, bins)
rates_wak = np.sqrt(spike_counts/(bin_size_wake))
args = []
for i in range(n_loop):
# for i in range(10):
args.append([spikes, rip_tsd, idx, obins, neurons, bin_size, sws_ep, n_ex, times, rates_wak, i])
print(n_loop)
result = dview.starmap_async(compute_isomap, args).get()
for i in range(len(result)):
datatosave['imaps'][i] = {'swr':result[i][0], 'rnd':result[i][1], 'wak':result[i][2]}
####################################################################################################################
# SAVING
####################################################################################################################
cPickle.dump(datatosave, open('../figures/figures_articles_v4/figure1/hd_isomap_'+str(bin_size)+'ms_mixed_swr_rnd_wake/'+session.split("/")[1]+'.pickle', 'wb'))
| gpl-3.0 |
nhejazi/scikit-learn | sklearn/manifold/tests/test_locally_linear.py | 85 | 5600 | from itertools import product
import numpy as np
from numpy.testing import assert_almost_equal, assert_array_almost_equal
from scipy import linalg
from sklearn import neighbors, manifold
from sklearn.manifold.locally_linear import barycenter_kneighbors_graph
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
eigen_solvers = ['dense', 'arpack']
# ----------------------------------------------------------------------
# Test utility routines
def test_barycenter_kneighbors_graph():
X = np.array([[0, 1], [1.01, 1.], [2, 0]])
A = barycenter_kneighbors_graph(X, 1)
assert_array_almost_equal(
A.toarray(),
[[0., 1., 0.],
[1., 0., 0.],
[0., 1., 0.]])
A = barycenter_kneighbors_graph(X, 2)
# check that columns sum to one
assert_array_almost_equal(np.sum(A.toarray(), 1), np.ones(3))
pred = np.dot(A.toarray(), X)
assert_less(linalg.norm(pred - X) / X.shape[0], 1)
# ----------------------------------------------------------------------
# Test LLE by computing the reconstruction error on some manifolds.
def test_lle_simple_grid():
# note: ARPACK is numerically unstable, so this test will fail for
# some random seeds. We choose 2 because the tests pass.
rng = np.random.RandomState(2)
# grid of equidistant points in 2D, n_components = n_dim
X = np.array(list(product(range(5), repeat=2)))
X = X + 1e-10 * rng.uniform(size=X.shape)
n_components = 2
clf = manifold.LocallyLinearEmbedding(n_neighbors=5,
n_components=n_components,
random_state=rng)
tol = 0.1
N = barycenter_kneighbors_graph(X, clf.n_neighbors).toarray()
reconstruction_error = linalg.norm(np.dot(N, X) - X, 'fro')
assert_less(reconstruction_error, tol)
for solver in eigen_solvers:
clf.set_params(eigen_solver=solver)
clf.fit(X)
assert_true(clf.embedding_.shape[1] == n_components)
reconstruction_error = linalg.norm(
np.dot(N, clf.embedding_) - clf.embedding_, 'fro') ** 2
assert_less(reconstruction_error, tol)
assert_almost_equal(clf.reconstruction_error_,
reconstruction_error, decimal=1)
# re-embed a noisy version of X using the transform method
noise = rng.randn(*X.shape) / 100
X_reembedded = clf.transform(X + noise)
assert_less(linalg.norm(X_reembedded - clf.embedding_), tol)
def test_lle_manifold():
rng = np.random.RandomState(0)
# similar test on a slightly more complex manifold
X = np.array(list(product(np.arange(18), repeat=2)))
X = np.c_[X, X[:, 0] ** 2 / 18]
X = X + 1e-10 * rng.uniform(size=X.shape)
n_components = 2
for method in ["standard", "hessian", "modified", "ltsa"]:
clf = manifold.LocallyLinearEmbedding(n_neighbors=6,
n_components=n_components,
method=method, random_state=0)
tol = 1.5 if method == "standard" else 3
N = barycenter_kneighbors_graph(X, clf.n_neighbors).toarray()
reconstruction_error = linalg.norm(np.dot(N, X) - X)
assert_less(reconstruction_error, tol)
for solver in eigen_solvers:
clf.set_params(eigen_solver=solver)
clf.fit(X)
assert_true(clf.embedding_.shape[1] == n_components)
reconstruction_error = linalg.norm(
np.dot(N, clf.embedding_) - clf.embedding_, 'fro') ** 2
details = ("solver: %s, method: %s" % (solver, method))
assert_less(reconstruction_error, tol, msg=details)
assert_less(np.abs(clf.reconstruction_error_ -
reconstruction_error),
tol * reconstruction_error, msg=details)
# Test the error raised when parameter passed to lle is invalid
def test_lle_init_parameters():
X = np.random.rand(5, 3)
clf = manifold.LocallyLinearEmbedding(eigen_solver="error")
msg = "unrecognized eigen_solver 'error'"
assert_raise_message(ValueError, msg, clf.fit, X)
clf = manifold.LocallyLinearEmbedding(method="error")
msg = "unrecognized method 'error'"
assert_raise_message(ValueError, msg, clf.fit, X)
def test_pipeline():
# check that LocallyLinearEmbedding works fine as a Pipeline
# only checks that no error is raised.
# TODO check that it actually does something useful
from sklearn import pipeline, datasets
X, y = datasets.make_blobs(random_state=0)
clf = pipeline.Pipeline(
[('filter', manifold.LocallyLinearEmbedding(random_state=0)),
('clf', neighbors.KNeighborsClassifier())])
clf.fit(X, y)
assert_less(.9, clf.score(X, y))
# Test the error raised when the weight matrix is singular
def test_singular_matrix():
M = np.ones((10, 3))
f = ignore_warnings
assert_raises(ValueError, f(manifold.locally_linear_embedding),
M, 2, 1, method='standard', eigen_solver='arpack')
# regression test for #6033
def test_integer_input():
rand = np.random.RandomState(0)
X = rand.randint(0, 100, size=(20, 3))
for method in ["standard", "hessian", "modified", "ltsa"]:
clf = manifold.LocallyLinearEmbedding(method=method, n_neighbors=10)
clf.fit(X) # this previously raised a TypeError
| bsd-3-clause |
ElDeveloper/scikit-learn | sklearn/manifold/isomap.py | 229 | 7169 | """Isomap for manifold learning"""
# Author: Jake Vanderplas -- <[email protected]>
# License: BSD 3 clause (C) 2011
import numpy as np
from ..base import BaseEstimator, TransformerMixin
from ..neighbors import NearestNeighbors, kneighbors_graph
from ..utils import check_array
from ..utils.graph import graph_shortest_path
from ..decomposition import KernelPCA
from ..preprocessing import KernelCenterer
class Isomap(BaseEstimator, TransformerMixin):
"""Isomap Embedding
Non-linear dimensionality reduction through Isometric Mapping
Read more in the :ref:`User Guide <isomap>`.
Parameters
----------
n_neighbors : integer
number of neighbors to consider for each point.
n_components : integer
number of coordinates for the manifold
eigen_solver : ['auto'|'arpack'|'dense']
'auto' : Attempt to choose the most efficient solver
for the given problem.
'arpack' : Use Arnoldi decomposition to find the eigenvalues
and eigenvectors.
'dense' : Use a direct solver (i.e. LAPACK)
for the eigenvalue decomposition.
tol : float
Convergence tolerance passed to arpack or lobpcg.
not used if eigen_solver == 'dense'.
max_iter : integer
Maximum number of iterations for the arpack solver.
not used if eigen_solver == 'dense'.
path_method : string ['auto'|'FW'|'D']
Method to use in finding shortest path.
'auto' : attempt to choose the best algorithm automatically.
'FW' : Floyd-Warshall algorithm.
'D' : Dijkstra's algorithm.
neighbors_algorithm : string ['auto'|'brute'|'kd_tree'|'ball_tree']
Algorithm to use for nearest neighbors search,
passed to neighbors.NearestNeighbors instance.
Attributes
----------
embedding_ : array-like, shape (n_samples, n_components)
Stores the embedding vectors.
kernel_pca_ : object
`KernelPCA` object used to implement the embedding.
training_data_ : array-like, shape (n_samples, n_features)
Stores the training data.
nbrs_ : sklearn.neighbors.NearestNeighbors instance
Stores nearest neighbors instance, including BallTree or KDtree
if applicable.
dist_matrix_ : array-like, shape (n_samples, n_samples)
Stores the geodesic distance matrix of training data.
References
----------
.. [1] Tenenbaum, J.B.; De Silva, V.; & Langford, J.C. A global geometric
framework for nonlinear dimensionality reduction. Science 290 (5500)
"""
def __init__(self, n_neighbors=5, n_components=2, eigen_solver='auto',
tol=0, max_iter=None, path_method='auto',
neighbors_algorithm='auto'):
self.n_neighbors = n_neighbors
self.n_components = n_components
self.eigen_solver = eigen_solver
self.tol = tol
self.max_iter = max_iter
self.path_method = path_method
self.neighbors_algorithm = neighbors_algorithm
self.nbrs_ = NearestNeighbors(n_neighbors=n_neighbors,
algorithm=neighbors_algorithm)
def _fit_transform(self, X):
X = check_array(X)
self.nbrs_.fit(X)
self.training_data_ = self.nbrs_._fit_X
self.kernel_pca_ = KernelPCA(n_components=self.n_components,
kernel="precomputed",
eigen_solver=self.eigen_solver,
tol=self.tol, max_iter=self.max_iter)
kng = kneighbors_graph(self.nbrs_, self.n_neighbors,
mode='distance')
self.dist_matrix_ = graph_shortest_path(kng,
method=self.path_method,
directed=False)
G = self.dist_matrix_ ** 2
G *= -0.5
self.embedding_ = self.kernel_pca_.fit_transform(G)
def reconstruction_error(self):
"""Compute the reconstruction error for the embedding.
Returns
-------
reconstruction_error : float
Notes
-------
The cost function of an isomap embedding is
``E = frobenius_norm[K(D) - K(D_fit)] / n_samples``
Where D is the matrix of distances for the input data X,
D_fit is the matrix of distances for the output embedding X_fit,
and K is the isomap kernel:
``K(D) = -0.5 * (I - 1/n_samples) * D^2 * (I - 1/n_samples)``
"""
G = -0.5 * self.dist_matrix_ ** 2
G_center = KernelCenterer().fit_transform(G)
evals = self.kernel_pca_.lambdas_
return np.sqrt(np.sum(G_center ** 2) - np.sum(evals ** 2)) / G.shape[0]
def fit(self, X, y=None):
"""Compute the embedding vectors for data X
Parameters
----------
X : {array-like, sparse matrix, BallTree, KDTree, NearestNeighbors}
Sample data, shape = (n_samples, n_features), in the form of a
numpy array, precomputed tree, or NearestNeighbors
object.
Returns
-------
self : returns an instance of self.
"""
self._fit_transform(X)
return self
def fit_transform(self, X, y=None):
"""Fit the model from data in X and transform X.
Parameters
----------
X: {array-like, sparse matrix, BallTree, KDTree}
            Training vector, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new: array-like, shape (n_samples, n_components)
"""
self._fit_transform(X)
return self.embedding_
def transform(self, X):
"""Transform X.
This is implemented by linking the points X into the graph of geodesic
distances of the training data. First the `n_neighbors` nearest
neighbors of X are found in the training data, and from these the
shortest geodesic distances from each point in X to each point in
the training data are computed in order to construct the kernel.
The embedding of X is the projection of this kernel onto the
embedding vectors of the training set.
Parameters
----------
X: array-like, shape (n_samples, n_features)
Returns
-------
X_new: array-like, shape (n_samples, n_components)
"""
X = check_array(X)
distances, indices = self.nbrs_.kneighbors(X, return_distance=True)
#Create the graph of shortest distances from X to self.training_data_
# via the nearest neighbors of X.
#This can be done as a single array operation, but it potentially
# takes a lot of memory. To avoid that, use a loop:
G_X = np.zeros((X.shape[0], self.training_data_.shape[0]))
for i in range(X.shape[0]):
G_X[i] = np.min((self.dist_matrix_[indices[i]]
+ distances[i][:, None]), 0)
G_X **= 2
G_X *= -0.5
return self.kernel_pca_.transform(G_X)
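# --- Editorial sketch (not part of the original module) ----------------------
# Minimal illustrative use of the estimator defined above, assuming X is a
# high-dimensional array and X_new holds unseen points:
#
#     iso = Isomap(n_neighbors=10, n_components=2)
#     X_2d = iso.fit_transform(X)          # embed the training data
#     err = iso.reconstruction_error()     # isomap kernel reconstruction cost
#     X_new_2d = iso.transform(X_new)      # map unseen points into the embedding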
| bsd-3-clause |
warmspringwinds/scikit-image | doc/examples/plot_gabor.py | 3 | 4461 | """
=============================================
Gabor filter banks for texture classification
=============================================
In this example, we will see how to classify textures based on Gabor filter
banks. Frequency and orientation representations of the Gabor filter are similar
to those of the human visual system.
The images are filtered using the real parts of various different Gabor filter
kernels. The mean and variance of the filtered images are then used as features
for classification, which is based on the least squared error for simplicity.
"""
from __future__ import print_function
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
from scipy import ndimage as nd
from skimage import data
from skimage.util import img_as_float
from skimage.filters import gabor_kernel
def compute_feats(image, kernels):
feats = np.zeros((len(kernels), 2), dtype=np.double)
for k, kernel in enumerate(kernels):
filtered = nd.convolve(image, kernel, mode='wrap')
feats[k, 0] = filtered.mean()
feats[k, 1] = filtered.var()
return feats
def match(feats, ref_feats):
min_error = np.inf
min_i = None
for i in range(ref_feats.shape[0]):
error = np.sum((feats - ref_feats[i, :])**2)
if error < min_error:
min_error = error
min_i = i
return min_i
# prepare filter bank kernels
kernels = []
for theta in range(4):
theta = theta / 4. * np.pi
for sigma in (1, 3):
for frequency in (0.05, 0.25):
kernel = np.real(gabor_kernel(frequency, theta=theta,
sigma_x=sigma, sigma_y=sigma))
kernels.append(kernel)
shrink = (slice(0, None, 3), slice(0, None, 3))
brick = img_as_float(data.load('brick.png'))[shrink]
grass = img_as_float(data.load('grass.png'))[shrink]
wall = img_as_float(data.load('rough-wall.png'))[shrink]
image_names = ('brick', 'grass', 'wall')
images = (brick, grass, wall)
# prepare reference features
ref_feats = np.zeros((3, len(kernels), 2), dtype=np.double)
ref_feats[0, :, :] = compute_feats(brick, kernels)
ref_feats[1, :, :] = compute_feats(grass, kernels)
ref_feats[2, :, :] = compute_feats(wall, kernels)
print('Rotated images matched against references using Gabor filter banks:')
print('original: brick, rotated: 30deg, match result: ', end='')
feats = compute_feats(nd.rotate(brick, angle=190, reshape=False), kernels)
print(image_names[match(feats, ref_feats)])
print('original: brick, rotated: 70deg, match result: ', end='')
feats = compute_feats(nd.rotate(brick, angle=70, reshape=False), kernels)
print(image_names[match(feats, ref_feats)])
print('original: grass, rotated: 145deg, match result: ', end='')
feats = compute_feats(nd.rotate(grass, angle=145, reshape=False), kernels)
print(image_names[match(feats, ref_feats)])
def power(image, kernel):
# Normalize images for better comparison.
image = (image - image.mean()) / image.std()
return np.sqrt(nd.convolve(image, np.real(kernel), mode='wrap')**2 +
nd.convolve(image, np.imag(kernel), mode='wrap')**2)
# Plot a selection of the filter bank kernels and their responses.
results = []
kernel_params = []
for theta in (0, 1):
theta = theta / 4. * np.pi
for frequency in (0.1, 0.4):
kernel = gabor_kernel(frequency, theta=theta)
params = 'theta=%d,\nfrequency=%.2f' % (theta * 180 / np.pi, frequency)
kernel_params.append(params)
# Save kernel and the power image for each image
results.append((kernel, [power(img, kernel) for img in images]))
fig, axes = plt.subplots(nrows=5, ncols=4, figsize=(5, 6))
plt.gray()
fig.suptitle('Image responses for Gabor filter kernels', fontsize=12)
axes[0][0].axis('off')
# Plot original images
for label, img, ax in zip(image_names, images, axes[0][1:]):
ax.imshow(img)
ax.set_title(label, fontsize=9)
ax.axis('off')
for label, (kernel, powers), ax_row in zip(kernel_params, results, axes[1:]):
# Plot Gabor kernel
ax = ax_row[0]
ax.imshow(np.real(kernel), interpolation='nearest')
ax.set_ylabel(label, fontsize=7)
ax.set_xticks([])
ax.set_yticks([])
# Plot Gabor responses with the contrast normalized for each filter
vmin = np.min(powers)
vmax = np.max(powers)
for patch, ax in zip(powers, ax_row[1:]):
ax.imshow(patch, vmin=vmin, vmax=vmax)
ax.axis('off')
plt.show()
| bsd-3-clause |
ingmarschuster/ModelSelection | modsel/test/deflation_is.py | 1 | 5593 | # -*- coding: utf-8 -*-
"""
Created on Mon Jan 19 09:43:18 2015
@author: Ingmar Schuster
"""
from __future__ import division, print_function, absolute_import
import numpy as np
import scipy as sp
import scipy.stats as stats
from numpy import exp, log, sqrt
from scipy.misc import logsumexp
from numpy.linalg import inv
import modsel.deflation_is as dis
import modsel.mc.pmc as pmc
import modsel.estimator_statistics as es
import distributions as dist
import matplotlib.pyplot as plt
def test_DirCatTMM():
num_obs = 1000
for dim in range(2,4):
mu = np.array([11 * (i+1) for i in range(dim)])
K = np.eye(dim) * 5
df = dim + 1
obs_dist = dist.mvt(mu, K, df)
obs = obs_dist.rvs(num_obs)
dctmm = dis.DirCatTMM(obs, [1]*dim, obs_dist,
dist.invwishart(np.eye(dim) * 5, dim + 1),
stats.gamma(1, scale=1, loc=dim+1))
orig_cat_param = dctmm.cat_param
dctmm.cat_param = np.zeros(dim)
for i in range(dim):
dctmm.cat_param[i] = 1
### Test DirCatTMM.lpost_comp_indic ###
for j in range(dim):
c_indic = np.zeros(dim)
c_indic[j] = 1
for o in range(obs.shape[0]):
if i == j:
assert(dctmm.lpost_comp_indic(c_indic, o) > -np.inf)
else:
assert(dctmm.lpost_comp_indic(c_indic, o) == -np.inf)
c_indic[j] = 0
### Test DirCatTMM.llhood_comp_param ###
highest = dctmm.llhood_comp_param((mu, K, df), i)
assert(highest >= dctmm.llhood_comp_param((-mu, K, df), i))
assert(highest >= dctmm.llhood_comp_param((mu, K*5, df), i))
assert(highest >= dctmm.llhood_comp_param((mu, K/2, df), i))
assert(highest >= dctmm.llhood_comp_param((mu, K, df+10), i))
dctmm.cat_param[i] = 0
### Test DirCatTMM.lprior ###
dctmm.cat_param = np.array(dctmm.dir_param / dctmm.dir_param.sum())
dctmm.comp_indic = dist.categorical(dctmm.cat_param).rvs(num_obs, indic = True)
dctmm.update_comp_dists([(mu, K, df)] * dim)
highest = dctmm.lprior()
c_param = dctmm.dir_param + np.arange(dim)
dctmm.cat_param = np.array(c_param / c_param.sum())
ch_cat_param = dctmm.lprior()
assert(highest > ch_cat_param)
dctmm.update_comp_dists([(-mu, K, df)] * dim)
assert(ch_cat_param > dctmm.lprior())
def test_DirCatTMMProposal():
num_loc_proposals = 2
num_imp_samp = 1000
n_comp = 2
p_comp = np.array([0.7, 0.3])
dim = 1
num_obs = 100
obs = None
means = []
for i in range(n_comp):
means.append([20*i]*dim)
if obs is None:
obs = dist.mvt(means[-1], np.eye(dim),30).rvs(np.int(np.round(num_obs*p_comp[i])))
else:
obs = np.vstack([obs, dist.mvt(means[-1], np.eye(dim),30).rvs(np.int(np.round(num_obs*p_comp[i])))])
count = {"local_lpost" :0, "local_llhood" :0, "naive_lpost" :0 ,"naive_llhood" :0,"standard_lpost" :0 ,"standard_llhood" :0}
print(means)
#return
def count_closure(name):
def rval():
count[name] = count[name] + 1
return rval
initial_samples = []
for _ in range(10):
initial_samples.append(dis.DirCatTMM(obs, [1]*n_comp, dist.mvt(np.mean(means,0), np.eye(dim)*5, dim),
dist.invwishart(np.eye(dim) * 5, dim+1 ),
stats.gamma(1,scale=1)))
# (naive_samp, naive_lpost) = pmc.sample(num_imp_samp, initial_samples,
# dis.DirCatTMMProposal(naive_multi_proposals = num_loc_proposals,
# lpost_count = count_closure("naive_lpost"),
# llhood_count = count_closure("naive_llhood")),
# population_size = 4)
(infl_samp, infl_lpost) = pmc.sample(num_imp_samp, initial_samples,
dis.DirCatTMMProposal(num_local_proposals = num_loc_proposals,
lpost_count = count_closure("local_lpost"),
llhood_count = count_closure("local_llhood")),
population_size = 4)
(stand_samp, stand_lpost) = pmc.sample(num_imp_samp * num_loc_proposals, initial_samples,
dis.DirCatTMMProposal(lpost_count = count_closure("standard_lpost"),
llhood_count = count_closure("standard_llhood")),
population_size = 4)
print("===============\n",p_comp, means,
# "\n\n--NAIVE--\n",
# naive_samp[-1].comp_indic.sum(0), stats.entropy(p_comp, naive_samp[-1].comp_indic.sum(0))+1, count["naive_llhood"], count["naive_lpost"],
"\n\n--LOCAL--\n",
infl_samp[-1].comp_indic.sum(0), stats.entropy(p_comp, infl_samp[-1].comp_indic.sum(0))+1, count["local_llhood"], count["local_lpost"],
"\n\n--STANDARD--\n",
stand_samp[-1].comp_indic.sum(0), stats.entropy(p_comp, stand_samp[-1].comp_indic.sum(0))+1, count["standard_llhood"], count["standard_lpost"],"\n\n")
#return {"infl":(infl_samp, infl_lpost), "standard":(stand_samp, stand_lpost)}
| gpl-3.0 |
msotov/SPECIES | Atmos.py | 1 | 54873 | from __future__ import division
from builtins import range
import os
import math
import re
from dataclasses import dataclass
from past.utils import old_div
import numpy as np
from astropy.stats import sigma_clip
from astropy.io import ascii
from scipy.stats import sigmaclip, linregress
import scipy.odr as ODR
#from interpol_function import interpol
from uncertainties import unumpy
import matplotlib.pyplot as plt
from AtmosInterpol import interpol
@dataclass
class Tolerance:
ab: float
ep: float
dif: float
rw: float
@dataclass
class AtmosQuantity:
name: str
value: float
hold: bool
ranges: list
bounds: tuple
width: float
tol: float
change: float
class atmos:
def __init__(self, star, hold, init_vals, debug, file_debug, in_errors,
set_boundaries, vals_boundaries, tolerance=[0.001, 0.001, 0.001, 0.001],
alias='test', one_round=False, read_mode='linearregression'):
self.starname = star
self.alias = alias
self.ini = init_vals
if len(init_vals) < 4:
self.ini = [0.0, 5500., 4.36, 1.23]
self.debug = debug
self.file_debug = file_debug
self.change = 'metallicity'
self.parameters = ['metallicity', 'temperature', 'gravity', 'velocity']
#self.changepar = [200.0, 0.2, 0.2]
self.tol = Tolerance(*tolerance)
self.one_round = one_round
self.read_mode = read_mode
self.moog = [0.0, 0.0, 0.0, 0.0]
self.hold = [i in hold for i in self.parameters]
boundaries = [(-3.0, 1.0), (3500., 9000.), (0.5, 4.9), (0.0, 5.0)]
if set_boundaries and vals_boundaries is not None:
for i_p, p in enumerate(self.parameters):
if p in vals_boundaries:
imin, imax = boundaries[i_p]
boundaries[i_p] = (max(vals_boundaries[p][0], imin),
min(vals_boundaries[p][1], imax))
self.metallicity = AtmosQuantity('metallicity', self.ini[0], self.hold[0],
[-999.0, -999.0, -999.0], boundaries[0],
0.25, self.tol.ab, 0.0)
self.temperature = AtmosQuantity('temperature', self.ini[1], self.hold[1],
[-999.0, -999.0], boundaries[1], 50., self.tol.ep, 200.0)
self.gravity = AtmosQuantity('gravity', self.ini[2], self.hold[2],
[-999.0, -999.0], boundaries[2], 0.25, self.tol.dif, 0.2)
self.velocity = AtmosQuantity('velocity', self.ini[3], self.hold[3],
[-999.0, -999.0], boundaries[3], 0.25, self.tol.rw, 0.2)
self.nfailed = 0
self.exception = 1
self.change_antes = 'metallicity'
if in_errors:
self.n_repeat = 100
else:
self.n_repeat = 200
self.params = []
self.nit = 0
self.nbreak = 0
self.nit_total = 0
self.nout = 0
@property
def values(self):
met = self.metallicity.value
T = self.temperature.value
logg = self.gravity.value
micro = self.velocity.value
return (met, T, logg, micro)
@property
def boundaries(self):
met = self.metallicity.bounds
T = self.temperature.bounds
logg = self.gravity.bounds
micro = self.velocity.bounds
return (met, T, logg, micro)
def write_debug_moog(self):
if self.debug:
f = self.file_debug
f.info('Ran %s with: feh=%.2f, T=%.0f, logg=%.2f, micro=%.2f\n'\
'\t\t Obtained: ab=%.3f, ep=%.3f, dif=%.3f, rw=%.3f, nfailed=%d',\
self.change, *list(self.values), *self.moog, self.nfailed)
self.file_debug = f
del f
def write_log(self, message):
if self.debug:
f = self.file_debug
f.info(message)
self.file_debug = f
del f
def add_param(self):
self.params.append(list(self.values))
def check_correct_vals(self):
output = self.moog
if abs(output[0] - list(self.values)[0]) <= self.tol.ab and \
abs(output[1]) <= self.tol.ep and \
abs(output[2]) <= self.tol.dif and \
abs(output[3]) <= self.tol.rw:
self.write_log('Found right parameters')
del output
return -1
del output
return 0
def check_nout(self):
nout = self.nout
vals = list(self.values)
boundaries = list(self.boundaries)
for v, b in zip(vals[:-1], boundaries[:-1]):
if v < b[0] or v > b[1]:
nout += 1
if nout >= 3:
self.write_log('[Fe/H], T and log g are out of the possible ranges. '\
'Cannot find final parameters.')
self.exception = 2
del nout, vals, boundaries
return -1
del nout, vals, boundaries
return 0
@property
def call_params(self):
return self.metallicity, self.temperature, self.gravity, self.velocity
def check_nfailed(self):
if self.nfailed > 0:
for p in list(self.call_params):
if p.hold is False:
new_p = np.random.normal(p.value, p.width)
if new_p > p.bounds[1]:
new_p = p.bounds[1]
if new_p < p.bounds[0]:
                    new_p = p.bounds[0]
p.value = new_p
del new_p
p.ranges = [-999.0, -999.0]
if p.name == 'metallicity':
p.ranges = [-999.0, -999.0, p.value]
def check_nbreak(self):
if self.nbreak > 5:
self.exception = 2
self.write_log('Failed more than 5 times in the models.')
return -1
return 0
def check_params_rep(self):
params = self.params
vals = list(self.values)
if vals in params:
self.write_log('Parameters have already been used in another iteration.')
n = self.next_change(self.change_antes)
self.change = n
self.nit += 1
del params, vals
def check_nrepeat(self):
if self.nit >= self.n_repeat:
self.write_log('Parameters were repeated more than %d times.' % (self.n_repeat))
self.exception = 2
return -1
return 0
def check_nit_total(self):
if self.nit_total >= 500000:
self.write_log('More than 500000 iterations for the same star. Stopping.')
self.exception = 2
return -1
return 0
def check_hold(self, xmetal):
for i_p, p in enumerate(self.call_params):
if p.hold:
self.moog[i_p] = 0.0
if i_p == 0:
self.moog[i_p] = xmetal
@property
def show_hold(self):
return [p.hold for p in self.call_params]
def new_iteration(self, xmetal):
self.nit_total += 1
self.check_hold(xmetal)
self.nout = 0
self.add_param()
self.change_antes = self.change
def moog_output(self, output, nfail):
self.moog = output
self.nfailed = nfail
def new_values(self, new_vals):
for i_p, p in enumerate(self.call_params):
p.value = new_vals[i_p]
@staticmethod
def next_change(change_ini):
c = ['metallicity', 'temperature', 'pressure', 'velocity']
i = c.index(change_ini)
if i == 3:
i = -1
return c[i+1]
def new_change(self, change_ini=None):
if change_ini is None:
change_ini = self.change
self.change = self.next_change(change_ini)
def check_nfailed_it(self, change_ini):
if self.nfailed > 0:
self.new_change(change_ini)
self.write_log('Failed in metallicity. Change=%s' % (self.change))
self.nbreak += 1
def change_metallicity(self, new_val):
self.metallicity.value = new_val
def check_met(self):
met = self.metallicity.ranges
return (met[0] == met[1]) and (met[1] == met[2]) and (met[0] != self.metallicity.value)
def select_param(self, name):
for p in (self.metallicity, self.temperature, self.gravity, self.velocity):
if p.name == name:
break
return p
def change_parameter(self, name_par, moog_output, range_m, decimals):
m_val = self.moog[moog_output]
p = self.select_param(name_par)
ext = p.ranges
val = p.value
if m_val > p.tol:
p.ranges[0] = val
if val < ext[1]:
p.value = round(np.mean(ext), decimals)
else:
p.value = round(mult(val, range_m, 'upper'), decimals)
else:
p.ranges[1] = val
if ext[0] != -999. and val > ext[0]:
p.value = round(np.mean(ext), decimals)
else:
p.value = round(mult(val, range_m, 'floor'), decimals)
def runMOOG(self, atmos_values, fesun=7.50):
m, T, g, vt = atmos_values
interpol(self.starname, T, g, m, vt, self.alias, fesun=fesun)
cmd = str("MOOGSILENT > temp.log 2>&1 <<EOF\nMOOGFEB2017/ab_%s.par\n\nEOF" % self.alias)
os.system(cmd)
ab, ep, dif, rw, nfailed = compute_average_abundance(self.starname,\
w=False, alias=self.alias,\
mode=self.read_mode)
ab = ab - fesun
return ab, ep, dif, rw, nfailed
def check_boundaries(self, atmos_values):
boundaries = list(self.boundaries)
for v, b in zip(atmos_values, boundaries):
if v > b[1] or v < b[0]:
return False
return True
def check_if_hold(self, atmos_values):
#params = np.copy(np.array(atmos_values))
for i, p in enumerate(self.call_params):
if p.hold:
atmos_values[i] = self.ini[i]
return atmos_values[0], atmos_values[1:]
def update_moog(self, moog, nfailed):
for i_p, p in enumerate(self.call_params):
if p.hold:
self.moog[i_p] = 0.0
if i_p == 0:
self.moog[i_p] = self.ini[0]
else:
self.moog[i_p] = moog[i_p]
self.nfailed = nfailed
return self.moog
def objective_function_vec(self, X, met):
boundaries = self.check_boundaries([met, X[0], X[1], X[2]])
if boundaries:
ab, ep, dif, rw, nfailed = self.runMOOG([met, X[0], X[1], X[2]])
ab, ep, dif, rw = self.update_moog([ab, ep, dif, rw], nfailed)
return ep, rw, ab, ab-dif
return 10.**20., 10.**20., 10.**20., 10.**20.
def objective_function(self, X, met):
boundaries = self.check_boundaries([met, X[0], X[1], X[2]])
if boundaries:
ab, ep, dif, rw, nfailed = self.runMOOG([met, X[0], X[1], X[2]])
ab, ep, dif, rw = self.update_moog([ab, ep, dif, rw], nfailed)
return 5*((3.5* ep)**2.+(1.3*rw)**2.)+2*(dif)**2.
return 10.**20.
def simplex(self, S, met):
Xm = np.array([0, 0, 0], dtype=float)
Xr = np.array([0, 0, 0], dtype=float)
Xe = np.array([0, 0, 0], dtype=float)
Xc = np.array([0, 0, 0], dtype=float)
Xm = np.mean(S[:3, 1], axis=0)
Xr = 2*Xm - S[3][1]
met, Xr = self.check_if_hold(np.array([met, Xr[0], Xr[1], Xr[2]]))
fr = self.objective_function(Xr, met)
if S[0][0] <= fr < S[2][0]:
S[3][1] = Xr
S[3][0] = fr
elif fr < S[0][0]:
Xe = 3*Xm - 2*S[3][1]
met, Xe = self.check_if_hold(np.array([met, Xe[0], Xe[1], Xe[2]]))
fe = self.objective_function(Xe, met)
if fe < fr:
S[3][1] = Xe
S[3][0] = fe
else:
S[3][1] = Xr
S[3][0] = fr
else:
Xc = 0.5*(Xm + S[3][1])
met, Xc = self.check_if_hold(np.array([met, Xc[0], Xc[1], Xc[2]]))
fc = self.objective_function(Xc, met)
if fc <= S[3][0]:
S[3][1] = Xc
S[3][0] = fc
else:
for i in range(1, 4):
S[i][1] = 0.5*(S[0][1]+S[i][1])
met, S[i][1] = self.check_if_hold(np.array([met, S[i][1][0],\
S[i][1][1], S[i][1][2]]))
S[i][0] = self.objective_function(S[i][1], met)
S = S[np.argsort(S.T[0])]
del Xm, Xr, Xe, Xc
return S
def nelder_optimizer(self, it_simp, it_res_simp):
counter = 0
met, T, logg, vmic = self.ini
for i in range(it_res_simp):
log_string = '{:>2s} {:>8s} {:>8s} {:>5s} {:>5s} '\
'{:>8s} {:>8s} {:>8s} {:>8s}'.format('It', 'S', 'T', 'logg', 'vt',
'slp1', 'slp2',
'af1-af2', 'af1-met')
self.write_log(log_string)
self.write_log('{:-^68s}'.format('-'))
xin = np.array([T, logg, vmic])
metin = met
slp1, slp2, af1, af2 = self.objective_function_vec(xin, metin)
l1 = self.temperature.change if slp1 > 0 else -self.temperature.change
l2 = self.gravity.change if (af1-af2) > 0 else -self.gravity.change
l3 = self.velocity.change if slp2 > 0 else -self.velocity.change
met, X0 = self.check_if_hold(np.array([met, T, logg, vmic]))
met, X1 = self.check_if_hold(np.array([met, T+l1, logg, vmic]))
met, X2 = self.check_if_hold(np.array([met, T, logg+l2, vmic]))
met, X3 = self.check_if_hold(np.array([met, T, logg, vmic+l3]))
f0 = self.objective_function(X0, met)
f1 = self.objective_function(X1, met)
f2 = self.objective_function(X2, met)
f3 = self.objective_function(X3, met)
S = np.array([[f0, X0], [f1, X1], [f2, X2], [f3, X3]])
S = S[np.argsort(S.T[0])]
if np.any(np.isnan(S.T[0].astype(float))):
self.write_log('One of the values of S is nan. Stopping the computation.')
return S[0][1], met, 2
count_simp = 0
slp1, slp2, af1, af2 = self.objective_function_vec(S[0][1], met)
while (np.abs(slp1) > self.tol.ep or\
np.abs(slp2) > self.tol.rw or\
np.abs(af1 - af2) > self.tol.dif) and\
count_simp < it_simp:
Santes = np.copy(S)
S = self.simplex(S, met)
                # S holds floats and ndarrays, so compare element-wise rather than with '=='.
                if all(np.array_equal(a, b) for a, b in zip(Santes.ravel(), S.ravel())):
self.write_log('No change in S from previous value. Stopping cycle')
break
slp1, slp2, af1, af2 = self.objective_function_vec(S[0][1], met)
self.new_values([af1, S[0][1][0], S[0][1][1], S[0][1][2]])
count_simp += 1
for j in range(4):
if j == 0:
log_string = '{:2d} {: 7.5f} {:7.3f} {:4.3f} {:4.3f} {: 6.5f} {: 6.5f} '\
'{: 6.5f} {: 6.5f}'.format(count_simp, S[0][0], S[0][1][0],
S[0][1][1], S[0][1][2], slp1,
slp2, af1-af2, af1-met)
else:
log_string = '{:2d} {: 7.5f} {:7.3f} {:4.3f} {:4.3f}'.format(count_simp,
S[j][0],
S[j][1][0],
S[j][1][1],
S[j][1][2])
self.write_log(log_string)
if self.check_correct_vals() == -1:
counter += 1
T = S[0][1][0]
logg = S[0][1][1]
vmic = S[0][1][2]
met = af1
self.new_values([met, T, logg, vmic])
if counter > 1:
self.write_log('###############################################')
self.write_log('Coverged at:')
log_string = '{:>5s} {:>8s} {:>5s} {:>5s} {:>5s} {:>8s} {:>8s} '\
'{:>8s} {:>8s}'.format('Cycle', 'T', 'logg', 'vt', 'met', 'slp1',
'slp2', 'af1-af2', 'af1-met')
self.write_log(log_string)
log_string = '{:5d} {:7.3f} {:4.3f} {:4.3f} {:4.3f} {: 6.5f} {: 6.5f} '\
'{: 6.5f} {: 6.5f}'.format(i+1, T, logg, vmic, met,
slp1, slp2, af1-af2, af1-met)
self.write_log(log_string)
return S[0][1], met, 1
if (counter == 1) and self.one_round:
self.write_log('###############################################')
self.write_log('Coverged only one time at:')
log_string = '{:>5s} {:>8s} {:>5s} {:>5s} {:>5s} {:>8s} '\
'{:>8s} {:>8s} {:>8s}'.format('Cycle', 'T', 'logg', 'vt', 'met',
'slp1', 'slp2', 'af1-af2', 'af1-met')
self.write_log(log_string)
log_string = '{:5d} {:7.3f} {:4.3f} {:4.3f} {:4.3f} {: 6.5f} {: 6.5f} '\
'{: 6.5f} {: 6.5f}'.format(i+1, T, logg, vmic, met, slp1, slp2,
af1-af2, af1-met)
self.write_log(log_string)
return S[0][1], met, 1
self.write_log('###############################################')
self.write_log('New Cycle')
self.write_log('{:>8s} {:>5s} {:>5s} {:>6s}'.format('T', 'logg', 'vt', 'met'))
self.write_log('{:7.3f} {:4.3f} {:4.3f} {: 4.3f}'.format(T, logg, vmic, af1))
return S[0][1], met, 2
#******************************************************************************
def runODR(x, y, weights, deg=1):
isort = np.argsort(x)
func = ODR.polynomial(deg)
mydata = ODR.Data(x=x[isort], y=y[isort], we=weights[isort])
myodr = ODR.ODR(mydata, func)
myoutput = myodr.run()
beta = myoutput.beta[1]
del isort, func, mydata, myodr, myoutput
return beta
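# --- Editorial note (not part of the original module) ------------------------
# `runODR` fits an orthogonal-distance-regression polynomial (degree 1 by
# default) and returns `beta[1]`, which this module uses as the fitted slope.
# A hypothetical call, with made-up variable names, might look like:
#
#     slope = runODR(ep_values, abundances, weights=rel_weights)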
#@profile
def compute_average_abundance(starname, w=False, alias='test', mode='linearregression'):
nfailed = 0
flag = -1
ep = dif = rw = final_Fe = -99
abund = {'ab':{'FeI':-99, 'FeII': -999},\
'lines':{'FeI':[], 'FeII':[]}}
names = ['FeI', 'FeII']
names_file = ['Fe I', 'Fe II']
failed1 = re.compile(r'OH NO! ANOTHER FAILED ITERATION!')
failed2 = re.compile(r'CANNOT DECIDE ON A LINE WAVELENGTH STEP FOR')
line1 = re.compile(r'[a-z]')
line2 = re.compile(r'[\d]+ [\d]+.+')
epline = re.compile(r'E.P. correlation')
rwline = re.compile(r'R.W. correlation')
abline = re.compile(r'average abundance')
with open('./output/%s_out.test' % alias, 'r') as filemoog:
for line in filemoog:
line = line.strip()
if failed1.search(line) or failed2.search(line):
nfailed += 1
for p in range(2):
m = re.search(r'Abundance Results for Species (%s\s)\.*' % names_file[p], line)
if m:
flag = p
m = line1.search(line)
if m is None:
m = line2.search(line)
if m:
abund['lines'][names[flag]].append(line)
m = epline.search(line)
if m and flag == 0:
ep = float(line.split()[4])
m = rwline.search(line)
if m and flag == 0:
rw = float(line.split()[4])
m = abline.search(line)
if m:
abund['ab'][names[flag]] = float(line.split()[3])
del failed1, failed2, line1, line2, epline, rwline, abline, filemoog
if mode == 'linearregression':
for p in names:
ab = np.array([fe.split()[6] for fe in abund['lines'][p]], dtype=float)
if p == 'FeI':
iclip = sigma_clip(ab, maxiters=1)
a_list = np.array([list(map(fe.split().__getitem__, [2, 5]))\
for fe in abund['lines'][p]], dtype=float).T
ep_list = a_list[0]
rw_list = a_list[1]
isort_ep = np.argsort(ep_list[~iclip.mask])
isort_rw = np.argsort(rw_list[~iclip.mask])
ep, _, _, _, _ = linregress(ep_list[~iclip.mask][isort_ep],\
ab[~iclip.mask][isort_ep])
rw, _, _, _, _ = linregress(rw_list[~iclip.mask][isort_rw],\
ab[~iclip.mask][isort_rw])
abund['ab'][p] = np.mean(ab[~iclip.mask])
del iclip, ep_list, rw_list, isort_ep, isort_rw, a_list
else:
abund['ab'][p] = np.median(ab)
del ab
elif mode == 'odr':
filename = './EW/%s.txt' % starname
filelines = ascii.read(filename, include_names=('col1', 'col2', 'col4', 'col5'))
file_wave = filelines['col1']
file_ew = filelines['col2']
file_e_ew = np.maximum(filelines['col4'], filelines['col5'])
for p in names:
ab = np.array([fe.split()[6] for fe in abund['lines'][p]], dtype=float)
if p == 'FeI':
a_list = np.array([list(map(fe.split().__getitem__, [2, 5]))\
for fe in abund['lines'][p]], dtype=float).T
ep_list = a_list[0]
rw_list = a_list[1]
wave = np.array([fe.split()[0] for fe in abund['lines'][p]], dtype=float)
ew = np.array([file_ew[int(np.where(file_wave == wv)[0])] for wv in wave])
ew_err = np.array([file_e_ew[int(np.where(file_wave == wv)[0])] for wv in wave])
weights = 1./(ew_err/ew)
weights = weights/np.sum(weights)
ep = runODR(ep_list, ab, weights=weights)
rw = runODR(rw_list, ab, weights=weights)
abund['ab'][p] = np.mean(unumpy.uarray(ab, (ew_err/ew)/np.sum(ew_err/ew))).n
del wave, ew_err, a_list, ep_list, rw_list, weights, ew
else:
wave = np.array([fe.split()[0] for fe in abund['lines'][p]], dtype=float)
ew = np.array([file_ew[int(np.where(file_wave == wv)[0])] for wv in wave])
ew_err = np.array([file_e_ew[int(np.where(file_wave == wv)[0])] for wv in wave])
abund['ab'][p] = np.mean(unumpy.uarray(ab, (ew_err/ew)/np.sum(ew_err/ew))).n
del wave, ew_err, ew
del ab
del filelines, file_wave, file_ew, file_e_ew
if w:
filename = './EW/%s.txt' % starname
filelines = ascii.read(filename, include_names=('col1', 'col2', 'col3', 'col4', 'col5'))
file_wave = filelines['col1']
file_ew = filelines['col2']
file_e_ew = np.maximum(filelines['col4'], filelines['col5'])
for p in names:
a_list = np.array([list(map(fe.split().__getitem__, [0, 6]))\
for fe in abund['lines'][p]], dtype=float).T
wave = a_list[0]
ab = a_list[1]
w = np.array([1./file_e_ew[int(np.where(file_wave == wv)[0])] \
for wv in wave])
if sum(w) != 0.:
abund['ab'][p] = round(np.average(ab, weights=w), 3)
else:
abund['ab'][p] = np.mean(ab)
del filename, filelines, file_wave, file_ew, file_e_ew, wave, ab, w
dif = abund['ab']['FeI'] - abund['ab']['FeII']
final_Fe = abund['ab']['FeI']
del abund, names, names_file
return final_Fe, ep, dif, rw, nfailed
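
# --- Illustrative sketch (not part of the original pipeline) -----------------
# The 'linearregression' branch above reduces to: sigma-clip the line-by-line
# FeI abundances, then fit them against the excitation potential (and the
# reduced EW) with an ordinary linear regression.  The helper below reproduces
# that pattern on synthetic numbers.  It imports sigma_clip and linregress
# locally so it stays self-contained (both are already used above), and it is
# never called at import time.
def _slope_after_clipping_sketch():
    import numpy as _np
    from astropy.stats import sigma_clip as _sigma_clip
    from scipy.stats import linregress as _linregress
    rng = _np.random.RandomState(0)
    ep_list = _np.linspace(0.5, 5.0, 40)              # excitation potentials
    ab = 7.5 + 0.01 * ep_list + 0.02 * rng.randn(40)  # nearly flat trend
    ab[3] = 9.0                                       # one obvious outlier
    keep = ~_sigma_clip(ab, maxiters=1).mask
    slope = _linregress(ep_list[keep], ab[keep])[0]
    return slope, _np.mean(ab[keep])
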
#******************************************************************************
def mult(x, base, level):
"""
    Finds the multiple of 'base' closest to the number x.
    Input: x: number.
           base: base for which we want the closest multiple.
           level: 'upper' or 'floor', selecting the next-higher or
                  next-lower multiple of 'base' relative to x.
    Return: the chosen multiple of 'base'. Note that with level='floor',
            an x that is already an exact multiple of 'base' is shifted
            one step down (see the final check in the code).
"""
num = math.floor(old_div(x, base))
if level == 'upper':
final = (num + 1)*base
else:
final = num*base
if final == x:
final = final - base
return final
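
# --- Illustrative sketch --------------------------------------------------
# Small usage example for mult(); defined only, never executed on import.
# Note the edge case at the end of mult(): with level='floor', an x that is
# already an exact multiple of 'base' is shifted one step down.
def _mult_usage_sketch():
    lo = mult(7.3, 2, 'floor')    # floor(7.3/2) = 3  -> 3*2 = 6
    hi = mult(7.3, 2, 'upper')    # (3+1)*2           -> 8
    edge = mult(8.0, 2, 'floor')  # 4*2 == x          -> shifted down to 6
    return lo, hi, edge
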
def calc_params(star, hold, init_vals, debug, log_f=None,\
set_boundaries=False, vals_boundaries=None,\
in_errors=False, alias='test',
minimization='per_parameter',
one_round=False, read_mode='linearregression'):
"""
Computes the stellar parameters for a certain star.
Uses the methods: Metallicity, Temperature, Pressure and Velocity.
Input: star: name of the star.
hold : array with the values that are not to be changed.
When empty, all the values will be changed.
init_vals : initial parameters.
When empty, default values will be used.
debug : True/False,
if you want to turn on or off the debugging option.
           log_f : File where the debugging output
                   will be stored.
Returns: T: temperature of the model.
logg: surface gravity.
xmetal: metallicity.
micro: microturbulence velocity.
             exception: 1 or 2. If 1, the correct parameters were found.
                        If 2, a problem was encountered and the routine
                        could not converge to a solution.
"""
# Creates the atmosphere object, where all the data concerning the
# computation of the atmospheric parameters will be stored.
a = atmos(star, hold, init_vals, debug, log_f, in_errors, \
set_boundaries, vals_boundaries, alias=alias,\
one_round=one_round, read_mode=read_mode)
h_array = a.show_hold
a.write_log('hold_m=%s, hold_t=%s, hold_p=%s, hold_v=%s' % \
(h_array[0], h_array[1], h_array[2], h_array[3]))
line_hold_param = ', '.join(['%s = (%.2f, %.2f)' % (n, b[0], b[1])\
for n, b in zip(a.parameters, a.boundaries)])
a.write_log('Boundaries are: %s' % line_hold_param)
del line_hold_param
a.write_log('Initial values are: feh=%.2f, T=%.0f, logg=%.2f, vt=%.2f' % \
(init_vals[0], init_vals[1], init_vals[2], init_vals[3]))
# Modifies the output from MOOG if hold metallicity is 'yes'
xmetal_i = a.metallicity.value
a.check_hold(xmetal_i)
if minimization == 'per_parameter':
# First iteration with MOOG.
ab, ep, dif, rw, nfailed = runMOOG(a.starname, a.values, a.alias, a.read_mode)
a.moog_output([ab, ep, dif, rw], nfailed)
a.write_log('ab=%.3f, ep=%.3f, dif=%.3f, rw=%.3f, nfailed=%d' % \
(ab, ep, dif, rw, nfailed))
a.metallicity.ranges[2] = ab
a.write_log('change=%s' % a.change)
i = 0
while True:
# Values that need to be reset each iteration
a.new_iteration(xmetal_i)
# Found the right values
if a.check_correct_vals() == -1:
break
# If all the parameters are out of range, break the calculation
if a.check_nout() == -1:
break
# Parameters still in the permitted ranges
change = a.change
if change == 'metallicity':
a = Metallicity(a)
ab = a.moog[0]
a.metallicity.ranges = [-999., -999., ab]
a.temperature.ranges = [-999., -999.]
a.gravity.ranges = [-999., -999.]
a.velocity.ranges = [-999., -999.]
i = 0
a.check_nfailed_it(change)
elif change == 'temperature':
a, i = Temperature(a, i)
a.check_nfailed_it(change)
elif change == 'pressure':
a, i = Pressure(a, i)
a.check_nfailed_it(change)
else:
a, i = Velocity(a, i)
a.check_nfailed_it(change)
# If an iteration failed, change the input parameters
# according to a normal distribution
a.check_nfailed()
# If the iteration has failed more than 5 times,
# break the calculation
if a.check_nbreak() == -1:
break
# If the parameters for an iteration are the same
# as a previous one, save them
a.check_params_rep()
# If the parameters are repeated more than
# 500 times, break the calculation
if a.check_nrepeat() == -1:
break
            # If more than 1 million iterations have been performed
            # and the values have not converged, stop the calculation
if a.check_nit_total() == -1:
break
a.write_log('change is %s' % change)
xmetal, T, logg, micro = list(a.values)
exception = a.exception
a.write_log('Final parameters for %s: feh=%.3f, T=%.0f, logg=%.3f, micro=%.3f' %\
(star, xmetal, T, logg, micro))
del a, i, xmetal_i, h_array
elif minimization == 'downhill_simplex':
# First iteration with MOOG.
ab, ep, dif, rw, nfailed = runMOOG(a.starname, a.values, a.alias, a.read_mode)
ab, ep, dif, rw = a.update_moog([ab, ep, dif, rw], nfailed)
if a.check_correct_vals() == -1:
vals = a.values
del a
return vals[1], vals[2], vals[0], vals[3], 1
[T, logg, micro], xmetal, exception = a.nelder_optimizer(60, 10)
del a
return T, logg, xmetal, micro, exception
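
# --- Illustrative sketch --------------------------------------------------
# The 'downhill_simplex' branch above delegates to atmos.nelder_optimizer,
# whose implementation is not shown in this file.  As a rough analogy only,
# a downhill-simplex (Nelder-Mead) minimisation of a toy objective can be
# written with scipy.optimize.  This is NOT the optimizer used by
# calc_params; it just sketches the method the option is named after.
def _nelder_mead_sketch():
    import numpy as _np
    from scipy.optimize import minimize as _minimize
    def _toy_objective(p):
        # pretend p = (T, logg, vt) rescaled to order unity; minimum at (1, 2, 3)
        return float(_np.sum((_np.asarray(p) - _np.array([1.0, 2.0, 3.0])) ** 2))
    res = _minimize(_toy_objective, x0=[0.0, 0.0, 0.0], method='Nelder-Mead')
    return res.x, res.fun
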
def Metallicity(atm):
"""
Runs MOOG with a model atmosphere in which the
metallicity is the value that changes.
It stops running when the abundances derived by MOOG
are the same as the input value.
"""
c = 0
nfailed = 0
nit_total = 0
while True:
nit_total += 1
if nit_total > 100000:
atm.write_log('Iteration in Metallicity was completed '\
'more than 100000 times. '\
'Stopping the computation.')
atm.new_change()
break
xmetal = atm.metallicity.value
xmetal_antes = xmetal
ab, ep, dif, rw = atm.moog
if abs(ab - xmetal) <= atm.tol.ab:
atm.new_change()
break
else:
if c > 50:
if abs(ep) <= atm.tol.ep:
atm.new_change('temperature')
else:
atm.new_change()
break
atm.change_metallicity(ab)
xmetal = atm.metallicity.value
bound_min, bound_max = atm.metallicity.bounds
if xmetal < bound_min or xmetal > bound_max:
atm.write_log('Not possible to compute parameters '\
'for [Fe/H] < %.1f or [Fe/H] > %.1f. '\
'Check the boundaries of your parameter.' % \
(bound_min, bound_max))
atm.new_change()
atm.change_metallicity(xmetal_antes)
break
ab, ep, dif, rw, nfailed = runMOOG(atm.starname, atm.values,
atm.alias, atm.read_mode)
atm.moog_output([ab, ep, dif, rw], nfailed)
atm.write_debug_moog()
del ab, ep, dif, rw, xmetal, xmetal_antes
if nfailed > 0:
atm.new_change()
break
c += 1
del c, nit_total, nfailed
return atm
#******************************************************************************
def Temperature(atm, i):
"""
Runs MOOG with a model atmosphere in which the
temperature is the value that changes.
It stops running when the correlation between Ab(FeI)
and excitation potential is less than 0.002
"""
nfailed = 0
nit_total = 0
while True:
nit_total += 1
if nit_total > 100000:
atm.write_log('Iteration in Temperature was completed '\
'more than 100000 times. '\
'Stopping the computation.')
atm.new_change()
break
Tantes = atm.temperature.value
ab, ep, dif, rw = atm.moog
if abs(ep) <= atm.tol.ep:
if ab == atm.metallicity.value:
atm.new_change()
else:
atm.new_change('velocity')
break
else:
atm.change_parameter('temperature', 1, 250., 0)
T = atm.temperature.value
atm.write_log('T is %.0f, Tantes is %.0f' % (T, Tantes))
bound_min, bound_max = atm.temperature.bounds
if T > bound_max or T < bound_min:
atm.write_log('Not possible to compute parameters '\
'for T < %d or T > %d. '\
'Check the boundaries of your parameter.' % \
(int(bound_min), int(bound_max)))
atm.new_change()
atm.temperature.value = Tantes #???????????????????
if T < 3500.:
atm.temperature.value = 3500.
break
ab, ep, dif, rw, nfailed = runMOOG(atm.starname, atm.values,
atm.alias, atm.read_mode)
atm.moog_output([ab, ep, dif, rw], nfailed)
atm.write_debug_moog()
atm.metallicity.ranges[i] = ab
del ab, ep, dif, rw, T, Tantes
i += 1
if i == 3:
i = 0
if nfailed > 0:
atm.new_change()
break
if atm.check_met():
atm.new_change('velocity')
break
del nit_total, nfailed
return atm, i
#******************************************************************************
def Pressure(atm, i):
"""
Runs MOOG with a model atmosphere in which the
surface gravity is the value that changes.
It stops running when the difference between the abundances
derived for FeI and FeII is less than 0.002.
"""
nfailed = 0
nit_total = 0
while True:
nit_total += 1
if nit_total > 100000:
atm.write_log('Iteration in Pressure was completed '\
'more than 100000 times. '\
'Stopping the computation.')
atm.new_change()
break
logg_antes = atm.gravity.value
ab, ep, dif, rw = atm.moog
if abs(dif) <= atm.tol.dif:
if ab == atm.metallicity.value:
atm.new_change()
else:
atm.new_change('velocity')
break
else:
atm.change_parameter('gravity', 2, 0.25, 5)
logg = atm.gravity.value
bound_min, bound_max = atm.gravity.bounds
if logg < bound_min or logg > bound_max:
atm.write_log('Not possible to compute parameters '\
'for log g < %.1f or log g > %.1f. '\
'Check the boundaries of your parameter.' % \
(bound_min, bound_max))
atm.new_change()
atm.gravity.value = logg_antes
break
ab, ep, dif, rw, nfailed = runMOOG(atm.starname, atm.values,
atm.alias, atm.read_mode)
atm.moog_output([ab, ep, dif, rw], nfailed)
atm.write_debug_moog()
atm.metallicity.ranges[i] = ab
del ab, ep, dif, rw, logg, logg_antes
i += 1
if i == 3:
i = 0
if nfailed > 0:
atm.new_change()
break
if atm.check_met():
atm.new_change('velocity')
break
del nit_total, nfailed
return atm, i
#******************************************************************************
def Velocity(atm, i):
"""
Runs MOOG with a model atmosphere in which the
microturbulence velocity is the value that changes.
It stops running when the correlation between Ab(FeI)
and reduced EW is less than 0.002
"""
nfailed = 0
nit_total = 0
v = 0
while True:
nit_total += 1
if nit_total > 100000:
atm.write_log('Iteration in Velocity was completed '\
'more than 100000 times. '\
'Stopping the computation.')
            atm.new_change()
            break
micro_antes = atm.velocity.value
ab, ep, dif, rw = atm.moog
v += 1
if v == 50:
atm.new_change()
break
if abs(rw) <= atm.tol.rw:
if ab == atm.metallicity.value:
atm.new_change('metallicity')
else:
atm.new_change('velocity')
break
else:
atm.change_parameter('velocity', 3, 0.25, 5)
micro = atm.velocity.value
bound_min, bound_max = atm.velocity.bounds
if micro < bound_min or micro > bound_max:
atm.write_log('Not possible to compute parameters '\
'for micro < %.1f or micro > %.1f. '\
'Check the boundaries of your parameter.' % \
(bound_min, bound_max))
atm.new_change()
atm.velocity.value = micro_antes
break
ab, ep, dif, rw, nfailed = runMOOG(atm.starname, atm.values,
atm.alias, atm.read_mode)
atm.moog_output([ab, ep, dif, rw], nfailed)
atm.write_debug_moog()
atm.metallicity.ranges[i] = ab
del ab, ep, dif, rw, micro_antes, micro
i += 1
if i == 3:
i = 0
if nfailed > 0:
atm.new_change()
break
if atm.check_met():
atm.new_change('velocity')
break
del nit_total, nfailed, v
return atm, i
#******************************************************************************
def runMOOG(starname, atmos_values, alias='test', read_mode='linearregression', fesun=7.50):
m, T, g, vt = atmos_values
interpol(starname, T, g, m, vt, alias, fesun=fesun)
    cmd = "MOOGSILENT > temp.log 2>&1 <<EOF\nMOOGFEB2017/ab_%s.par\n\nEOF" % alias
os.system(cmd)
ab, ep, dif, rw, nfailed = compute_average_abundance(starname, w=False,
alias=alias, mode=read_mode)
ab = ab - fesun
return ab, ep, dif, rw, nfailed
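
# --- Illustrative sketch --------------------------------------------------
# runMOOG() drives the external MOOGSILENT binary through os.system and a
# shell here-document.  The sketch below writes the same invocation with
# subprocess.call, purely for illustration: it mirrors the command string
# built above and assumes MOOGSILENT and MOOGFEB2017/ab_<alias>.par exist,
# exactly as runMOOG does.  It is never called by this module.
def _run_moog_with_subprocess_sketch(alias='test'):
    import subprocess
    cmd = "MOOGSILENT > temp.log 2>&1 <<EOF\nMOOGFEB2017/ab_%s.par\n\nEOF" % alias
    # shell=True is required because the command relies on shell redirection
    # and a here-document, just like the os.system call in runMOOG().
    return subprocess.call(cmd, shell=True)
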
elements = np.array(['H','He','Li','Be','B','C','N','O','F','Ne',
'Na','Mg','Al','Si','P','S','Cl','Ar','K','Ca',
'Sc','Ti','V','Cr','Mn','Fe','Co','Ni','Cu','Zn',
'Ga','Ge','As','Se','Br','Kr','Rb','Sr','Y','Zr',
'Nb','Mo','Tc','Ru','Rh','Pd','Ag','Cd','In','Sn',
'Sb','Te','I','Xe','Cs','Ba','La','Ce','Pr','Nd',
'Pm','Sm','Eu','Gd','Tb','Dy','Ho','Er','Tm','Yb',
'Lu','Hf','Ta','Wl','Re','Os','Ir','Pt','Au','Hg',
'Tl','Pb','Bi','Po','At','Rn','Fr','Ra','Ac','Th',
'Pa','U','Np','Pu','Am'])
# From Asplund et al. (2009, Ann. Rev. Ast. Ap., 47, 481)
solarabund = np.array([12.00,10.93, 1.05, 1.38, 2.70, 8.43, 7.83, 8.69, 4.56, 7.93,
6.24, 7.60, 6.45, 7.51, 5.41, 7.12, 5.50, 6.40, 5.03, 6.34,
3.15, 4.95, 3.93, 5.64, 5.43, 7.50, 4.99, 6.22, 4.19, 4.56,
3.04, 3.65, 2.30, 3.34, 2.54, 3.25, 2.52, 2.87, 2.21, 2.58,
1.46, 1.88,-5.00, 1.75, 0.91, 1.57, 0.94, 1.71, 0.80, 2.04,
1.01, 2.18, 1.55, 2.24, 1.08, 2.18, 1.10, 1.58, 0.72, 1.42,
-5.00, 0.96, 0.52, 1.07, 0.30, 1.10, 0.48, 0.92, 0.10, 0.84,
0.10, 0.85,-0.12, 0.85, 0.26, 1.40, 1.38, 1.62, 0.92, 1.17,
0.90, 1.75, 0.65,-5.00,-5.00,-5.00,-5.00,-5.00,-5.00, 0.02,
-5.00,-0.54,-5.00,-5.00,-5.00])
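
# --- Illustrative sketch --------------------------------------------------
# The 'elements' and 'solarabund' arrays above are used near the end of
# runMOOG_ab to convert an absolute abundance A(X) into [X/H] by subtracting
# the solar value.  A minimal lookup looks like this; it is defined only as
# an example and never called here.
def _solar_scaled_abundance_sketch(element_name, absolute_abundance):
    idx = np.where(elements == element_name)[0]
    if idx.size == 0:
        raise ValueError('Unknown element: %s' % element_name)
    return absolute_abundance - solarabund[idx[0]]
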
def runMOOG_ab(starname, T, g, m, vt, use_w=False, alias='test', nions=14,\
nameions=['NaI', 'MgI', 'AlI', 'SiI', 'CaI', 'TiI', 'TiII', 'CrI', 'MnI',\
'FeI', 'FeII', 'NiI', 'CuI', 'ZnI']):
use_w = True
interpol(starname, T, g, m, vt, alias)
cmd = 'MOOGSILENT > temp.log 2>&1 << EOF\nMOOGFEB2017/ab_%s.par%sEOF'\
% (alias, '\n'*nions)
os.system(cmd)
abund = {}
lines = {}
wave = {}
ab = {}
dev = {}
names_file = [n[:n.index('I')]+' '+n[n.index('I'):] for n in nameions]
element_name = [n[:n.index('I')] for n in nameions]
for n in nameions:
lines[n] = np.array([])
wave[n] = np.array([])
ab[n] = np.nan
dev[n] = np.nan
abund['lines'] = lines
abund['wave'] = wave
abund['ab'] = ab
abund['dev'] = dev
del lines, wave, ab, dev
flagab = 0
with open('./output/%s_out.test' % alias) as output:
for linea in output:
linea = linea.strip()
for p in range(len(nameions)):
m = re.search(r'Abundance Results for Species ' + names_file[p] + '\.*', linea)
if m:
flagab = p+1
m = re.search(r'[a-z]', linea)
if m is None:
m = re.search(r'[\d]+ [\d]+.+', linea)
if m:
linea = linea.split()
wave_linea = float(linea[0])
ab_linea = float(linea[6])
dev_linea = float(linea[7])
if abs(dev_linea) < 10.0:
abund['lines'][nameions[flagab-1]] = np.append(abund['lines'][nameions[flagab-1]],\
ab_linea)
abund['wave'][nameions[flagab-1]] = np.append(abund['wave'][nameions[flagab-1]],\
wave_linea)
del names_file
for p in nameions:
c, low, upp = sigmaclip(abund['lines'][p], 1.5, 1.5)
index = np.where(np.in1d(abund['lines'][p], c))[0]
if index.size > 0:
abund['lines'][p] = abund['lines'][p][index]
abund['wave'][p] = abund['wave'][p][index]
del c, low, upp, index
if use_w:
filename = './EW/' + starname + '.txt'
filelines = ascii.read(filename, include_names=('col1', 'col2', 'col4', 'col5'))
file_wave = np.array([np.rint(v*100.0)/100.0 for v in filelines['col1']])
file_ew = filelines['col2']
file_e_ew = np.maximum(filelines['col4'], filelines['col5'])
#print(file_wave)
w = {}
for p in nameions:
#print(p)
#print(np.std(abund['lines'][p]))
#w[p] = np.array([1./file_e_ew[np.argmin(np.abs(abund['wave'][p][i]-file_wave))]\
# for i in range(len(abund['lines'][p]))])
#if sum(w[p]) != 0:
# abund['ab'][p] = round(np.average(abund['lines'][p], weights=w[p]), 3)
# abund['dev'][p] = round(np.sqrt(np.average((abund['lines'][p] - \
# np.mean(abund['lines'][p]))**2., \
# weights=w[p])), 3)
#else:
if len(abund['lines'][p]) > 0:
m = unumpy.uarray(abund['lines'][p], 0.2*np.ones(abund['lines'][p].size))
#abund['ab'][p] = np.mean(abund['lines'][p])
#abund['dev'][p] = np.std(abund['lines'][p])
#print(np.mean(m))
abund['ab'][p] = np.mean(m).n
abund['dev'][p] = np.mean(m).s
else:
abund['ab'][p] = np.nan
abund['dev'][p] = np.nan
del w, filelines, file_wave, file_ew, file_e_ew
else:
for p in nameions:
            if abund['lines'][p].size > 0:
abund['ab'][p] = np.mean(abund['lines'][p])
abund['dev'][p] = np.std(abund['lines'][p])
abund_dict = {}
for ii, n in enumerate(nameions):
solar = 0.0
if element_name[ii] in elements:
solar = solarabund[np.where(elements == element_name[ii])[0]]
if np.isnan(abund['ab'][n]):
abund_dict[n] = [abund['ab'][n], abund['dev'][n], len(abund['lines'][n])]
else:
abund_dict[n] = [abund['ab'][n]-solar, abund['dev'][n], len(abund['lines'][n])]
del abund
#print(abund_dict)
return abund_dict
def plot_output_file(starname):
# Read the result from the moog output file
nfailed = 0
flag = -1
ep_moog = rw_moog = -99
abund = {'ab':{'FeI':-99, 'FeII': -999},\
'lines':{'FeI':[], 'FeII':[]}}
names = ['FeI', 'FeII']
names_file = ['Fe I', 'Fe II']
failed1 = re.compile(r'OH NO! ANOTHER FAILED ITERATION!')
failed2 = re.compile(r'CANNOT DECIDE ON A LINE WAVELENGTH STEP FOR')
line1 = re.compile(r'[a-z]')
line2 = re.compile(r'[\d]+ [\d]+.+')
epline = re.compile(r'E.P. correlation')
rwline = re.compile(r'R.W. correlation')
abline = re.compile(r'average abundance')
with open('output/MOOG_output_files/%s_out.test' % starname, 'r') as filemoog:
for line in filemoog:
line = line.strip()
if failed1.search(line) or failed2.search(line):
nfailed += 1
for p in range(2):
m = re.search(r'Abundance Results for Species (%s\s)\.*' % names_file[p], line)
if m:
flag = p
m = line1.search(line)
if m is None:
m = line2.search(line)
if m:
abund['lines'][names[flag]].append(line)
m = epline.search(line)
if m and flag == 0:
ep_moog = float(line.split()[4])
m = rwline.search(line)
if m and flag == 0:
rw_moog = float(line.split()[4])
m = abline.search(line)
if m:
abund['ab'][names[flag]] = float(line.split()[3])
del failed1, failed2, line1, line2, epline, rwline, abline, filemoog
ab_FeI_moog = abund['ab']['FeI']
ab_FeII_moog = abund['ab']['FeII']
# Read the EW information
filelines = ascii.read('EW/%s.txt' % starname, include_names=('col1', 'col2', 'col4', 'col5'))
file_wave = filelines['col1']
file_ew = filelines['col2']
file_e_ew = np.maximum(filelines['col4'], filelines['col5'])
fig, ax = plt.subplots(3, 1, figsize=(10, 7))
for p in names:
ab = np.array([fe.split()[6] for fe in abund['lines'][p]], dtype=float)
if p == 'FeI':
# Using sigma clipping and linear regression
iclip = sigma_clip(ab, maxiters=1)
a_list = np.array([list(map(fe.split().__getitem__, [2, 5]))\
for fe in abund['lines'][p]], dtype=float).T
ep_list = a_list[0]
rw_list = a_list[1]
isort_ep = np.argsort(ep_list[~iclip.mask])
isort_rw = np.argsort(rw_list[~iclip.mask])
wave = np.array([fe.split()[0] for fe in abund['lines'][p]], dtype=float)
ew = np.array([file_ew[int(np.where(file_wave == wv)[0])] for wv in wave])
ew_err = np.array([file_e_ew[int(np.where(file_wave == wv)[0])] for wv in wave])
ep, ep_intercept, _, _, ep_err = linregress(ep_list[~iclip.mask][isort_ep],
ab[~iclip.mask][isort_ep])
rw, rw_intercept, _, _, rw_err = linregress(rw_list[~iclip.mask][isort_rw],
ab[~iclip.mask][isort_rw])
abund['ab'][p] = np.mean(ab[~iclip.mask])
# Using ODR
isort_ep_odr = np.argsort(ep_list)
isort_rw_odr = np.argsort(rw_list)
weights = 1./(ew_err/ew)
weights = weights/np.sum(weights)
func = ODR.polynomial(1)
mydata = ODR.Data(x=ep_list[isort_ep_odr], y=ab[isort_ep_odr],
we=weights[isort_ep_odr])
myodr = ODR.ODR(mydata, func)
myoutput = myodr.run()
ep_odr = myoutput.beta[1]
ep_intercept_odr = myoutput.beta[0]
mydata = ODR.Data(x=rw_list[isort_rw_odr], y=ab[isort_rw_odr],
we=weights[isort_rw_odr])
myodr = ODR.ODR(mydata, func)
myoutput = myodr.run()
rw_odr = myoutput.beta[1]
rw_intercept_odr = myoutput.beta[0]
del weights, func, mydata, myodr, myoutput
ab_FeI_w = np.mean(unumpy.uarray(ab, (ew_err/ew)/np.sum(ew_err/ew)))
ax[0].plot(ep_list, ab, ls='None', marker='o', color='steelblue')
ax[0].plot(ep_list[~iclip.mask], ab[~iclip.mask], ls='None', marker='o', color='orange')
ax[0].plot(ep_list[~iclip.mask][isort_ep],
ep*ep_list[~iclip.mask][isort_ep]+ep_intercept,
color='red', label='ep slope = %.4f $\pm$ %.4f' % (ep, ep_err))
ax[0].plot(ep_list[~iclip.mask][isort_ep],
ep_moog*ep_list[~iclip.mask][isort_ep]+ep_intercept,
ls='--', color='red', label='ep slope moog = %.4f' % ep_moog)
ax[0].plot(ep_list[~iclip.mask][isort_ep],
ep_odr*ep_list[~iclip.mask][isort_ep]+ep_intercept_odr,
ls='--', color='green', label='ep slope ODR = %.4f' % ep_odr)
ax[1].plot(rw_list, ab, ls='None', marker='o', color='steelblue')
ax[1].plot(rw_list[~iclip.mask], ab[~iclip.mask], ls='None', marker='o', color='orange')
ax[1].plot(rw_list[~iclip.mask][isort_rw],
rw*rw_list[~iclip.mask][isort_rw]+rw_intercept,
color='red', label='rw slope = %.4f $\pm$ %.4f' % (rw, rw_err))
ax[1].plot(rw_list[~iclip.mask][isort_rw],
rw_moog*rw_list[~iclip.mask][isort_rw]+rw_intercept,
ls='--', color='red', label='rw slope moog = %.4f' % rw_moog)
ax[1].plot(rw_list[~iclip.mask][isort_rw],
rw_odr*rw_list[~iclip.mask][isort_rw]+rw_intercept_odr,
ls='--', color='green', label='rw slope odr = %.4f' % rw_odr)
ax[2].plot(wave[iclip.mask], ab[iclip.mask], ls='None', marker='x', color='steelblue')
ax[2].plot(wave[~iclip.mask], ab[~iclip.mask], ls='None', marker='o', color='steelblue')
ax[2].axhline(np.mean(ab[~iclip.mask]), color='steelblue',
label='FeI = %.4f' % np.mean(ab[~iclip.mask]))
ax[2].axhline(ab_FeI_w.n, color='steelblue', ls=':',
label='FeI ODR = %.4f' % ab_FeI_w.n)
ax[2].axhline(ab_FeI_moog, ls='--', color='steelblue',
label='FeI moog = %.4f' % ab_FeI_moog)
del iclip, ep_list, rw_list, isort_ep, isort_rw, a_list, isort_ep_odr, isort_rw_odr,\
wave, ew_err, ew
else:
abund['ab'][p] = np.median(ab)
wave = np.array([fe.split()[0] for fe in abund['lines'][p]], dtype=float)
ew = np.array([file_ew[int(np.where(file_wave == wv)[0])] for wv in wave])
ew_err = np.array([file_e_ew[int(np.where(file_wave == wv)[0])] for wv in wave])
ab_FeII_w = np.mean(unumpy.uarray(ab, (ew_err/ew)/np.sum(ew_err/ew)))
ax[2].plot(wave, ab, ls='None', marker='o', color='orange')
ax[2].axhline(np.mean(ab), color='orange',
label='FeII = %.4f\ndif = %.4f' %\
(np.median(ab), abund['ab']['FeI'] - abund['ab']['FeII']))
ax[2].axhline(ab_FeII_w.n, color='orange', ls=':',
label='FeII ODR = %.4f\ndif ODR = %.4f' % (ab_FeII_w.n,
ab_FeI_w.n - ab_FeII_w.n))
ax[2].axhline(ab_FeII_moog, color='orange', ls='--',
label='FeII moog = %.4f\ndif moog = %.4f' % (ab_FeII_moog,
ab_FeI_moog - ab_FeII_moog))
del wave, ew_err, ew
ax[0].set_xlabel('Excitation Potential')
ax[0].set_ylabel('FeI abundance')
ax[1].set_xlabel('Reduced Equivalent Width')
ax[1].set_ylabel('FeI abundance')
ax[2].set_xlabel('Wavelength')
ax[2].set_ylabel('Abundance')
ax[0].legend(loc='upper left', fontsize='x-small')
ax[1].legend(loc='upper left', fontsize='x-small')
ax[2].legend(loc='upper left', ncol=3, fontsize='x-small')
fig.subplots_adjust(hspace=0.35, left=0.08, right=0.95, top=0.98, bottom=0.1)
fig.savefig('output/MOOG_output_files/%s.pdf' % starname)
plt.close('all')
del filelines, file_wave, file_e_ew, abund, fig, ab
| mit |
lkc9015/freestyle_project | app/NLP_LDA.py | 1 | 4348 | import csv
import sklearn.feature_extraction.text as text
import numpy as np
from sklearn.decomposition import LatentDirichletAllocation
import matplotlib.pyplot as plt
### customize function ###
def read_letter_from_file(csv_file_path):
letters = []
with open(csv_file_path, "r") as csv_file:
reader = csv.reader(csv_file)
next(reader)
for row in reader:
letters.append(row[3])
return letters
def read_company_from_file(csv_file_path):
company = []
with open(csv_file_path, "r") as csv_file:
reader = csv.reader(csv_file)
next(reader)
for row in reader:
company.append(row[1])
return company
def number_of_comapany(company):
return len(set(company))
def name_of_company(company):
return set(company)
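
### illustrative sketch (not used by run) ###
# A minimal, self-contained version of the CountVectorizer -> LDA pipeline
# used in run() below, on a tiny in-memory corpus.  It keeps the same
# 'n_topics' keyword that run() uses; newer scikit-learn releases renamed it
# to 'n_components'.  Defined only, never called on import.
def _mini_lda_sketch():
    docs = ["growth revenue customers growth",
            "cloud platform customers product",
            "revenue margin cost cost margin"]
    vectorizer = text.CountVectorizer(input='content', lowercase=True)
    dtm = vectorizer.fit_transform(docs)
    lda = LatentDirichletAllocation(n_topics=2, learning_method='batch', random_state=0)
    doctopic = lda.fit_transform(dtm)
    vocab = np.array(vectorizer.get_feature_names())
    top_words = [vocab[np.argsort(topic)[::-1][:3]] for topic in lda.components_]
    return doctopic, top_words
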
### run the code ###
def run():
    file_path = r"data\shareholders_letter.csv"
letters = read_letter_from_file(csv_file_path = file_path) # read letter, test 1
company = read_company_from_file(csv_file_path = file_path) # read company, test 2
### Change the texts into Documents Term Matix (dtm) ####
# remove stopwords and words that appear less than five times
vectorizer = text.CountVectorizer(input = letters, stop_words = 'english', lowercase = True, min_df = 10)
dtm = vectorizer.fit_transform(letters) # create dtm
vocab = np.array(vectorizer.get_feature_names()) # list of words and change it to an array
### Generate ten topics & ten top words in each topic ###
n_topics = 10
n_top_words = 10
# classifier
lda = LatentDirichletAllocation(n_topics = n_topics, learning_method = 'batch', random_state=0)
doctopic = lda.fit_transform(dtm)
### Print out topic words ###
topic_words = []
for topic in lda.components_:
word_idx = np.argsort(topic)[::-1][0:n_top_words]
topic_words.append([vocab[i] for i in word_idx])
print("-" * 101)
print("-" * 45 + "Topic words" + "-" * 45)
print("-" * 101)
for topic in range(len(topic_words)):
print("+ Topic {}: {}".format(topic, ' '.join(topic_words[topic])))
print("-" * 101)
print("-" * 101)
### Topic shares associated with each company ###
doctopic = doctopic / np.sum(doctopic, axis=1, keepdims=True)
company = np.asarray(company)
num_companies = number_of_comapany(company) # number of company, test 3
doctopic_grouped = np.zeros((num_companies, n_topics))
for i, name in enumerate(sorted(set(company))):
doctopic_grouped[i, :] = np.mean(doctopic[company == name, :], axis=0)
doctopic = doctopic_grouped
print("\n" + " |---------------------------------------------------------------------|")
print(" " + "|" + "-" * 14 + "Topic shares associated with each company" + "-" * 14 +"|")
print(" |---------------------------------------------------------------------|")
print(" | T0 T1 T2 T3 T4 T5 T6 T7 T8 T9 |")
print(" |---------------------------------------------------------------------|")
print(np.round(doctopic,3))
print(" -----------------------------------------------------------------------")
### Visualization - Heatmap ###
N, K = doctopic.shape
company_names = name_of_company(company) # get companies' name, test 4
topic_labels = ['Topic #{}'.format(k) for k in range(K)] # Numbering topics
plt.pcolor(doctopic, norm=None, cmap='Blues') # Heat map
plt.yticks(np.arange(doctopic.shape[0])+0.5, company_names) # y-axis
plt.xticks(np.arange(doctopic.shape[1])+0.5, topic_labels) # x-axis
plt.gca().invert_yaxis() # flip the y-axis
plt.xticks(rotation=45) # rotate the ticks on the x-axis 45 degrees
plt.colorbar(cmap='Blues') # add a legend
plt.tight_layout() # fix margins
plt.show() # print the heatmap
### Visualization - topic distance ###
from sklearn.manifold import MDS
from sklearn.metrics.pairwise import cosine_similarity
dist = 1 - cosine_similarity(dtm) # distance
mds = MDS(n_components=2, dissimilarity="precomputed", random_state=0)
pos = mds.fit_transform(dist)
xs, ys = pos[:, 0], pos[:, 1]
for x, y, name in zip(xs, ys, company_names):
plt.scatter(x, y)
plt.text(x, y, name)
plt.show()
if __name__ == "__main__":
run()
| mit |
manulera/ModellingCourse | ReAct/Python/GenerateMasterEq.py | 1 | 1118 |
import numpy as np
from Gilles import *
import matplotlib.pyplot as plt
from DeviationAnalysis import *
from mpl_toolkits.mplot3d import Axes3D
# Initial conditions
user_input = ['A', 100,
'B', 0]
# Constants (this is not necessary, they could be filled up already in the reaction tuple)
k = (10,10)
# Reaction template ((stoch_1,reactant_1,stoch_2,reactant_2),(stoch_1,product_1,stoch_2,product_2),k)
reactions = (
(1,'A'),(1,'B'),k[0],
(1,'B'),(1,'A'),k[1],
)
# dt is used for the deterministic calculation and to build the time vector t below
dt=0.0001
t = np.arange(0, 0.6, dt)
(solution,(tgill, valsgill,all_mus, all_taus),rows,mode) = ReAct(user_input,reactions,t,rounds=300)
fig = plt.figure()
Gillesplot(solution,t,tgill, valsgill,rows,mode)
j=0
f, axarr = plt.subplots(1, 10)
for i in np.arange(0,0.3,0.03):
A,X,Y = EquationMaker(reactions,tgill,all_mus, all_taus,i,i+0.02)
Y,X=np.meshgrid(Y,X)
#ax = fig.gca(projection='3d')
#ax.plot_surface(X,Y,A, rstride=1, cstride=1, cmap='hot', linewidth=0, antialiased=False)
axarr[j].imshow(A[:5,:], cmap='hot')
j+=1
plt.draw()
plt.show() | gpl-3.0 |
MohammedWasim/scikit-learn | sklearn/cluster/tests/test_hierarchical.py | 230 | 19795 | """
Several basic tests for hierarchical clustering procedures
"""
# Authors: Vincent Michel, 2010, Gael Varoquaux 2012,
# Matteo Visconti di Oleggio Castello 2014
# License: BSD 3 clause
from tempfile import mkdtemp
import shutil
from functools import partial
import numpy as np
from scipy import sparse
from scipy.cluster import hierarchy
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import ignore_warnings
from sklearn.cluster import ward_tree
from sklearn.cluster import AgglomerativeClustering, FeatureAgglomeration
from sklearn.cluster.hierarchical import (_hc_cut, _TREE_BUILDERS,
linkage_tree)
from sklearn.feature_extraction.image import grid_to_graph
from sklearn.metrics.pairwise import PAIRED_DISTANCES, cosine_distances,\
manhattan_distances, pairwise_distances
from sklearn.metrics.cluster import normalized_mutual_info_score
from sklearn.neighbors.graph import kneighbors_graph
from sklearn.cluster._hierarchical import average_merge, max_merge
from sklearn.utils.fast_dict import IntFloatDict
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_warns
def test_linkage_misc():
# Misc tests on linkage
rng = np.random.RandomState(42)
X = rng.normal(size=(5, 5))
assert_raises(ValueError, AgglomerativeClustering(linkage='foo').fit, X)
assert_raises(ValueError, linkage_tree, X, linkage='foo')
assert_raises(ValueError, linkage_tree, X, connectivity=np.ones((4, 4)))
# Smoke test FeatureAgglomeration
FeatureAgglomeration().fit(X)
    # test hierarchical clustering on a precomputed distances matrix
dis = cosine_distances(X)
res = linkage_tree(dis, affinity="precomputed")
assert_array_equal(res[0], linkage_tree(X, affinity="cosine")[0])
    # test hierarchical clustering with a callable affinity (manhattan_distances)
res = linkage_tree(X, affinity=manhattan_distances)
assert_array_equal(res[0], linkage_tree(X, affinity="manhattan")[0])
def test_structured_linkage_tree():
# Check that we obtain the correct solution for structured linkage trees.
rng = np.random.RandomState(0)
mask = np.ones([10, 10], dtype=np.bool)
# Avoiding a mask with only 'True' entries
mask[4:7, 4:7] = 0
X = rng.randn(50, 100)
connectivity = grid_to_graph(*mask.shape)
for tree_builder in _TREE_BUILDERS.values():
children, n_components, n_leaves, parent = \
tree_builder(X.T, connectivity)
n_nodes = 2 * X.shape[1] - 1
assert_true(len(children) + n_leaves == n_nodes)
# Check that ward_tree raises a ValueError with a connectivity matrix
# of the wrong shape
assert_raises(ValueError,
tree_builder, X.T, np.ones((4, 4)))
# Check that fitting with no samples raises an error
assert_raises(ValueError,
tree_builder, X.T[:0], connectivity)
def test_unstructured_linkage_tree():
# Check that we obtain the correct solution for unstructured linkage trees.
rng = np.random.RandomState(0)
X = rng.randn(50, 100)
for this_X in (X, X[0]):
# With specified a number of clusters just for the sake of
# raising a warning and testing the warning code
with ignore_warnings():
children, n_nodes, n_leaves, parent = assert_warns(
UserWarning, ward_tree, this_X.T, n_clusters=10)
n_nodes = 2 * X.shape[1] - 1
assert_equal(len(children) + n_leaves, n_nodes)
for tree_builder in _TREE_BUILDERS.values():
for this_X in (X, X[0]):
with ignore_warnings():
children, n_nodes, n_leaves, parent = assert_warns(
UserWarning, tree_builder, this_X.T, n_clusters=10)
n_nodes = 2 * X.shape[1] - 1
assert_equal(len(children) + n_leaves, n_nodes)
def test_height_linkage_tree():
# Check that the height of the results of linkage tree is sorted.
rng = np.random.RandomState(0)
mask = np.ones([10, 10], dtype=np.bool)
X = rng.randn(50, 100)
connectivity = grid_to_graph(*mask.shape)
for linkage_func in _TREE_BUILDERS.values():
children, n_nodes, n_leaves, parent = linkage_func(X.T, connectivity)
n_nodes = 2 * X.shape[1] - 1
assert_true(len(children) + n_leaves == n_nodes)
def test_agglomerative_clustering():
# Check that we obtain the correct number of clusters with
# agglomerative clustering.
rng = np.random.RandomState(0)
mask = np.ones([10, 10], dtype=np.bool)
n_samples = 100
X = rng.randn(n_samples, 50)
connectivity = grid_to_graph(*mask.shape)
for linkage in ("ward", "complete", "average"):
clustering = AgglomerativeClustering(n_clusters=10,
connectivity=connectivity,
linkage=linkage)
clustering.fit(X)
# test caching
try:
tempdir = mkdtemp()
clustering = AgglomerativeClustering(
n_clusters=10, connectivity=connectivity,
memory=tempdir,
linkage=linkage)
clustering.fit(X)
labels = clustering.labels_
assert_true(np.size(np.unique(labels)) == 10)
finally:
shutil.rmtree(tempdir)
# Turn caching off now
clustering = AgglomerativeClustering(
n_clusters=10, connectivity=connectivity, linkage=linkage)
# Check that we obtain the same solution with early-stopping of the
# tree building
clustering.compute_full_tree = False
clustering.fit(X)
assert_almost_equal(normalized_mutual_info_score(clustering.labels_,
labels), 1)
clustering.connectivity = None
clustering.fit(X)
assert_true(np.size(np.unique(clustering.labels_)) == 10)
# Check that we raise a TypeError on dense matrices
clustering = AgglomerativeClustering(
n_clusters=10,
connectivity=sparse.lil_matrix(
connectivity.toarray()[:10, :10]),
linkage=linkage)
assert_raises(ValueError, clustering.fit, X)
# Test that using ward with another metric than euclidean raises an
# exception
clustering = AgglomerativeClustering(
n_clusters=10,
connectivity=connectivity.toarray(),
affinity="manhattan",
linkage="ward")
assert_raises(ValueError, clustering.fit, X)
# Test using another metric than euclidean works with linkage complete
for affinity in PAIRED_DISTANCES.keys():
# Compare our (structured) implementation to scipy
clustering = AgglomerativeClustering(
n_clusters=10,
connectivity=np.ones((n_samples, n_samples)),
affinity=affinity,
linkage="complete")
clustering.fit(X)
clustering2 = AgglomerativeClustering(
n_clusters=10,
connectivity=None,
affinity=affinity,
linkage="complete")
clustering2.fit(X)
assert_almost_equal(normalized_mutual_info_score(clustering2.labels_,
clustering.labels_),
1)
# Test that using a distance matrix (affinity = 'precomputed') has same
# results (with connectivity constraints)
clustering = AgglomerativeClustering(n_clusters=10,
connectivity=connectivity,
linkage="complete")
clustering.fit(X)
X_dist = pairwise_distances(X)
clustering2 = AgglomerativeClustering(n_clusters=10,
connectivity=connectivity,
affinity='precomputed',
linkage="complete")
clustering2.fit(X_dist)
assert_array_equal(clustering.labels_, clustering2.labels_)
def test_ward_agglomeration():
# Check that we obtain the correct solution in a simplistic case
rng = np.random.RandomState(0)
mask = np.ones([10, 10], dtype=np.bool)
X = rng.randn(50, 100)
connectivity = grid_to_graph(*mask.shape)
agglo = FeatureAgglomeration(n_clusters=5, connectivity=connectivity)
agglo.fit(X)
assert_true(np.size(np.unique(agglo.labels_)) == 5)
X_red = agglo.transform(X)
assert_true(X_red.shape[1] == 5)
X_full = agglo.inverse_transform(X_red)
assert_true(np.unique(X_full[0]).size == 5)
assert_array_almost_equal(agglo.transform(X_full), X_red)
# Check that fitting with no samples raises a ValueError
assert_raises(ValueError, agglo.fit, X[:0])
def assess_same_labelling(cut1, cut2):
"""Util for comparison with scipy"""
co_clust = []
for cut in [cut1, cut2]:
n = len(cut)
k = cut.max() + 1
ecut = np.zeros((n, k))
ecut[np.arange(n), cut] = 1
co_clust.append(np.dot(ecut, ecut.T))
assert_true((co_clust[0] == co_clust[1]).all())
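
# --- Illustrative sketch (not part of the test suite) ------------------------
# assess_same_labelling() compares two clusterings through their co-membership
# matrices, which makes the check insensitive to label permutations.  The tiny
# example below shows the idea on hand-written labelings; it is defined only
# and is not collected as a test (its name does not start with 'test_').
def _co_membership_sketch():
    # same partition, different label names -> identical co-membership matrices
    cut_a = np.array([0, 0, 1, 1])
    cut_b = np.array([1, 1, 0, 0])
    assess_same_labelling(cut_a, cut_b)
    return True
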
def test_scikit_vs_scipy():
# Test scikit linkage with full connectivity (i.e. unstructured) vs scipy
n, p, k = 10, 5, 3
rng = np.random.RandomState(0)
# Not using a lil_matrix here, just to check that non sparse
# matrices are well handled
connectivity = np.ones((n, n))
for linkage in _TREE_BUILDERS.keys():
for i in range(5):
X = .1 * rng.normal(size=(n, p))
X -= 4. * np.arange(n)[:, np.newaxis]
X -= X.mean(axis=1)[:, np.newaxis]
out = hierarchy.linkage(X, method=linkage)
children_ = out[:, :2].astype(np.int)
children, _, n_leaves, _ = _TREE_BUILDERS[linkage](X, connectivity)
cut = _hc_cut(k, children, n_leaves)
cut_ = _hc_cut(k, children_, n_leaves)
assess_same_labelling(cut, cut_)
# Test error management in _hc_cut
assert_raises(ValueError, _hc_cut, n_leaves + 1, children, n_leaves)
def test_connectivity_propagation():
# Check that connectivity in the ward tree is propagated correctly during
# merging.
X = np.array([(.014, .120), (.014, .099), (.014, .097),
(.017, .153), (.017, .153), (.018, .153),
(.018, .153), (.018, .153), (.018, .153),
(.018, .153), (.018, .153), (.018, .153),
(.018, .152), (.018, .149), (.018, .144)])
connectivity = kneighbors_graph(X, 10, include_self=False)
ward = AgglomerativeClustering(
n_clusters=4, connectivity=connectivity, linkage='ward')
# If changes are not propagated correctly, fit crashes with an
# IndexError
ward.fit(X)
def test_ward_tree_children_order():
# Check that children are ordered in the same way for both structured and
# unstructured versions of ward_tree.
# test on five random datasets
n, p = 10, 5
rng = np.random.RandomState(0)
connectivity = np.ones((n, n))
for i in range(5):
X = .1 * rng.normal(size=(n, p))
X -= 4. * np.arange(n)[:, np.newaxis]
X -= X.mean(axis=1)[:, np.newaxis]
out_unstructured = ward_tree(X)
out_structured = ward_tree(X, connectivity=connectivity)
assert_array_equal(out_unstructured[0], out_structured[0])
def test_ward_linkage_tree_return_distance():
# Test return_distance option on linkage and ward trees
# test that return_distance when set true, gives same
# output on both structured and unstructured clustering.
n, p = 10, 5
rng = np.random.RandomState(0)
connectivity = np.ones((n, n))
for i in range(5):
X = .1 * rng.normal(size=(n, p))
X -= 4. * np.arange(n)[:, np.newaxis]
X -= X.mean(axis=1)[:, np.newaxis]
out_unstructured = ward_tree(X, return_distance=True)
out_structured = ward_tree(X, connectivity=connectivity,
return_distance=True)
# get children
children_unstructured = out_unstructured[0]
children_structured = out_structured[0]
# check if we got the same clusters
assert_array_equal(children_unstructured, children_structured)
# check if the distances are the same
dist_unstructured = out_unstructured[-1]
dist_structured = out_structured[-1]
assert_array_almost_equal(dist_unstructured, dist_structured)
for linkage in ['average', 'complete']:
structured_items = linkage_tree(
X, connectivity=connectivity, linkage=linkage,
return_distance=True)[-1]
unstructured_items = linkage_tree(
X, linkage=linkage, return_distance=True)[-1]
structured_dist = structured_items[-1]
unstructured_dist = unstructured_items[-1]
structured_children = structured_items[0]
unstructured_children = unstructured_items[0]
assert_array_almost_equal(structured_dist, unstructured_dist)
assert_array_almost_equal(
structured_children, unstructured_children)
# test on the following dataset where we know the truth
# taken from scipy/cluster/tests/hierarchy_test_data.py
X = np.array([[1.43054825, -7.5693489],
[6.95887839, 6.82293382],
[2.87137846, -9.68248579],
[7.87974764, -6.05485803],
[8.24018364, -6.09495602],
[7.39020262, 8.54004355]])
# truth
linkage_X_ward = np.array([[3., 4., 0.36265956, 2.],
[1., 5., 1.77045373, 2.],
[0., 2., 2.55760419, 2.],
[6., 8., 9.10208346, 4.],
[7., 9., 24.7784379, 6.]])
linkage_X_complete = np.array(
[[3., 4., 0.36265956, 2.],
[1., 5., 1.77045373, 2.],
[0., 2., 2.55760419, 2.],
[6., 8., 6.96742194, 4.],
[7., 9., 18.77445997, 6.]])
linkage_X_average = np.array(
[[3., 4., 0.36265956, 2.],
[1., 5., 1.77045373, 2.],
[0., 2., 2.55760419, 2.],
[6., 8., 6.55832839, 4.],
[7., 9., 15.44089605, 6.]])
n_samples, n_features = np.shape(X)
connectivity_X = np.ones((n_samples, n_samples))
out_X_unstructured = ward_tree(X, return_distance=True)
out_X_structured = ward_tree(X, connectivity=connectivity_X,
return_distance=True)
# check that the labels are the same
assert_array_equal(linkage_X_ward[:, :2], out_X_unstructured[0])
assert_array_equal(linkage_X_ward[:, :2], out_X_structured[0])
# check that the distances are correct
assert_array_almost_equal(linkage_X_ward[:, 2], out_X_unstructured[4])
assert_array_almost_equal(linkage_X_ward[:, 2], out_X_structured[4])
linkage_options = ['complete', 'average']
X_linkage_truth = [linkage_X_complete, linkage_X_average]
for (linkage, X_truth) in zip(linkage_options, X_linkage_truth):
out_X_unstructured = linkage_tree(
X, return_distance=True, linkage=linkage)
out_X_structured = linkage_tree(
X, connectivity=connectivity_X, linkage=linkage,
return_distance=True)
# check that the labels are the same
assert_array_equal(X_truth[:, :2], out_X_unstructured[0])
assert_array_equal(X_truth[:, :2], out_X_structured[0])
# check that the distances are correct
assert_array_almost_equal(X_truth[:, 2], out_X_unstructured[4])
assert_array_almost_equal(X_truth[:, 2], out_X_structured[4])
def test_connectivity_fixing_non_lil():
# Check non regression of a bug if a non item assignable connectivity is
# provided with more than one component.
# create dummy data
x = np.array([[0, 0], [1, 1]])
# create a mask with several components to force connectivity fixing
m = np.array([[True, False], [False, True]])
c = grid_to_graph(n_x=2, n_y=2, mask=m)
w = AgglomerativeClustering(connectivity=c, linkage='ward')
assert_warns(UserWarning, w.fit, x)
def test_int_float_dict():
rng = np.random.RandomState(0)
keys = np.unique(rng.randint(100, size=10).astype(np.intp))
values = rng.rand(len(keys))
d = IntFloatDict(keys, values)
for key, value in zip(keys, values):
assert d[key] == value
other_keys = np.arange(50).astype(np.intp)[::2]
other_values = 0.5 * np.ones(50)[::2]
other = IntFloatDict(other_keys, other_values)
# Complete smoke test
max_merge(d, other, mask=np.ones(100, dtype=np.intp), n_a=1, n_b=1)
average_merge(d, other, mask=np.ones(100, dtype=np.intp), n_a=1, n_b=1)
def test_connectivity_callable():
rng = np.random.RandomState(0)
X = rng.rand(20, 5)
connectivity = kneighbors_graph(X, 3, include_self=False)
aglc1 = AgglomerativeClustering(connectivity=connectivity)
aglc2 = AgglomerativeClustering(
connectivity=partial(kneighbors_graph, n_neighbors=3, include_self=False))
aglc1.fit(X)
aglc2.fit(X)
assert_array_equal(aglc1.labels_, aglc2.labels_)
def test_connectivity_ignores_diagonal():
rng = np.random.RandomState(0)
X = rng.rand(20, 5)
connectivity = kneighbors_graph(X, 3, include_self=False)
connectivity_include_self = kneighbors_graph(X, 3, include_self=True)
aglc1 = AgglomerativeClustering(connectivity=connectivity)
aglc2 = AgglomerativeClustering(connectivity=connectivity_include_self)
aglc1.fit(X)
aglc2.fit(X)
assert_array_equal(aglc1.labels_, aglc2.labels_)
def test_compute_full_tree():
# Test that the full tree is computed if n_clusters is small
rng = np.random.RandomState(0)
X = rng.randn(10, 2)
connectivity = kneighbors_graph(X, 5, include_self=False)
# When n_clusters is less, the full tree should be built
# that is the number of merges should be n_samples - 1
agc = AgglomerativeClustering(n_clusters=2, connectivity=connectivity)
agc.fit(X)
n_samples = X.shape[0]
n_nodes = agc.children_.shape[0]
assert_equal(n_nodes, n_samples - 1)
# When n_clusters is large, greater than max of 100 and 0.02 * n_samples.
# we should stop when there are n_clusters.
n_clusters = 101
X = rng.randn(200, 2)
connectivity = kneighbors_graph(X, 10, include_self=False)
agc = AgglomerativeClustering(n_clusters=n_clusters,
connectivity=connectivity)
agc.fit(X)
n_samples = X.shape[0]
n_nodes = agc.children_.shape[0]
assert_equal(n_nodes, n_samples - n_clusters)
def test_n_components():
# Test n_components returned by linkage, average and ward tree
rng = np.random.RandomState(0)
X = rng.rand(5, 5)
# Connectivity matrix having five components.
connectivity = np.eye(5)
for linkage_func in _TREE_BUILDERS.values():
assert_equal(ignore_warnings(linkage_func)(X, connectivity)[1], 5)
def test_agg_n_clusters():
# Test that an error is raised when n_clusters <= 0
rng = np.random.RandomState(0)
X = rng.rand(20, 10)
for n_clus in [-1, 0]:
agc = AgglomerativeClustering(n_clusters=n_clus)
msg = ("n_clusters should be an integer greater than 0."
" %s was provided." % str(agc.n_clusters))
assert_raise_message(ValueError, msg, agc.fit, X)
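
# --- Illustrative sketch (not part of the test suite) ------------------------
# Minimal end-to-end use of the estimators exercised above, on toy data.
# Defined only; it is not collected as a test because its name does not start
# with 'test_'.
def _agglomerative_usage_sketch():
    rng = np.random.RandomState(0)
    X = rng.rand(20, 3)
    labels = AgglomerativeClustering(n_clusters=4, linkage='average').fit(X).labels_
    X_reduced = FeatureAgglomeration(n_clusters=2).fit_transform(X)
    return labels, X_reduced.shape
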
| bsd-3-clause |
antoinecarme/pyaf | tests/temporal_hierarchy/test_temporal_demo_hourly_H_6H_12H_D_W.py | 1 | 1546 | # %matplotlib inline
import pyaf
import numpy as np
import pandas as pd
DATA_FREQ = 'H'
PERIODS = ["H" , "6H" , "12H" , "D" , "W"]
H = 365
N = H * 10
lDateColumn = "Date"
lSignalVar = "Signal";
START_TIME = "2001-01-25"
# generate a daily signal covering one year 2016 in a pandas dataframe
np.random.seed(seed=1960)
df_train = pd.DataFrame({lDateColumn : pd.date_range(start=START_TIME, periods=N, freq=DATA_FREQ),
lSignalVar : (np.arange(N)//40 + np.arange(N) % 21 + np.random.randn(N))})
# print(df_train.head(N))
lHierarchy = {};
lHierarchy['Levels'] = None;
lHierarchy['Data'] = None;
lHierarchy['Groups']= {};
lHierarchy['Periods']= PERIODS
lHierarchy['Type'] = "Temporal";
# create a model to plot the hierarchy.
import pyaf.HierarchicalForecastEngine as hautof
lEngine = hautof.cHierarchicalForecastEngine()
lSignalHierarchy = lEngine.plot_Hierarchy(df_train , lDateColumn, lSignalVar, H,
lHierarchy, None);
# print(lSignalHierarchy.__dict__)
# create a hierarchical model and train it
import pyaf.HierarchicalForecastEngine as hautof
lEngine = hautof.cHierarchicalForecastEngine()
# lEngine.mOptions.mNbCores = 1
lEngine.mOptions.mHierarchicalCombinationMethod = ["BU" , 'TD' , 'MO' , 'OC'];
lSignalHierarchy = lEngine.train(df_train , lDateColumn, lSignalVar, H, lHierarchy, None);
lEngine.getModelInfo();
dfapp_in = df_train.copy();
dfapp_in.info()
dfapp_in.tail()
dfapp_out = lEngine.forecast(dfapp_in, H);
dfapp_out.info()
print(dfapp_out.tail())
| bsd-3-clause |
rubikloud/scikit-learn | sklearn/cluster/tests/test_birch.py | 342 | 5603 | """
Tests for the birch clustering algorithm.
"""
from scipy import sparse
import numpy as np
from sklearn.cluster.tests.common import generate_clustered_data
from sklearn.cluster.birch import Birch
from sklearn.cluster.hierarchical import AgglomerativeClustering
from sklearn.datasets import make_blobs
from sklearn.linear_model import ElasticNet
from sklearn.metrics import pairwise_distances_argmin, v_measure_score
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
def test_n_samples_leaves_roots():
# Sanity check for the number of samples in leaves and roots
X, y = make_blobs(n_samples=10)
brc = Birch()
brc.fit(X)
n_samples_root = sum([sc.n_samples_ for sc in brc.root_.subclusters_])
n_samples_leaves = sum([sc.n_samples_ for leaf in brc._get_leaves()
for sc in leaf.subclusters_])
assert_equal(n_samples_leaves, X.shape[0])
assert_equal(n_samples_root, X.shape[0])
def test_partial_fit():
# Test that fit is equivalent to calling partial_fit multiple times
X, y = make_blobs(n_samples=100)
brc = Birch(n_clusters=3)
brc.fit(X)
brc_partial = Birch(n_clusters=None)
brc_partial.partial_fit(X[:50])
brc_partial.partial_fit(X[50:])
assert_array_equal(brc_partial.subcluster_centers_,
brc.subcluster_centers_)
# Test that same global labels are obtained after calling partial_fit
# with None
brc_partial.set_params(n_clusters=3)
brc_partial.partial_fit(None)
assert_array_equal(brc_partial.subcluster_labels_, brc.subcluster_labels_)
def test_birch_predict():
# Test the predict method predicts the nearest centroid.
rng = np.random.RandomState(0)
X = generate_clustered_data(n_clusters=3, n_features=3,
n_samples_per_cluster=10)
# n_samples * n_samples_per_cluster
shuffle_indices = np.arange(30)
rng.shuffle(shuffle_indices)
X_shuffle = X[shuffle_indices, :]
brc = Birch(n_clusters=4, threshold=1.)
brc.fit(X_shuffle)
centroids = brc.subcluster_centers_
assert_array_equal(brc.labels_, brc.predict(X_shuffle))
nearest_centroid = pairwise_distances_argmin(X_shuffle, centroids)
assert_almost_equal(v_measure_score(nearest_centroid, brc.labels_), 1.0)
def test_n_clusters():
# Test that n_clusters param works properly
X, y = make_blobs(n_samples=100, centers=10)
brc1 = Birch(n_clusters=10)
brc1.fit(X)
assert_greater(len(brc1.subcluster_centers_), 10)
assert_equal(len(np.unique(brc1.labels_)), 10)
# Test that n_clusters = Agglomerative Clustering gives
# the same results.
gc = AgglomerativeClustering(n_clusters=10)
brc2 = Birch(n_clusters=gc)
brc2.fit(X)
assert_array_equal(brc1.subcluster_labels_, brc2.subcluster_labels_)
assert_array_equal(brc1.labels_, brc2.labels_)
# Test that the wrong global clustering step raises an Error.
clf = ElasticNet()
brc3 = Birch(n_clusters=clf)
assert_raises(ValueError, brc3.fit, X)
# Test that a small number of clusters raises a warning.
brc4 = Birch(threshold=10000.)
assert_warns(UserWarning, brc4.fit, X)
def test_sparse_X():
# Test that sparse and dense data give same results
X, y = make_blobs(n_samples=100, centers=10)
brc = Birch(n_clusters=10)
brc.fit(X)
csr = sparse.csr_matrix(X)
brc_sparse = Birch(n_clusters=10)
brc_sparse.fit(csr)
assert_array_equal(brc.labels_, brc_sparse.labels_)
assert_array_equal(brc.subcluster_centers_,
brc_sparse.subcluster_centers_)
def check_branching_factor(node, branching_factor):
subclusters = node.subclusters_
assert_greater_equal(branching_factor, len(subclusters))
for cluster in subclusters:
if cluster.child_:
check_branching_factor(cluster.child_, branching_factor)
def test_branching_factor():
# Test that nodes have at max branching_factor number of subclusters
X, y = make_blobs()
branching_factor = 9
# Purposefully set a low threshold to maximize the subclusters.
brc = Birch(n_clusters=None, branching_factor=branching_factor,
threshold=0.01)
brc.fit(X)
check_branching_factor(brc.root_, branching_factor)
brc = Birch(n_clusters=3, branching_factor=branching_factor,
threshold=0.01)
brc.fit(X)
check_branching_factor(brc.root_, branching_factor)
# Raises error when branching_factor is set to one.
brc = Birch(n_clusters=None, branching_factor=1, threshold=0.01)
assert_raises(ValueError, brc.fit, X)
def check_threshold(birch_instance, threshold):
"""Use the leaf linked list for traversal"""
current_leaf = birch_instance.dummy_leaf_.next_leaf_
while current_leaf:
subclusters = current_leaf.subclusters_
for sc in subclusters:
assert_greater_equal(threshold, sc.radius)
current_leaf = current_leaf.next_leaf_
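
# --- Illustrative sketch (not part of the test suite) ------------------------
# The same leaf linked-list walk used by check_threshold() can also collect
# every subcluster radius, e.g. to inspect how the 'threshold' parameter
# bounds them.  Defined only; never executed by the tests.
def _collect_leaf_radii_sketch(birch_instance):
    radii = []
    current_leaf = birch_instance.dummy_leaf_.next_leaf_
    while current_leaf:
        radii.extend(sc.radius for sc in current_leaf.subclusters_)
        current_leaf = current_leaf.next_leaf_
    return radii
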
def test_threshold():
# Test that the leaf subclusters have a threshold lesser than radius
X, y = make_blobs(n_samples=80, centers=4)
brc = Birch(threshold=0.5, n_clusters=None)
brc.fit(X)
check_threshold(brc, 0.5)
brc = Birch(threshold=5.0, n_clusters=None)
brc.fit(X)
check_threshold(brc, 5.)
| bsd-3-clause |
jpautom/scikit-learn | sklearn/tests/test_cross_validation.py | 20 | 46586 | """Test the cross_validation module"""
from __future__ import division
import warnings
import numpy as np
from scipy.sparse import coo_matrix
from scipy.sparse import csr_matrix
from scipy import stats
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.mocking import CheckingClassifier, MockDataFrame
with warnings.catch_warnings():
warnings.simplefilter('ignore')
from sklearn import cross_validation as cval
from sklearn.datasets import make_regression
from sklearn.datasets import load_boston
from sklearn.datasets import load_digits
from sklearn.datasets import load_iris
from sklearn.datasets import make_multilabel_classification
from sklearn.metrics import explained_variance_score
from sklearn.metrics import make_scorer
from sklearn.metrics import precision_score
from sklearn.externals import six
from sklearn.externals.six.moves import zip
from sklearn.linear_model import Ridge
from sklearn.multiclass import OneVsRestClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.cluster import KMeans
from sklearn.preprocessing import Imputer
from sklearn.pipeline import Pipeline
class MockClassifier(object):
"""Dummy classifier to test the cross-validation"""
def __init__(self, a=0, allow_nd=False):
self.a = a
self.allow_nd = allow_nd
def fit(self, X, Y=None, sample_weight=None, class_prior=None,
sparse_sample_weight=None, sparse_param=None, dummy_int=None,
dummy_str=None, dummy_obj=None, callback=None):
"""The dummy arguments are to test that this fit function can
accept non-array arguments through cross-validation, such as:
- int
- str (this is actually array-like)
- object
- function
"""
self.dummy_int = dummy_int
self.dummy_str = dummy_str
self.dummy_obj = dummy_obj
if callback is not None:
callback(self)
if self.allow_nd:
X = X.reshape(len(X), -1)
if X.ndim >= 3 and not self.allow_nd:
raise ValueError('X cannot be d')
if sample_weight is not None:
assert_true(sample_weight.shape[0] == X.shape[0],
'MockClassifier extra fit_param sample_weight.shape[0]'
' is {0}, should be {1}'.format(sample_weight.shape[0],
X.shape[0]))
if class_prior is not None:
assert_true(class_prior.shape[0] == len(np.unique(y)),
'MockClassifier extra fit_param class_prior.shape[0]'
' is {0}, should be {1}'.format(class_prior.shape[0],
len(np.unique(y))))
if sparse_sample_weight is not None:
fmt = ('MockClassifier extra fit_param sparse_sample_weight'
'.shape[0] is {0}, should be {1}')
assert_true(sparse_sample_weight.shape[0] == X.shape[0],
fmt.format(sparse_sample_weight.shape[0], X.shape[0]))
if sparse_param is not None:
fmt = ('MockClassifier extra fit_param sparse_param.shape '
'is ({0}, {1}), should be ({2}, {3})')
assert_true(sparse_param.shape == P_sparse.shape,
fmt.format(sparse_param.shape[0],
sparse_param.shape[1],
P_sparse.shape[0], P_sparse.shape[1]))
return self
def predict(self, T):
if self.allow_nd:
T = T.reshape(len(T), -1)
return T[:, 0]
def score(self, X=None, Y=None):
return 1. / (1 + np.abs(self.a))
def get_params(self, deep=False):
return {'a': self.a, 'allow_nd': self.allow_nd}
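
# --- Illustrative sketch (not part of the test suite) ------------------------
# MockClassifier records the extra fit parameters and always scores
# 1 / (1 + |a|), which makes it handy for smoke-testing the cross-validation
# helpers without a real estimator.  A minimal use, defined only and never
# called here:
def _mock_classifier_cv_sketch():
    X_toy = np.ones((6, 2))
    y_toy = np.array([0, 1, 0, 1, 0, 1])
    scores = cval.cross_val_score(MockClassifier(a=1), X_toy, y_toy, cv=2)
    # every fold scores 1 / (1 + |1|) = 0.5 regardless of the data
    return scores
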
X = np.ones((10, 2))
X_sparse = coo_matrix(X)
W_sparse = coo_matrix((np.array([1]), (np.array([1]), np.array([0]))),
shape=(10, 1))
P_sparse = coo_matrix(np.eye(5))
# avoid StratifiedKFold's Warning about least populated class in y
y = np.arange(10) % 3
##############################################################################
# Tests
def check_valid_split(train, test, n_samples=None):
# Use python sets to get more informative assertion failure messages
train, test = set(train), set(test)
# Train and test split should not overlap
assert_equal(train.intersection(test), set())
if n_samples is not None:
        # Check that the union of the train and test splits covers all the indices
assert_equal(train.union(test), set(range(n_samples)))
def check_cv_coverage(cv, expected_n_iter=None, n_samples=None):
    # Check that all the samples appear at least once in a test fold
if expected_n_iter is not None:
assert_equal(len(cv), expected_n_iter)
else:
expected_n_iter = len(cv)
collected_test_samples = set()
iterations = 0
for train, test in cv:
check_valid_split(train, test, n_samples=n_samples)
iterations += 1
collected_test_samples.update(test)
# Check that the accumulated test samples cover the whole dataset
assert_equal(iterations, expected_n_iter)
if n_samples is not None:
assert_equal(collected_test_samples, set(range(n_samples)))
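
# --- Illustrative sketch (not part of the test suite) ------------------------
# check_valid_split/check_cv_coverage accept any CV iterator; the snippet
# below points them at a plain KFold, mirroring what the tests do.  Defined
# only; never called at import time.
def _cv_coverage_usage_sketch():
    cv = cval.KFold(12, n_folds=3)
    check_cv_coverage(cv, expected_n_iter=3, n_samples=12)
    return True
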
def test_kfold_valueerrors():
# Check that errors are raised if there is not enough samples
assert_raises(ValueError, cval.KFold, 3, 4)
# Check that a warning is raised if the least populated class has too few
# members.
y = [3, 3, -1, -1, 2]
cv = assert_warns_message(Warning, "The least populated class",
cval.StratifiedKFold, y, 3)
# Check that despite the warning the folds are still computed even
    # though all the classes are not necessarily represented on each
# side of the split at each split
check_cv_coverage(cv, expected_n_iter=3, n_samples=len(y))
# Error when number of folds is <= 1
assert_raises(ValueError, cval.KFold, 2, 0)
assert_raises(ValueError, cval.KFold, 2, 1)
assert_raises(ValueError, cval.StratifiedKFold, y, 0)
assert_raises(ValueError, cval.StratifiedKFold, y, 1)
# When n is not integer:
assert_raises(ValueError, cval.KFold, 2.5, 2)
# When n_folds is not integer:
assert_raises(ValueError, cval.KFold, 5, 1.5)
assert_raises(ValueError, cval.StratifiedKFold, y, 1.5)
def test_kfold_indices():
# Check all indices are returned in the test folds
kf = cval.KFold(300, 3)
check_cv_coverage(kf, expected_n_iter=3, n_samples=300)
# Check all indices are returned in the test folds even when equal-sized
# folds are not possible
kf = cval.KFold(17, 3)
check_cv_coverage(kf, expected_n_iter=3, n_samples=17)
def test_kfold_no_shuffle():
# Manually check that KFold preserves the data ordering on toy datasets
splits = iter(cval.KFold(4, 2))
train, test = next(splits)
assert_array_equal(test, [0, 1])
assert_array_equal(train, [2, 3])
train, test = next(splits)
assert_array_equal(test, [2, 3])
assert_array_equal(train, [0, 1])
splits = iter(cval.KFold(5, 2))
train, test = next(splits)
assert_array_equal(test, [0, 1, 2])
assert_array_equal(train, [3, 4])
train, test = next(splits)
assert_array_equal(test, [3, 4])
assert_array_equal(train, [0, 1, 2])
def test_stratified_kfold_no_shuffle():
# Manually check that StratifiedKFold preserves the data ordering as much
# as possible on toy datasets in order to avoid hiding sample dependencies
# when possible
splits = iter(cval.StratifiedKFold([1, 1, 0, 0], 2))
train, test = next(splits)
assert_array_equal(test, [0, 2])
assert_array_equal(train, [1, 3])
train, test = next(splits)
assert_array_equal(test, [1, 3])
assert_array_equal(train, [0, 2])
splits = iter(cval.StratifiedKFold([1, 1, 1, 0, 0, 0, 0], 2))
train, test = next(splits)
assert_array_equal(test, [0, 1, 3, 4])
assert_array_equal(train, [2, 5, 6])
train, test = next(splits)
assert_array_equal(test, [2, 5, 6])
assert_array_equal(train, [0, 1, 3, 4])
def test_stratified_kfold_ratios():
# Check that stratified kfold preserves label ratios in individual splits
# Repeat with shuffling turned off and on
n_samples = 1000
labels = np.array([4] * int(0.10 * n_samples) +
[0] * int(0.89 * n_samples) +
[1] * int(0.01 * n_samples))
for shuffle in [False, True]:
for train, test in cval.StratifiedKFold(labels, 5, shuffle=shuffle):
assert_almost_equal(np.sum(labels[train] == 4) / len(train), 0.10,
2)
assert_almost_equal(np.sum(labels[train] == 0) / len(train), 0.89,
2)
assert_almost_equal(np.sum(labels[train] == 1) / len(train), 0.01,
2)
assert_almost_equal(np.sum(labels[test] == 4) / len(test), 0.10, 2)
assert_almost_equal(np.sum(labels[test] == 0) / len(test), 0.89, 2)
assert_almost_equal(np.sum(labels[test] == 1) / len(test), 0.01, 2)
def test_kfold_balance():
# Check that KFold returns folds with balanced sizes
for kf in [cval.KFold(i, 5) for i in range(11, 17)]:
sizes = []
for _, test in kf:
sizes.append(len(test))
assert_true((np.max(sizes) - np.min(sizes)) <= 1)
assert_equal(np.sum(sizes), kf.n)
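# For illustration (not asserted above): with the fold-size rule used here,
# cval.KFold(13, 5) yields test folds of sizes [3, 3, 3, 2, 2] -- the first
# n % n_folds folds absorb the remainder, so max - min never exceeds 1.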
def test_stratifiedkfold_balance():
# Check that KFold returns folds with balanced sizes (only when
# stratification is possible)
# Repeat with shuffling turned off and on
labels = [0] * 3 + [1] * 14
for shuffle in [False, True]:
for skf in [cval.StratifiedKFold(labels[:i], 3, shuffle=shuffle)
for i in range(11, 17)]:
sizes = []
for _, test in skf:
sizes.append(len(test))
assert_true((np.max(sizes) - np.min(sizes)) <= 1)
assert_equal(np.sum(sizes), skf.n)
def test_shuffle_kfold():
# Check the indices are shuffled properly, and that all indices are
# returned in the different test folds
kf = cval.KFold(300, 3, shuffle=True, random_state=0)
ind = np.arange(300)
all_folds = None
for train, test in kf:
assert_true(np.any(np.arange(100) != ind[test]))
assert_true(np.any(np.arange(100, 200) != ind[test]))
assert_true(np.any(np.arange(200, 300) != ind[test]))
if all_folds is None:
all_folds = ind[test].copy()
else:
all_folds = np.concatenate((all_folds, ind[test]))
all_folds.sort()
assert_array_equal(all_folds, ind)
def test_shuffle_stratifiedkfold():
# Check that shuffling is happening when requested, and for proper
# sample coverage
labels = [0] * 20 + [1] * 20
kf0 = list(cval.StratifiedKFold(labels, 5, shuffle=True, random_state=0))
kf1 = list(cval.StratifiedKFold(labels, 5, shuffle=True, random_state=1))
for (_, test0), (_, test1) in zip(kf0, kf1):
assert_true(set(test0) != set(test1))
check_cv_coverage(kf0, expected_n_iter=5, n_samples=40)
def test_kfold_can_detect_dependent_samples_on_digits(): # see #2372
# The digits samples are dependent: they are apparently grouped by authors
# although we don't have any information on the groups segment locations
    # for this data. We can highlight this fact by computing k-fold cross-
    # validation with and without shuffling: we observe that the shuffling case
    # wrongly makes the IID assumption and is therefore too optimistic: it
    # estimates a much higher accuracy (around 0.96) than the non-shuffling
    # variant (around 0.86).
digits = load_digits()
X, y = digits.data[:800], digits.target[:800]
model = SVC(C=10, gamma=0.005)
n = len(y)
cv = cval.KFold(n, 5, shuffle=False)
mean_score = cval.cross_val_score(model, X, y, cv=cv).mean()
assert_greater(0.88, mean_score)
assert_greater(mean_score, 0.85)
# Shuffling the data artificially breaks the dependency and hides the
# overfitting of the model with regards to the writing style of the authors
# by yielding a seriously overestimated score:
cv = cval.KFold(n, 5, shuffle=True, random_state=0)
mean_score = cval.cross_val_score(model, X, y, cv=cv).mean()
assert_greater(mean_score, 0.95)
cv = cval.KFold(n, 5, shuffle=True, random_state=1)
mean_score = cval.cross_val_score(model, X, y, cv=cv).mean()
assert_greater(mean_score, 0.95)
# Similarly, StratifiedKFold should try to shuffle the data as little
# as possible (while respecting the balanced class constraints)
# and thus be able to detect the dependency by not overestimating
# the CV score either. As the digits dataset is approximately balanced
# the estimated mean score is close to the score measured with
# non-shuffled KFold
cv = cval.StratifiedKFold(y, 5)
mean_score = cval.cross_val_score(model, X, y, cv=cv).mean()
assert_greater(0.88, mean_score)
assert_greater(mean_score, 0.85)
def test_label_kfold():
rng = np.random.RandomState(0)
# Parameters of the test
n_labels = 15
n_samples = 1000
n_folds = 5
# Construct the test data
tolerance = 0.05 * n_samples # 5 percent error allowed
labels = rng.randint(0, n_labels, n_samples)
folds = cval.LabelKFold(labels, n_folds=n_folds).idxs
ideal_n_labels_per_fold = n_samples // n_folds
# Check that folds have approximately the same size
assert_equal(len(folds), len(labels))
for i in np.unique(folds):
assert_greater_equal(tolerance,
abs(sum(folds == i) - ideal_n_labels_per_fold))
# Check that each label appears only in 1 fold
for label in np.unique(labels):
assert_equal(len(np.unique(folds[labels == label])), 1)
# Check that no label is on both sides of the split
labels = np.asarray(labels, dtype=object)
for train, test in cval.LabelKFold(labels, n_folds=n_folds):
assert_equal(len(np.intersect1d(labels[train], labels[test])), 0)
# Construct the test data
labels = ['Albert', 'Jean', 'Bertrand', 'Michel', 'Jean',
'Francis', 'Robert', 'Michel', 'Rachel', 'Lois',
'Michelle', 'Bernard', 'Marion', 'Laura', 'Jean',
'Rachel', 'Franck', 'John', 'Gael', 'Anna', 'Alix',
'Robert', 'Marion', 'David', 'Tony', 'Abel', 'Becky',
'Madmood', 'Cary', 'Mary', 'Alexandre', 'David', 'Francis',
'Barack', 'Abdoul', 'Rasha', 'Xi', 'Silvia']
labels = np.asarray(labels, dtype=object)
n_labels = len(np.unique(labels))
n_samples = len(labels)
n_folds = 5
tolerance = 0.05 * n_samples # 5 percent error allowed
folds = cval.LabelKFold(labels, n_folds=n_folds).idxs
ideal_n_labels_per_fold = n_samples // n_folds
# Check that folds have approximately the same size
assert_equal(len(folds), len(labels))
for i in np.unique(folds):
assert_greater_equal(tolerance,
abs(sum(folds == i) - ideal_n_labels_per_fold))
# Check that each label appears only in 1 fold
for label in np.unique(labels):
assert_equal(len(np.unique(folds[labels == label])), 1)
# Check that no label is on both sides of the split
for train, test in cval.LabelKFold(labels, n_folds=n_folds):
assert_equal(len(np.intersect1d(labels[train], labels[test])), 0)
# Should fail if there are more folds than labels
labels = np.array([1, 1, 1, 2, 2])
assert_raises(ValueError, cval.LabelKFold, labels, n_folds=3)
def test_shuffle_split():
ss1 = cval.ShuffleSplit(10, test_size=0.2, random_state=0)
ss2 = cval.ShuffleSplit(10, test_size=2, random_state=0)
ss3 = cval.ShuffleSplit(10, test_size=np.int32(2), random_state=0)
for typ in six.integer_types:
ss4 = cval.ShuffleSplit(10, test_size=typ(2), random_state=0)
for t1, t2, t3, t4 in zip(ss1, ss2, ss3, ss4):
assert_array_equal(t1[0], t2[0])
assert_array_equal(t2[0], t3[0])
assert_array_equal(t3[0], t4[0])
assert_array_equal(t1[1], t2[1])
assert_array_equal(t2[1], t3[1])
assert_array_equal(t3[1], t4[1])
def test_stratified_shuffle_split_init():
y = np.asarray([0, 1, 1, 1, 2, 2, 2])
# Check that error is raised if there is a class with only one sample
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 0.2)
# Check that error is raised if the test set size is smaller than n_classes
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 2)
# Check that error is raised if the train set size is smaller than
# n_classes
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 3, 2)
y = np.asarray([0, 0, 0, 1, 1, 1, 2, 2, 2])
# Check that errors are raised if there is not enough samples
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 0.5, 0.6)
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 8, 0.6)
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 0.6, 8)
# Train size or test size too small
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, train_size=2)
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, test_size=2)
def test_stratified_shuffle_split_iter():
ys = [np.array([1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, 3]),
np.array([0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3]),
np.array([0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2]),
np.array([1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4]),
np.array([-1] * 800 + [1] * 50)
]
for y in ys:
sss = cval.StratifiedShuffleSplit(y, 6, test_size=0.33,
random_state=0)
for train, test in sss:
assert_array_equal(np.unique(y[train]), np.unique(y[test]))
# Checks if folds keep classes proportions
p_train = (np.bincount(np.unique(y[train], return_inverse=True)[1])
/ float(len(y[train])))
p_test = (np.bincount(np.unique(y[test], return_inverse=True)[1])
/ float(len(y[test])))
assert_array_almost_equal(p_train, p_test, 1)
assert_equal(y[train].size + y[test].size, y.size)
assert_array_equal(np.intersect1d(train, test), [])
def test_stratified_shuffle_split_even():
    # Test that StratifiedShuffleSplit draws indices with equal chance
n_folds = 5
n_iter = 1000
def assert_counts_are_ok(idx_counts, p):
# Here we test that the distribution of the counts
# per index is close enough to a binomial
threshold = 0.05 / n_splits
bf = stats.binom(n_splits, p)
for count in idx_counts:
p = bf.pmf(count)
assert_true(p > threshold,
"An index is not drawn with chance corresponding "
"to even draws")
for n_samples in (6, 22):
labels = np.array((n_samples // 2) * [0, 1])
splits = cval.StratifiedShuffleSplit(labels, n_iter=n_iter,
test_size=1. / n_folds,
random_state=0)
train_counts = [0] * n_samples
test_counts = [0] * n_samples
n_splits = 0
for train, test in splits:
n_splits += 1
for counter, ids in [(train_counts, train), (test_counts, test)]:
for id in ids:
counter[id] += 1
assert_equal(n_splits, n_iter)
assert_equal(len(train), splits.n_train)
assert_equal(len(test), splits.n_test)
assert_equal(len(set(train).intersection(test)), 0)
label_counts = np.unique(labels)
assert_equal(splits.test_size, 1.0 / n_folds)
assert_equal(splits.n_train + splits.n_test, len(labels))
assert_equal(len(label_counts), 2)
ex_test_p = float(splits.n_test) / n_samples
ex_train_p = float(splits.n_train) / n_samples
assert_counts_are_ok(train_counts, ex_train_p)
assert_counts_are_ok(test_counts, ex_test_p)
def test_predefinedsplit_with_kfold_split():
# Check that PredefinedSplit can reproduce a split generated by Kfold.
folds = -1 * np.ones(10)
kf_train = []
kf_test = []
for i, (train_ind, test_ind) in enumerate(cval.KFold(10, 5, shuffle=True)):
kf_train.append(train_ind)
kf_test.append(test_ind)
folds[test_ind] = i
ps_train = []
ps_test = []
ps = cval.PredefinedSplit(folds)
for train_ind, test_ind in ps:
ps_train.append(train_ind)
ps_test.append(test_ind)
assert_array_equal(ps_train, kf_train)
assert_array_equal(ps_test, kf_test)
def test_label_shuffle_split():
ys = [np.array([1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, 3]),
np.array([0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3]),
np.array([0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2]),
np.array([1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4]),
]
for y in ys:
n_iter = 6
test_size = 1. / 3
slo = cval.LabelShuffleSplit(y, n_iter, test_size=test_size,
random_state=0)
# Make sure the repr works
repr(slo)
# Test that the length is correct
assert_equal(len(slo), n_iter)
y_unique = np.unique(y)
for train, test in slo:
# First test: no train label is in the test set and vice versa
y_train_unique = np.unique(y[train])
y_test_unique = np.unique(y[test])
assert_false(np.any(np.in1d(y[train], y_test_unique)))
assert_false(np.any(np.in1d(y[test], y_train_unique)))
# Second test: train and test add up to all the data
assert_equal(y[train].size + y[test].size, y.size)
# Third test: train and test are disjoint
assert_array_equal(np.intersect1d(train, test), [])
# Fourth test: # unique train and test labels are correct,
# +- 1 for rounding error
assert_true(abs(len(y_test_unique) -
round(test_size * len(y_unique))) <= 1)
assert_true(abs(len(y_train_unique) -
round((1.0 - test_size) * len(y_unique))) <= 1)
def test_leave_label_out_changing_labels():
# Check that LeaveOneLabelOut and LeavePLabelOut work normally if
# the labels variable is changed before calling __iter__
labels = np.array([0, 1, 2, 1, 1, 2, 0, 0])
labels_changing = np.array(labels, copy=True)
lolo = cval.LeaveOneLabelOut(labels)
lolo_changing = cval.LeaveOneLabelOut(labels_changing)
lplo = cval.LeavePLabelOut(labels, p=2)
lplo_changing = cval.LeavePLabelOut(labels_changing, p=2)
labels_changing[:] = 0
for llo, llo_changing in [(lolo, lolo_changing), (lplo, lplo_changing)]:
for (train, test), (train_chan, test_chan) in zip(llo, llo_changing):
assert_array_equal(train, train_chan)
assert_array_equal(test, test_chan)
def test_cross_val_score():
clf = MockClassifier()
for a in range(-10, 10):
clf.a = a
# Smoke test
scores = cval.cross_val_score(clf, X, y)
assert_array_equal(scores, clf.score(X, y))
# test with multioutput y
scores = cval.cross_val_score(clf, X_sparse, X)
assert_array_equal(scores, clf.score(X_sparse, X))
scores = cval.cross_val_score(clf, X_sparse, y)
assert_array_equal(scores, clf.score(X_sparse, y))
# test with multioutput y
scores = cval.cross_val_score(clf, X_sparse, X)
assert_array_equal(scores, clf.score(X_sparse, X))
# test with X and y as list
list_check = lambda x: isinstance(x, list)
clf = CheckingClassifier(check_X=list_check)
scores = cval.cross_val_score(clf, X.tolist(), y.tolist())
clf = CheckingClassifier(check_y=list_check)
scores = cval.cross_val_score(clf, X, y.tolist())
assert_raises(ValueError, cval.cross_val_score, clf, X, y,
scoring="sklearn")
    # test with 3d X: accepted when allow_nd=True, rejected otherwise
X_3d = X[:, :, np.newaxis]
clf = MockClassifier(allow_nd=True)
scores = cval.cross_val_score(clf, X_3d, y)
clf = MockClassifier(allow_nd=False)
assert_raises(ValueError, cval.cross_val_score, clf, X_3d, y)
def test_cross_val_score_pandas():
# check cross_val_score doesn't destroy pandas dataframe
types = [(MockDataFrame, MockDataFrame)]
try:
from pandas import Series, DataFrame
types.append((Series, DataFrame))
except ImportError:
pass
for TargetType, InputFeatureType in types:
# X dataframe, y series
X_df, y_ser = InputFeatureType(X), TargetType(y)
check_df = lambda x: isinstance(x, InputFeatureType)
check_series = lambda x: isinstance(x, TargetType)
clf = CheckingClassifier(check_X=check_df, check_y=check_series)
cval.cross_val_score(clf, X_df, y_ser)
def test_cross_val_score_mask():
# test that cross_val_score works with boolean masks
svm = SVC(kernel="linear")
iris = load_iris()
X, y = iris.data, iris.target
cv_indices = cval.KFold(len(y), 5)
scores_indices = cval.cross_val_score(svm, X, y, cv=cv_indices)
cv_indices = cval.KFold(len(y), 5)
cv_masks = []
for train, test in cv_indices:
mask_train = np.zeros(len(y), dtype=np.bool)
mask_test = np.zeros(len(y), dtype=np.bool)
mask_train[train] = 1
mask_test[test] = 1
        cv_masks.append((mask_train, mask_test))
scores_masks = cval.cross_val_score(svm, X, y, cv=cv_masks)
assert_array_equal(scores_indices, scores_masks)
def test_cross_val_score_precomputed():
# test for svm with precomputed kernel
svm = SVC(kernel="precomputed")
iris = load_iris()
X, y = iris.data, iris.target
linear_kernel = np.dot(X, X.T)
score_precomputed = cval.cross_val_score(svm, linear_kernel, y)
svm = SVC(kernel="linear")
score_linear = cval.cross_val_score(svm, X, y)
assert_array_equal(score_precomputed, score_linear)
# Error raised for non-square X
svm = SVC(kernel="precomputed")
assert_raises(ValueError, cval.cross_val_score, svm, X, y)
# test error is raised when the precomputed kernel is not array-like
# or sparse
assert_raises(ValueError, cval.cross_val_score, svm,
linear_kernel.tolist(), y)
def test_cross_val_score_fit_params():
clf = MockClassifier()
n_samples = X.shape[0]
n_classes = len(np.unique(y))
DUMMY_INT = 42
DUMMY_STR = '42'
DUMMY_OBJ = object()
def assert_fit_params(clf):
# Function to test that the values are passed correctly to the
# classifier arguments for non-array type
assert_equal(clf.dummy_int, DUMMY_INT)
assert_equal(clf.dummy_str, DUMMY_STR)
assert_equal(clf.dummy_obj, DUMMY_OBJ)
fit_params = {'sample_weight': np.ones(n_samples),
'class_prior': np.ones(n_classes) / n_classes,
'sparse_sample_weight': W_sparse,
'sparse_param': P_sparse,
'dummy_int': DUMMY_INT,
'dummy_str': DUMMY_STR,
'dummy_obj': DUMMY_OBJ,
'callback': assert_fit_params}
cval.cross_val_score(clf, X, y, fit_params=fit_params)
def test_cross_val_score_score_func():
clf = MockClassifier()
_score_func_args = []
def score_func(y_test, y_predict):
_score_func_args.append((y_test, y_predict))
return 1.0
with warnings.catch_warnings(record=True):
scoring = make_scorer(score_func)
score = cval.cross_val_score(clf, X, y, scoring=scoring)
assert_array_equal(score, [1.0, 1.0, 1.0])
assert len(_score_func_args) == 3
def test_cross_val_score_errors():
class BrokenEstimator:
pass
assert_raises(TypeError, cval.cross_val_score, BrokenEstimator(), X)
def test_train_test_split_errors():
assert_raises(ValueError, cval.train_test_split)
assert_raises(ValueError, cval.train_test_split, range(3), train_size=1.1)
assert_raises(ValueError, cval.train_test_split, range(3), test_size=0.6,
train_size=0.6)
assert_raises(ValueError, cval.train_test_split, range(3),
test_size=np.float32(0.6), train_size=np.float32(0.6))
assert_raises(ValueError, cval.train_test_split, range(3),
test_size="wrong_type")
assert_raises(ValueError, cval.train_test_split, range(3), test_size=2,
train_size=4)
assert_raises(TypeError, cval.train_test_split, range(3),
some_argument=1.1)
assert_raises(ValueError, cval.train_test_split, range(3), range(42))
def test_train_test_split():
X = np.arange(100).reshape((10, 10))
X_s = coo_matrix(X)
y = np.arange(10)
# simple test
split = cval.train_test_split(X, y, test_size=None, train_size=.5)
X_train, X_test, y_train, y_test = split
assert_equal(len(y_test), len(y_train))
# test correspondence of X and y
assert_array_equal(X_train[:, 0], y_train * 10)
assert_array_equal(X_test[:, 0], y_test * 10)
# conversion of lists to arrays (deprecated?)
with warnings.catch_warnings(record=True):
split = cval.train_test_split(X, X_s, y.tolist())
X_train, X_test, X_s_train, X_s_test, y_train, y_test = split
assert_array_equal(X_train, X_s_train.toarray())
assert_array_equal(X_test, X_s_test.toarray())
# don't convert lists to anything else by default
split = cval.train_test_split(X, X_s, y.tolist())
X_train, X_test, X_s_train, X_s_test, y_train, y_test = split
assert_true(isinstance(y_train, list))
assert_true(isinstance(y_test, list))
# allow nd-arrays
X_4d = np.arange(10 * 5 * 3 * 2).reshape(10, 5, 3, 2)
y_3d = np.arange(10 * 7 * 11).reshape(10, 7, 11)
split = cval.train_test_split(X_4d, y_3d)
assert_equal(split[0].shape, (7, 5, 3, 2))
assert_equal(split[1].shape, (3, 5, 3, 2))
assert_equal(split[2].shape, (7, 7, 11))
assert_equal(split[3].shape, (3, 7, 11))
# test stratification option
y = np.array([1, 1, 1, 1, 2, 2, 2, 2])
for test_size, exp_test_size in zip([2, 4, 0.25, 0.5, 0.75],
[2, 4, 2, 4, 6]):
train, test = cval.train_test_split(y,
test_size=test_size,
stratify=y,
random_state=0)
assert_equal(len(test), exp_test_size)
assert_equal(len(test) + len(train), len(y))
# check the 1:1 ratio of ones and twos in the data is preserved
assert_equal(np.sum(train == 1), np.sum(train == 2))
def train_test_split_pandas():
    # check train_test_split doesn't destroy pandas dataframe
types = [MockDataFrame]
try:
from pandas import DataFrame
types.append(DataFrame)
except ImportError:
pass
for InputFeatureType in types:
# X dataframe
X_df = InputFeatureType(X)
X_train, X_test = cval.train_test_split(X_df)
assert_true(isinstance(X_train, InputFeatureType))
assert_true(isinstance(X_test, InputFeatureType))
def train_test_split_mock_pandas():
# X mock dataframe
X_df = MockDataFrame(X)
X_train, X_test = cval.train_test_split(X_df)
assert_true(isinstance(X_train, MockDataFrame))
assert_true(isinstance(X_test, MockDataFrame))
def test_cross_val_score_with_score_func_classification():
iris = load_iris()
clf = SVC(kernel='linear')
# Default score (should be the accuracy score)
scores = cval.cross_val_score(clf, iris.data, iris.target, cv=5)
assert_array_almost_equal(scores, [0.97, 1., 0.97, 0.97, 1.], 2)
# Correct classification score (aka. zero / one score) - should be the
# same as the default estimator score
zo_scores = cval.cross_val_score(clf, iris.data, iris.target,
scoring="accuracy", cv=5)
assert_array_almost_equal(zo_scores, [0.97, 1., 0.97, 0.97, 1.], 2)
    # F1 score (classes are balanced so f1_score should be equal to the
    # zero/one score)
f1_scores = cval.cross_val_score(clf, iris.data, iris.target,
scoring="f1_weighted", cv=5)
assert_array_almost_equal(f1_scores, [0.97, 1., 0.97, 0.97, 1.], 2)
def test_cross_val_score_with_score_func_regression():
X, y = make_regression(n_samples=30, n_features=20, n_informative=5,
random_state=0)
reg = Ridge()
# Default score of the Ridge regression estimator
scores = cval.cross_val_score(reg, X, y, cv=5)
assert_array_almost_equal(scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)
# R2 score (aka. determination coefficient) - should be the
# same as the default estimator score
r2_scores = cval.cross_val_score(reg, X, y, scoring="r2", cv=5)
assert_array_almost_equal(r2_scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)
# Mean squared error; this is a loss function, so "scores" are negative
mse_scores = cval.cross_val_score(reg, X, y, cv=5,
scoring="mean_squared_error")
expected_mse = np.array([-763.07, -553.16, -274.38, -273.26, -1681.99])
assert_array_almost_equal(mse_scores, expected_mse, 2)
# Explained variance
scoring = make_scorer(explained_variance_score)
ev_scores = cval.cross_val_score(reg, X, y, cv=5, scoring=scoring)
assert_array_almost_equal(ev_scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)
def test_permutation_score():
iris = load_iris()
X = iris.data
X_sparse = coo_matrix(X)
y = iris.target
svm = SVC(kernel='linear')
cv = cval.StratifiedKFold(y, 2)
score, scores, pvalue = cval.permutation_test_score(
svm, X, y, n_permutations=30, cv=cv, scoring="accuracy")
assert_greater(score, 0.9)
assert_almost_equal(pvalue, 0.0, 1)
score_label, _, pvalue_label = cval.permutation_test_score(
svm, X, y, n_permutations=30, cv=cv, scoring="accuracy",
labels=np.ones(y.size), random_state=0)
assert_true(score_label == score)
assert_true(pvalue_label == pvalue)
# check that we obtain the same results with a sparse representation
svm_sparse = SVC(kernel='linear')
cv_sparse = cval.StratifiedKFold(y, 2)
score_label, _, pvalue_label = cval.permutation_test_score(
svm_sparse, X_sparse, y, n_permutations=30, cv=cv_sparse,
scoring="accuracy", labels=np.ones(y.size), random_state=0)
assert_true(score_label == score)
assert_true(pvalue_label == pvalue)
# test with custom scoring object
def custom_score(y_true, y_pred):
return (((y_true == y_pred).sum() - (y_true != y_pred).sum())
/ y_true.shape[0])
scorer = make_scorer(custom_score)
score, _, pvalue = cval.permutation_test_score(
svm, X, y, n_permutations=100, scoring=scorer, cv=cv, random_state=0)
assert_almost_equal(score, .93, 2)
assert_almost_equal(pvalue, 0.01, 3)
# set random y
y = np.mod(np.arange(len(y)), 3)
score, scores, pvalue = cval.permutation_test_score(
svm, X, y, n_permutations=30, cv=cv, scoring="accuracy")
assert_less(score, 0.5)
assert_greater(pvalue, 0.2)
def test_cross_val_generator_with_indices():
X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
y = np.array([1, 1, 2, 2])
labels = np.array([1, 2, 3, 4])
# explicitly passing indices value is deprecated
loo = cval.LeaveOneOut(4)
lpo = cval.LeavePOut(4, 2)
kf = cval.KFold(4, 2)
skf = cval.StratifiedKFold(y, 2)
lolo = cval.LeaveOneLabelOut(labels)
lopo = cval.LeavePLabelOut(labels, 2)
ps = cval.PredefinedSplit([1, 1, 2, 2])
ss = cval.ShuffleSplit(2)
for cv in [loo, lpo, kf, skf, lolo, lopo, ss, ps]:
for train, test in cv:
assert_not_equal(np.asarray(train).dtype.kind, 'b')
            assert_not_equal(np.asarray(test).dtype.kind, 'b')
X[train], X[test]
y[train], y[test]
@ignore_warnings
def test_cross_val_generator_with_default_indices():
X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
y = np.array([1, 1, 2, 2])
labels = np.array([1, 2, 3, 4])
loo = cval.LeaveOneOut(4)
lpo = cval.LeavePOut(4, 2)
kf = cval.KFold(4, 2)
skf = cval.StratifiedKFold(y, 2)
lolo = cval.LeaveOneLabelOut(labels)
lopo = cval.LeavePLabelOut(labels, 2)
ss = cval.ShuffleSplit(2)
ps = cval.PredefinedSplit([1, 1, 2, 2])
for cv in [loo, lpo, kf, skf, lolo, lopo, ss, ps]:
for train, test in cv:
assert_not_equal(np.asarray(train).dtype.kind, 'b')
            assert_not_equal(np.asarray(test).dtype.kind, 'b')
X[train], X[test]
y[train], y[test]
def test_shufflesplit_errors():
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=2.0)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=1.0)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=0.1,
train_size=0.95)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=11)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=10)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=8, train_size=3)
assert_raises(ValueError, cval.ShuffleSplit, 10, train_size=1j)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=None,
train_size=None)
def test_shufflesplit_reproducible():
# Check that iterating twice on the ShuffleSplit gives the same
# sequence of train-test when the random_state is given
ss = cval.ShuffleSplit(10, random_state=21)
assert_array_equal(list(a for a, b in ss), list(a for a, b in ss))
def test_safe_split_with_precomputed_kernel():
clf = SVC()
clfp = SVC(kernel="precomputed")
iris = load_iris()
X, y = iris.data, iris.target
K = np.dot(X, X.T)
cv = cval.ShuffleSplit(X.shape[0], test_size=0.25, random_state=0)
tr, te = list(cv)[0]
X_tr, y_tr = cval._safe_split(clf, X, y, tr)
K_tr, y_tr2 = cval._safe_split(clfp, K, y, tr)
assert_array_almost_equal(K_tr, np.dot(X_tr, X_tr.T))
X_te, y_te = cval._safe_split(clf, X, y, te, tr)
K_te, y_te2 = cval._safe_split(clfp, K, y, te, tr)
assert_array_almost_equal(K_te, np.dot(X_te, X_tr.T))
def test_cross_val_score_allow_nans():
# Check that cross_val_score allows input data with NaNs
X = np.arange(200, dtype=np.float64).reshape(10, -1)
X[2, :] = np.nan
y = np.repeat([0, 1], X.shape[0] / 2)
p = Pipeline([
('imputer', Imputer(strategy='mean', missing_values='NaN')),
('classifier', MockClassifier()),
])
cval.cross_val_score(p, X, y, cv=5)
def test_train_test_split_allow_nans():
# Check that train_test_split allows input data with NaNs
X = np.arange(200, dtype=np.float64).reshape(10, -1)
X[2, :] = np.nan
y = np.repeat([0, 1], X.shape[0] / 2)
cval.train_test_split(X, y, test_size=0.2, random_state=42)
def test_permutation_test_score_allow_nans():
# Check that permutation_test_score allows input data with NaNs
X = np.arange(200, dtype=np.float64).reshape(10, -1)
X[2, :] = np.nan
y = np.repeat([0, 1], X.shape[0] / 2)
p = Pipeline([
('imputer', Imputer(strategy='mean', missing_values='NaN')),
('classifier', MockClassifier()),
])
cval.permutation_test_score(p, X, y, cv=5)
def test_check_cv_return_types():
X = np.ones((9, 2))
cv = cval.check_cv(3, X, classifier=False)
assert_true(isinstance(cv, cval.KFold))
y_binary = np.array([0, 1, 0, 1, 0, 0, 1, 1, 1])
cv = cval.check_cv(3, X, y_binary, classifier=True)
assert_true(isinstance(cv, cval.StratifiedKFold))
y_multiclass = np.array([0, 1, 0, 1, 2, 1, 2, 0, 2])
cv = cval.check_cv(3, X, y_multiclass, classifier=True)
assert_true(isinstance(cv, cval.StratifiedKFold))
X = np.ones((5, 2))
y_multilabel = [[1, 0, 1], [1, 1, 0], [0, 0, 0], [0, 1, 1], [1, 0, 0]]
cv = cval.check_cv(3, X, y_multilabel, classifier=True)
assert_true(isinstance(cv, cval.KFold))
y_multioutput = np.array([[1, 2], [0, 3], [0, 0], [3, 1], [2, 0]])
cv = cval.check_cv(3, X, y_multioutput, classifier=True)
assert_true(isinstance(cv, cval.KFold))
def test_cross_val_score_multilabel():
X = np.array([[-3, 4], [2, 4], [3, 3], [0, 2], [-3, 1],
[-2, 1], [0, 0], [-2, -1], [-1, -2], [1, -2]])
y = np.array([[1, 1], [0, 1], [0, 1], [0, 1], [1, 1],
[0, 1], [1, 0], [1, 1], [1, 0], [0, 0]])
clf = KNeighborsClassifier(n_neighbors=1)
scoring_micro = make_scorer(precision_score, average='micro')
scoring_macro = make_scorer(precision_score, average='macro')
scoring_samples = make_scorer(precision_score, average='samples')
score_micro = cval.cross_val_score(clf, X, y, scoring=scoring_micro, cv=5)
score_macro = cval.cross_val_score(clf, X, y, scoring=scoring_macro, cv=5)
score_samples = cval.cross_val_score(clf, X, y,
scoring=scoring_samples, cv=5)
assert_almost_equal(score_micro, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 3])
assert_almost_equal(score_macro, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 4])
assert_almost_equal(score_samples, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 4])
def test_cross_val_predict():
boston = load_boston()
X, y = boston.data, boston.target
cv = cval.KFold(len(boston.target))
est = Ridge()
# Naive loop (should be same as cross_val_predict):
preds2 = np.zeros_like(y)
for train, test in cv:
est.fit(X[train], y[train])
preds2[test] = est.predict(X[test])
preds = cval.cross_val_predict(est, X, y, cv=cv)
assert_array_almost_equal(preds, preds2)
preds = cval.cross_val_predict(est, X, y)
assert_equal(len(preds), len(y))
cv = cval.LeaveOneOut(len(y))
preds = cval.cross_val_predict(est, X, y, cv=cv)
assert_equal(len(preds), len(y))
Xsp = X.copy()
Xsp *= (Xsp > np.median(Xsp))
Xsp = coo_matrix(Xsp)
preds = cval.cross_val_predict(est, Xsp, y)
assert_array_almost_equal(len(preds), len(y))
preds = cval.cross_val_predict(KMeans(), X)
assert_equal(len(preds), len(y))
def bad_cv():
for i in range(4):
yield np.array([0, 1, 2, 3]), np.array([4, 5, 6, 7, 8])
assert_raises(ValueError, cval.cross_val_predict, est, X, y, cv=bad_cv())
def test_cross_val_predict_input_types():
clf = Ridge()
# Smoke test
predictions = cval.cross_val_predict(clf, X, y)
assert_equal(predictions.shape, (10,))
# test with multioutput y
predictions = cval.cross_val_predict(clf, X_sparse, X)
assert_equal(predictions.shape, (10, 2))
predictions = cval.cross_val_predict(clf, X_sparse, y)
assert_array_equal(predictions.shape, (10,))
# test with multioutput y
predictions = cval.cross_val_predict(clf, X_sparse, X)
assert_array_equal(predictions.shape, (10, 2))
# test with X and y as list
list_check = lambda x: isinstance(x, list)
clf = CheckingClassifier(check_X=list_check)
predictions = cval.cross_val_predict(clf, X.tolist(), y.tolist())
clf = CheckingClassifier(check_y=list_check)
predictions = cval.cross_val_predict(clf, X, y.tolist())
    # test with 3d X
X_3d = X[:, :, np.newaxis]
check_3d = lambda x: x.ndim == 3
clf = CheckingClassifier(check_X=check_3d)
predictions = cval.cross_val_predict(clf, X_3d, y)
assert_array_equal(predictions.shape, (10,))
def test_cross_val_predict_pandas():
    # check cross_val_predict doesn't destroy pandas dataframe
types = [(MockDataFrame, MockDataFrame)]
try:
from pandas import Series, DataFrame
types.append((Series, DataFrame))
except ImportError:
pass
for TargetType, InputFeatureType in types:
# X dataframe, y series
X_df, y_ser = InputFeatureType(X), TargetType(y)
check_df = lambda x: isinstance(x, InputFeatureType)
check_series = lambda x: isinstance(x, TargetType)
clf = CheckingClassifier(check_X=check_df, check_y=check_series)
cval.cross_val_predict(clf, X_df, y_ser)
def test_sparse_fit_params():
iris = load_iris()
X, y = iris.data, iris.target
clf = MockClassifier()
fit_params = {'sparse_sample_weight': coo_matrix(np.eye(X.shape[0]))}
a = cval.cross_val_score(clf, X, y, fit_params=fit_params)
assert_array_equal(a, np.ones(3))
def test_check_is_partition():
p = np.arange(100)
assert_true(cval._check_is_partition(p, 100))
assert_false(cval._check_is_partition(np.delete(p, 23), 100))
p[0] = 23
assert_false(cval._check_is_partition(p, 100))
def test_cross_val_predict_sparse_prediction():
# check that cross_val_predict gives same result for sparse and dense input
X, y = make_multilabel_classification(n_classes=2, n_labels=1,
allow_unlabeled=False,
return_indicator=True,
random_state=1)
X_sparse = csr_matrix(X)
y_sparse = csr_matrix(y)
classif = OneVsRestClassifier(SVC(kernel='linear'))
preds = cval.cross_val_predict(classif, X, y, cv=10)
preds_sparse = cval.cross_val_predict(classif, X_sparse, y_sparse, cv=10)
preds_sparse = preds_sparse.toarray()
assert_array_almost_equal(preds_sparse, preds)
| bsd-3-clause |
kmather73/zipline | zipline/gens/tradesimulation.py | 9 | 15130 | #
# Copyright 2014 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from contextlib2 import ExitStack
from logbook import Logger, Processor
from pandas.tslib import normalize_date
from zipline.utils.api_support import ZiplineAPI
from zipline.finance import trading
from zipline.protocol import (
BarData,
SIDData,
DATASOURCE_TYPE
)
log = Logger('Trade Simulation')
class AlgorithmSimulator(object):
EMISSION_TO_PERF_KEY_MAP = {
'minute': 'minute_perf',
'daily': 'daily_perf'
}
def __init__(self, algo, sim_params):
# ==============
# Simulation
# Param Setup
# ==============
self.sim_params = sim_params
# ==============
# Algo Setup
# ==============
self.algo = algo
self.algo_start = normalize_date(self.sim_params.first_open)
# ==============
# Snapshot Setup
# ==============
# The algorithm's data as of our most recent event.
# We want an object that will have empty objects as default
# values on missing keys.
self.current_data = BarData()
# We don't have a datetime for the current snapshot until we
# receive a message.
self.simulation_dt = None
# =============
# Logging Setup
# =============
# Processor function for injecting the algo_dt into
# user prints/logs.
def inject_algo_dt(record):
if 'algo_dt' not in record.extra:
record.extra['algo_dt'] = self.simulation_dt
self.processor = Processor(inject_algo_dt)
def transform(self, stream_in):
"""
Main generator work loop.
"""
# Initialize the mkt_close
mkt_open = self.algo.perf_tracker.market_open
mkt_close = self.algo.perf_tracker.market_close
# inject the current algo
# snapshot time to any log record generated.
with ExitStack() as stack:
stack.enter_context(self.processor)
stack.enter_context(ZiplineAPI(self.algo))
data_frequency = self.sim_params.data_frequency
self._call_before_trading_start(mkt_open)
for date, snapshot in stream_in:
self.simulation_dt = date
self.on_dt_changed(date)
# If we're still in the warmup period. Use the event to
# update our universe, but don't yield any perf messages,
# and don't send a snapshot to handle_data.
if date < self.algo_start:
for event in snapshot:
if event.type == DATASOURCE_TYPE.SPLIT:
self.algo.blotter.process_split(event)
elif event.type == DATASOURCE_TYPE.TRADE:
self.update_universe(event)
self.algo.perf_tracker.process_trade(event)
elif event.type == DATASOURCE_TYPE.CUSTOM:
self.update_universe(event)
else:
messages = self._process_snapshot(
date,
snapshot,
self.algo.instant_fill,
)
# Perf messages are only emitted if the snapshot contained
# a benchmark event.
for message in messages:
yield message
# When emitting minutely, we need to call
# before_trading_start before the next trading day begins
if date == mkt_close:
if mkt_close <= self.algo.perf_tracker.last_close:
before_last_close = \
mkt_close < self.algo.perf_tracker.last_close
try:
mkt_open, mkt_close = \
trading.environment \
.next_open_and_close(mkt_close)
except trading.NoFurtherDataError:
# If at the end of backtest history,
# skip advancing market close.
pass
if before_last_close:
self._call_before_trading_start(mkt_open)
elif data_frequency == 'daily':
next_day = trading.environment.next_trading_day(date)
if next_day is not None and \
next_day < self.algo.perf_tracker.last_close:
self._call_before_trading_start(next_day)
self.algo.portfolio_needs_update = True
self.algo.account_needs_update = True
self.algo.performance_needs_update = True
risk_message = self.algo.perf_tracker.handle_simulation_end()
yield risk_message
def _process_snapshot(self, dt, snapshot, instant_fill):
"""
Process a stream of events corresponding to a single datetime, possibly
returning a perf message to be yielded.
If @instant_fill = True, we delay processing of events until after the
user's call to handle_data, and we process the user's placed orders
before the snapshot's events. Note that this introduces a lookahead
        bias, since the user is effectively placing orders that are
        filled based on trades that happened prior to the call to handle_data.
If @instant_fill = False, we process Trade events before calling
handle_data. This means that orders are filled based on trades
occurring in the next snapshot. This is the more conservative model,
and as such it is the default behavior in TradingAlgorithm.
"""
# Flags indicating whether we saw any events of type TRADE and type
# BENCHMARK. Respectively, these control whether or not handle_data is
# called for this snapshot and whether we emit a perf message for this
# snapshot.
any_trade_occurred = False
benchmark_event_occurred = False
if instant_fill:
events_to_be_processed = []
# Assign process events to variables to avoid attribute access in
# innermost loops.
#
# Done here, to allow for perf_tracker or blotter to be swapped out
# or changed in between snapshots.
perf_process_trade = self.algo.perf_tracker.process_trade
perf_process_transaction = self.algo.perf_tracker.process_transaction
perf_process_order = self.algo.perf_tracker.process_order
perf_process_benchmark = self.algo.perf_tracker.process_benchmark
perf_process_split = self.algo.perf_tracker.process_split
perf_process_dividend = self.algo.perf_tracker.process_dividend
perf_process_commission = self.algo.perf_tracker.process_commission
perf_process_close_position = \
self.algo.perf_tracker.process_close_position
blotter_process_trade = self.algo.blotter.process_trade
blotter_process_benchmark = self.algo.blotter.process_benchmark
# Containers for the snapshotted events, so that the events are
# processed in a predictable order, without relying on the sorted order
# of the individual sources.
# There is only one benchmark per snapshot, will be set to the current
# benchmark iff it occurs.
benchmark = None
# trades and customs are initialized as a list since process_snapshot
# is most often called on market bars, which could contain trades or
# custom events.
trades = []
customs = []
closes = []
# splits and dividends are processed once a day.
#
        # Leaving these as None until needed mainly signals that they are the
        # infrequent case for this method; the performance benefit of deferring
        # the list allocation is marginal. The splits list is allocated when a
        # split occurs in the snapshot.
splits = None
# dividends list will be allocated when a dividend occurs in the
# snapshot.
dividends = None
for event in snapshot:
if event.type == DATASOURCE_TYPE.TRADE:
trades.append(event)
elif event.type == DATASOURCE_TYPE.BENCHMARK:
benchmark = event
elif event.type == DATASOURCE_TYPE.SPLIT:
if splits is None:
splits = []
splits.append(event)
elif event.type == DATASOURCE_TYPE.CUSTOM:
customs.append(event)
elif event.type == DATASOURCE_TYPE.DIVIDEND:
if dividends is None:
dividends = []
dividends.append(event)
elif event.type == DATASOURCE_TYPE.CLOSE_POSITION:
closes.append(event)
else:
                log.warn("Unrecognized event={0}".format(event))
# Handle benchmark first.
#
# Internal broker implementation depends on the benchmark being
# processed first so that transactions and commissions reported from
# the broker can be injected.
if benchmark is not None:
benchmark_event_occurred = True
perf_process_benchmark(benchmark)
for txn, order in blotter_process_benchmark(benchmark):
if txn.type == DATASOURCE_TYPE.TRANSACTION:
perf_process_transaction(txn)
elif txn.type == DATASOURCE_TYPE.COMMISSION:
perf_process_commission(txn)
perf_process_order(order)
for trade in trades:
self.update_universe(trade)
any_trade_occurred = True
if instant_fill:
events_to_be_processed.append(trade)
else:
for txn, order in blotter_process_trade(trade):
if txn.type == DATASOURCE_TYPE.TRANSACTION:
perf_process_transaction(txn)
elif txn.type == DATASOURCE_TYPE.COMMISSION:
perf_process_commission(txn)
perf_process_order(order)
perf_process_trade(trade)
for custom in customs:
self.update_universe(custom)
for close in closes:
self.update_universe(close)
perf_process_close_position(close)
if splits is not None:
for split in splits:
# process_split is not assigned to a variable since it is
# called rarely compared to the other event processors.
self.algo.blotter.process_split(split)
perf_process_split(split)
if dividends is not None:
for dividend in dividends:
perf_process_dividend(dividend)
if any_trade_occurred:
new_orders = self._call_handle_data()
for order in new_orders:
perf_process_order(order)
if instant_fill:
# Now that handle_data has been called and orders have been placed,
# process the event stream to fill user orders based on the events
# from this snapshot.
for trade in events_to_be_processed:
for txn, order in blotter_process_trade(trade):
if txn is not None:
perf_process_transaction(txn)
if order is not None:
perf_process_order(order)
perf_process_trade(trade)
if benchmark_event_occurred:
return self.generate_messages(dt)
else:
return ()
def _call_handle_data(self):
"""
Call the user's handle_data, returning any orders placed by the algo
during the call.
"""
self.algo.event_manager.handle_data(
self.algo,
self.current_data,
self.simulation_dt,
)
orders = self.algo.blotter.new_orders
self.algo.blotter.new_orders = []
return orders
def _call_before_trading_start(self, dt):
dt = normalize_date(dt)
self.simulation_dt = dt
self.on_dt_changed(dt)
self.algo.before_trading_start(self.current_data)
def on_dt_changed(self, dt):
if self.algo.datetime != dt:
self.algo.on_dt_changed(dt)
def generate_messages(self, dt):
"""
Generator that yields perf messages for the given datetime.
"""
# Ensure that updated_portfolio has been called at least once for this
# dt before we emit a perf message. This is a no-op if
# updated_portfolio has already been called this dt.
self.algo.updated_portfolio()
self.algo.updated_account()
rvars = self.algo.recorded_vars
if self.algo.perf_tracker.emission_rate == 'daily':
perf_message = \
self.algo.perf_tracker.handle_market_close_daily()
perf_message['daily_perf']['recorded_vars'] = rvars
yield perf_message
elif self.algo.perf_tracker.emission_rate == 'minute':
# close the minute in the tracker, and collect the daily message if
# the minute is the close of the trading day
minute_message, daily_message = \
self.algo.perf_tracker.handle_minute_close(dt)
# collect and yield the minute's perf message
minute_message['minute_perf']['recorded_vars'] = rvars
yield minute_message
# if there was a daily perf message, collect and yield it
if daily_message:
daily_message['daily_perf']['recorded_vars'] = rvars
yield daily_message
def update_universe(self, event):
"""
Update the universe with new event information.
"""
# Update our knowledge of this event's sid
# rather than use if event.sid in ..., just trying
# and handling the exception is significantly faster
try:
sid_data = self.current_data[event.sid]
except KeyError:
sid_data = self.current_data[event.sid] = SIDData(event.sid)
sid_data.__dict__.update(event.__dict__)
| apache-2.0 |
dgary50/eovsa | adc_cal.py | 1 | 9411 | #
# Routines to set up system for ADC calibration/DCM attenuation setting
#
# 2016-Feb-20 DG
# First written.
#
import time
import numpy as np
import roach as r
def acc_tune(band):
if type(band) is int:
fsqfile = 'BAND'+str(band)+'.FSQ'
elif type(band) is str:
if band.lower() == 'solar.fsq' or band.lower() == 'pcal.fsq':
fsqfile = band.lower()
else:
print 'Error: Unknown band',band
return
cmds = ['FSEQ-OFF','FSEQ-INIT','WAIT','FSEQ-FILE '+fsqfile.lower(), 'FSEQ-ON']
send_cmds(cmds)
def send_cmds(cmds):
''' Sends a series of commands to ACC. The sequence of commands
is not checked for validity!
cmds a list of strings, each of which must be a valid command
'''
import socket, stateframe
try:
accini = stateframe.rd_ACCfile()
except:
print 'Error: Could not access ACC.'
return
for cmd in cmds:
print 'Command:',cmd
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
s.connect((accini['host'],accini['scdport']))
s.send(cmd)
time.sleep(0.01)
s.close()
except:
print 'Error: Could not send command',cmd,' to ACC.'
return
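# For illustration, a typical manual use of the two helpers above might look
# like the following (the command strings are plain ACC commands and are not
# validated here):
#   acc_tune(5)                      # select BAND5.FSQ and restart the sequence
#   send_cmds(['ND-ON ant1-15'])     # turn on the noise diodes on all antennas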
# Insert 62 dB into FEMs, cycle through bands, get ADC levels (optionally plot results)
# Insert nominal dB into FEMs, cycle through bands, get ADC levels (optionally plot results)
# Turn on ND in FEMs, cycle through bands, get ADC levels (optionally plot results)
# Use ADC level tests to "guess" best DCM attenuation settings
def adc_cal(roach_list,ant_list='ant1-15',do_plot=False):
''' Perform a sequence of FEM settings, using ADC levels to
deduce optimum DCM attenuation settings for all 34 bands.
This can also reveal problems in FEM or DCM hardware.
TAKES ABOUT 17 MINUTES TO COMPLETE
roach_list a set of ROACH objects created with roach.py
ant_list a list of antennas in the form of a string,
e.g. "ant1-5 ant7" on which to adjust FEMs
Default is all antennas, and an empty string
means all antennas in current subarray.
do_plot if True, makes a summary plot of results
Returns numpy arrays :
adc_nosig[34, nroach, 4] (no-signal ADC levels)
adc_ndoff[34, nroach, 4] (ADC levels for ND-OFF)
adc_ndon [34, nroach, 4] (ADC levels for ND-ON)
'''
n = len(roach_list)
adc_nosig = np.zeros((34,n,4),dtype='float')
adc_ndoff = np.zeros((34,n,4),dtype='float')
adc_ndon = np.zeros((34,n,4),dtype='float')
# Set DCM state to standard values
send_cmds(['DCMAUTO-OFF '+ant_list,'DCMATTN 12 12 '+ant_list])
# Set FEM attenuations to maximum
send_cmds(['FEMATTN 15 '+ant_list])
# Cycle through bands to get "zero-input" ADC levels
for band in range(34):
acc_tune(band+1)
time.sleep(3)
r.adc_levels(roach_list)
for i,ro in enumerate(roach_list):
adc_nosig[band,i] = ro.adc_levels
# Set FEM attenuations to nominal
send_cmds(['FEMATTN 0 '+ant_list])
# Cycle through bands to get "nd-on" ADC levels
send_cmds(['ND-ON '+ant_list])
for band in range(34):
acc_tune(band+1)
time.sleep(3)
r.adc_levels(roach_list)
for i,ro in enumerate(roach_list):
adc_ndon[band,i] = ro.adc_levels
# Cycle through bands to get "nd-off" ADC levels
send_cmds(['ND-OFF '+ant_list])
for band in range(34):
acc_tune(band+1)
time.sleep(3)
r.adc_levels(roach_list)
for i,ro in enumerate(roach_list):
adc_ndoff[band,i] = ro.adc_levels
if do_plot:
plot_adc_cal(roach_list, adc_nosig, adc_ndoff, adc_ndon)
return adc_nosig, adc_ndoff, adc_ndon
def plot_adc_cal(roach_list,adc_nosig,adc_ndoff,adc_ndon):
import matplotlib.pylab as plt
n = len(roach_list)
chans = ['X','Y']
f, ax = plt.subplots(n,4)
f.set_size_inches(10,2.5*n, forward=True)
for i in range(n):
rstr = 'Roach'+roach_list[i].roach_ip[5:6]
for j in range(4):
ant = roach_list[i].ants[j / 2]
chan = chans[j % 2]
astr = ' Ant '+str(ant)+chan+':'
ax[i,j].plot(adc_nosig[:,i,j],'.')
ax[i,j].plot(adc_ndoff[:,i,j],'.')
ax[i,j].plot(adc_ndon[:,i,j],'.')
ax[i,j].set_ylim(0, 60)
ax[i,j].text(5,50,rstr+astr,fontsize=10)
plt.show()
def make_DCM_table(roach_list,adc_ndon,dcm_base=12,adc_nom=30):
DCMlines = []
DCMlines.append(' Ant1 Ant2 Ant3 Ant4 Ant5 Ant6 Ant7 Ant8 Ant9 Ant10 Ant11 Ant12 Ant13 Ant14 Ant15')
DCMlines.append(' X Y X Y X Y X Y X Y X Y X Y X Y X Y X Y X Y X Y X Y X Y X Y')
DCMlines.append(' ----- ----- ----- ----- ----- ----- ----- ----- ----- ----- ----- ----- ----- ----- -----')
for band in range(1,35):
out = np.zeros(32,dtype='int') + 12 # Default to 12 dB if not present
for i in range(len(roach_list)):
            # Calculate DCM attenuation for the 4 channels on this roach at this band
# The target standard deviation is adc_nom (default is 30), and the base
# attenuation at which the observations were made is dcm_base (default is 12 dB).
# This uses the ratio of standard deviations to determine the factor in dB
# needed to change it to the target standard deviation. The division by two,
# conversion to integer, and multiplication by 2 is because the attenuation steps
# are in units of 2 dB. The result is clipped to be between 0 and 30 dB.
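            # Worked example (illustrative numbers): a channel measuring an ADC
            # std of 60 at the 12 dB base setting, with adc_nom = 30, gives
            # 10*np.log(60/30) + 12 + 1 = 19.93, which the int()/2*2 rounding
            # below turns into 18 dB (the attenuators step in 2 dB increments).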
ch_atn = np.clip(((10*np.log(adc_ndon[band-1,i,:]/adc_nom)+dcm_base + 1)/2).astype('int')*2,0,30)
# Determine the two antennas on this roach (-1 converts to 0-based index)
ant1,ant2 = roach_list[i].ants - 1
# Use indexes to assign the 4 channels to the right place in the array
out[np.array((ant1*2,ant1*2+1,ant2*2,ant2*2+1))] = ch_atn
DCMlines.append('{:2} : {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2}'.format(band,*out[:30]))
return DCMlines
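# The sketch below is illustrative only and is not called anywhere in this
# module: it strings together the routines above in the order described in the
# comments preceding adc_cal(), assuming roach_list has already been created
# with roach.py.
def _example_dcm_workflow(roach_list):
    # Measure ADC levels with the FEM noise diode on/off and derive a DCM
    # attenuation table targeting ~30 ADC counts at the nominal 12 dB setting.
    adc_nosig, adc_ndoff, adc_ndon = adc_cal(roach_list, ant_list='ant1-15',
                                             do_plot=False)
    dcmlines = make_DCM_table(roach_list, adc_ndon, dcm_base=12, adc_nom=30)
    # Re-measure with the new table applied to verify the resulting levels.
    return adc_check(roach_list, dcmlines, ant_list='ant1-15', do_plot=False)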
def adc_check(roach_list,dcmlines,ant_list='ant1-15',do_plot=False):
    ''' Apply the DCM attenuation table given in dcmlines (e.g. as returned
        by make_DCM_table), cycling through FEM settings and using the
        measured ADC levels to verify the attenuation settings for all 34 bands.
This can also reveal problems in FEM or DCM hardware.
TAKES ABOUT 17 MINUTES TO COMPLETE
roach_list a set of ROACH objects created with roach.py
ant_list a list of antennas in the form of a string,
e.g. "ant1-5 ant7" on which to adjust FEMs
Default is all antennas, and an empty string
means all antennas in current subarray.
do_plot if True, makes a summary plot of results
Returns numpy arrays :
adc_nosig[34, nroach, 4] (no-signal ADC levels)
adc_ndoff[34, nroach, 4] (ADC levels for ND-OFF)
adc_ndon [34, nroach, 4] (ADC levels for ND-ON)
'''
n = len(roach_list)
adc_nosig = np.zeros((34,n,4),dtype='float')
adc_ndoff = np.zeros((34,n,4),dtype='float')
adc_ndon = np.zeros((34,n,4),dtype='float')
# Set DCM state to standard values
send_cmds(['DCMAUTO-OFF '+ant_list,'DCMATTN 12 12 '+ant_list])
# Set FEM attenuations to maximum
send_cmds(['FEMATTN 15 '+ant_list])
# Cycle through bands to get "zero-input" ADC levels
for band in range(34):
acc_tune(band+1)
line = dcmlines[band+3]
for ant in range(1,16):
send_cmds(['DCMATTN'+line[ant*6-1:(ant+1)*6-1]+' ant'+str(ant)])
time.sleep(1)
r.adc_levels(roach_list)
for i,ro in enumerate(roach_list):
adc_nosig[band,i] = ro.adc_levels
# Set FEM attenuations to nominal
send_cmds(['FEMATTN 0 '+ant_list])
# Cycle through bands to get "nd-on" ADC levels
send_cmds(['ND-ON '+ant_list])
for band in range(34):
acc_tune(band+1)
line = dcmlines[band+3]
for ant in range(1,16):
send_cmds(['DCMATTN'+line[ant*6-1:(ant+1)*6-1]+' ant'+str(ant)])
time.sleep(1)
r.adc_levels(roach_list)
for i,ro in enumerate(roach_list):
adc_ndon[band,i] = ro.adc_levels
# Cycle through bands to get "nd-off" ADC levels
send_cmds(['ND-OFF '+ant_list])
for band in range(34):
acc_tune(band+1)
line = dcmlines[band+3]
for ant in range(1,16):
send_cmds(['DCMATTN'+line[ant*6-1:(ant+1)*6-1]+' ant'+str(ant)])
time.sleep(1)
r.adc_levels(roach_list)
for i,ro in enumerate(roach_list):
adc_ndoff[band,i] = ro.adc_levels
if do_plot:
plot_adc_cal(roach_list, adc_nosig, adc_ndoff, adc_ndon)
return adc_nosig, adc_ndoff, adc_ndon
| gpl-2.0 |
gbrammer/grizli | grizli/grismconf.py | 1 | 21571 | """
Demonstrate aXe trace polynomials.
Initial code taken from `(Brammer, Pirzkal, & Ryan 2014) <https://github.com/WFC3Grism/CodeDescription>`_, which contains a detailed
explanation how the grism configuration parameters and coefficients are defined and evaluated.
"""
import os
import numpy as np
from . import GRIZLI_PATH
class aXeConf():
def __init__(self, conf_file='WFC3.IR.G141.V2.5.conf'):
"""Read an aXe-compatible configuration file
Parameters
----------
conf_file: str
Filename of the configuration file to read
"""
if conf_file is not None:
self.conf = self.read_conf_file(conf_file)
self.conf_file = conf_file
self.count_beam_orders()
# Global XOFF/YOFF offsets
if 'XOFF' in self.conf.keys():
self.xoff = np.float(self.conf['XOFF'])
else:
self.xoff = 0.
if 'YOFF' in self.conf.keys():
self.yoff = np.float(self.conf['YOFF'])
else:
self.yoff = 0.
def read_conf_file(self, conf_file='WFC3.IR.G141.V2.5.conf'):
"""Read an aXe config file, convert floats and arrays
Parameters
----------
conf_file: str
Filename of the configuration file to read.
Parameters are stored in an OrderedDict in `self.conf`.
"""
from collections import OrderedDict
conf = OrderedDict()
fp = open(conf_file)
lines = fp.readlines()
fp.close()
for line in lines:
# empty / commented lines
if (line.startswith('#')) | (line.strip() == '') | ('"' in line):
continue
# split the line, taking out ; and # comments
spl = line.split(';')[0].split('#')[0].split()
param = spl[0]
if len(spl) > 2:
value = np.cast[float](spl[1:])
else:
try:
value = float(spl[1])
except:
value = spl[1]
conf[param] = value
return conf
def count_beam_orders(self):
"""Get the maximum polynomial order in DYDX or DLDP for each beam
"""
self.orders = {}
for beam in ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J']:
order = 0
while 'DYDX_{0:s}_{1:d}'.format(beam, order) in self.conf.keys():
order += 1
while 'DLDP_{0:s}_{1:d}'.format(beam, order) in self.conf.keys():
order += 1
self.orders[beam] = order-1
def get_beams(self):
"""Get beam parameters and read sensitivity curves
"""
import os
from collections import OrderedDict
from astropy.table import Table, Column
self.dxlam = OrderedDict()
self.nx = OrderedDict()
self.sens = OrderedDict()
self.beams = []
for beam in self.orders:
if self.orders[beam] > 0:
self.beams.append(beam)
self.dxlam[beam] = np.arange(self.conf['BEAM{0}'.format(beam)].min(), self.conf['BEAM{0}'.format(beam)].max(), dtype=int)
self.nx[beam] = int(self.dxlam[beam].max()-self.dxlam[beam].min())+1
self.sens[beam] = Table.read('{0}/{1}'.format(os.path.dirname(self.conf_file), self.conf['SENSITIVITY_{0}'.format(beam)]))
#self.sens[beam].wave = np.cast[np.double](self.sens[beam]['WAVELENGTH'])
#self.sens[beam].sens = np.cast[np.double](self.sens[beam]['SENSITIVITY'])
# Need doubles for interpolating functions
for col in self.sens[beam].colnames:
data = np.cast[np.double](self.sens[beam][col])
self.sens[beam].remove_column(col)
self.sens[beam].add_column(Column(data=data, name=col))
# Scale BEAM F
if (beam == 'F') & ('G141' in self.conf_file):
self.sens[beam]['SENSITIVITY'] *= 0.35
if (beam == 'B') & ('G141' in self.conf_file):
if self.conf['SENSITIVITY_B'] == 'WFC3.IR.G141.0th.sens.1.fits':
self.sens[beam]['SENSITIVITY'] *= 2
# wave = np.cast[np.double](self.sens[beam]['WAVELENGTH'])
# sens = np.cast[np.double](self.sens[beam]['SENSITIVITY']
# self.sens[beam]['WAVELENGTH'] = np.cast[np.double](self.sens[beam]['WAVELENGTH'])
# self.sens[beam]['SENSITIVITY'] = )
self.beams.sort()
def field_dependent(self, xi, yi, coeffs):
"""aXe field-dependent coefficients
See the `aXe manual <http://axe.stsci.edu/axe/manual/html/node7.html#SECTION00721200000000000000>`_ for a description of how the field-dependent coefficients are specified.
Parameters
----------
xi, yi : float or array-like
Coordinate to evaluate the field dependent coefficients, where
`xi = x-REFX` and `yi = y-REFY`.
coeffs : array-like
Field-dependency coefficients
Returns
-------
a : float or array-like
Evaluated field-dependent coefficients
"""
# number of coefficients for a given polynomial order
# 1:1, 2:3, 3:6, 4:10, order:order*(order+1)/2
if isinstance(coeffs, float):
order = 1
else:
order = int(-1+np.sqrt(1+8*len(coeffs))) // 2
# Build polynomial terms array
# $a = a_0+a_1x_i+a_2y_i+a_3x_i^2+a_4x_iy_i+a_5yi^2+$ ...
xy = []
for p in range(order):
for px in range(p+1):
# print 'x**%d y**%d' %(p-px, px)
xy.append(xi**(p-px)*yi**(px))
# Evaluate the polynomial, allowing for N-dimensional inputs
a = np.sum((np.array(xy).T*coeffs).T, axis=0)
return a
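    # Illustrative sketch (added; not in the original source): six coefficients
    # correspond to a second-order field dependence,
    #   a = c[0] + c[1]*xi + c[2]*yi + c[3]*xi**2 + c[4]*xi*yi + c[5]*yi**2
    # so, e.g., field_dependent(10., 20., np.array([1., 2., 3., 4., 5., 6.]))
    # evaluates to 1 + 20 + 60 + 400 + 1000 + 2400 = 3881.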
def evaluate_dp(self, dx, dydx):
"""Evalate arc length along the trace given trace polynomial coefficients
Parameters
----------
dx : array-like
x pixel to evaluate
dydx : array-like
Coefficients of the trace polynomial
Returns
-------
dp : array-like
Arc length along the trace at position `dx`.
For `dydx` polynomial orders 0, 1 or 2, integrate analytically.
Higher orders must be integrated numerically.
**Constant:**
.. math:: dp = dx
**Linear:**
        .. math:: dp = \sqrt{1+\mathrm{DYDX}[1]^2}\cdot dx
**Quadratic:**
.. math:: u = \mathrm{DYDX}[1] + 2\ \mathrm{DYDX}[2]\cdot dx
.. math:: dp = (u \sqrt{1+u^2} + \mathrm{arcsinh}\ u) / (4\cdot \mathrm{DYDX}[2])
"""
# dp is the arc length along the trace
# $\lambda = dldp_0 + dldp_1 dp + dldp_2 dp^2$ ...
poly_order = len(dydx)-1
if (poly_order == 2):
if np.abs(np.unique(dydx[2])).max() == 0:
poly_order = 1
if poly_order == 0: # dy=0
dp = dx
elif poly_order == 1: # constant dy/dx
dp = np.sqrt(1+dydx[1]**2)*(dx)
elif poly_order == 2: # quadratic trace
u0 = dydx[1]+2*dydx[2]*(0)
dp0 = (u0*np.sqrt(1+u0**2)+np.arcsinh(u0))/(4*dydx[2])
u = dydx[1]+2*dydx[2]*(dx)
dp = (u*np.sqrt(1+u**2)+np.arcsinh(u))/(4*dydx[2])-dp0
else:
# high order shape, numerical integration along trace
# (this can be slow)
xmin = np.minimum((dx).min(), 0)
xmax = np.maximum((dx).max(), 0)
xfull = np.arange(xmin, xmax)
dyfull = 0
for i in range(1, poly_order):
dyfull += i*dydx[i]*(xfull-0.5)**(i-1)
# Integrate from 0 to dx / -dx
dpfull = xfull*0.
lt0 = xfull < 0
if lt0.sum() > 1:
dpfull[lt0] = np.cumsum(np.sqrt(1+dyfull[lt0][::-1]**2))[::-1]
dpfull[lt0] *= -1
#
gt0 = xfull > 0
if gt0.sum() > 0:
dpfull[gt0] = np.cumsum(np.sqrt(1+dyfull[gt0]**2))
dp = np.interp(dx, xfull, dpfull)
if dp[-1] == dp[-2]:
dp[-1] = dp[-2]+np.diff(dp)[-2]
return dp
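    # Hedged example of the linear case above: with dydx = [0.0, 0.1] the
    # trace has constant slope 0.1, so dp = sqrt(1 + 0.1**2)*dx ~ 1.005*dx,
    # e.g. evaluate_dp(np.array([0., 10.]), [0.0, 0.1]) ~ [0., 10.05].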
def get_beam_trace(self, x=507, y=507, dx=0., beam='A', fwcpos=None):
"""Get an aXe beam trace for an input reference pixel and list of output x pixels `dx`
Parameters
----------
x, y : float or array-like
Evaluate trace definition at detector coordinates `x` and `y`.
dx : float or array-like
Offset in x pixels from `(x,y)` where to compute trace offset and
effective wavelength
beam : str
Beam name (i.e., spectral order) to compute. By aXe convention,
`beam='A'` is the first order, 'B' is the zeroth order and
additional beams are the higher positive and negative orders.
fwcpos : None or float
For NIRISS, specify the filter wheel position to compute the
trace rotation
Returns
-------
dy : float or array-like
Center of the trace in y pixels offset from `(x,y)` evaluated at
`dx`.
lam : float or array-like
Effective wavelength along the trace evaluated at `dx`.
"""
NORDER = self.orders[beam]+1
xi, yi = x-self.xoff, y-self.yoff
xoff_beam = self.field_dependent(xi, yi, self.conf['XOFF_{0}'.format(beam)])
yoff_beam = self.field_dependent(xi, yi, self.conf['YOFF_{0}'.format(beam)])
# y offset of trace (DYDX)
dydx = np.zeros(NORDER) # 0 #+1.e-80
dydx = [0]*NORDER
for i in range(NORDER):
if 'DYDX_{0:s}_{1:d}'.format(beam, i) in self.conf.keys():
coeffs = self.conf['DYDX_{0:s}_{1:d}'.format(beam, i)]
dydx[i] = self.field_dependent(xi, yi, coeffs)
# $dy = dydx_0+dydx_1 dx+dydx_2 dx^2+$ ...
dy = yoff_beam
for i in range(NORDER):
dy += dydx[i]*(dx-xoff_beam)**i
# wavelength solution
dldp = np.zeros(NORDER)
dldp = [0]*NORDER
for i in range(NORDER):
if 'DLDP_{0:s}_{1:d}'.format(beam, i) in self.conf.keys():
coeffs = self.conf['DLDP_{0:s}_{1:d}'.format(beam, i)]
dldp[i] = self.field_dependent(xi, yi, coeffs)
self.eval_input = {'x': x, 'y': y, 'beam': beam, 'dx': dx,
'fwcpos': fwcpos}
self.eval_output = {'xi': xi, 'yi': yi, 'dldp': dldp, 'dydx': dydx,
'xoff_beam': xoff_beam, 'yoff_beam': yoff_beam,
'dy': dy}
dp = self.evaluate_dp(dx-xoff_beam, dydx)
# ## dp is the arc length along the trace
# ## $\lambda = dldp_0 + dldp_1 dp + dldp_2 dp^2$ ...
# if self.conf['DYDX_ORDER_%s' %(beam)] == 0: ## dy=0
# dp = dx-xoff_beam
# elif self.conf['DYDX_ORDER_%s' %(beam)] == 1: ## constant dy/dx
# dp = np.sqrt(1+dydx[1]**2)*(dx-xoff_beam)
# elif self.conf['DYDX_ORDER_%s' %(beam)] == 2: ## quadratic trace
# u0 = dydx[1]+2*dydx[2]*(0)
# dp0 = (u0*np.sqrt(1+u0**2)+np.arcsinh(u0))/(4*dydx[2])
# u = dydx[1]+2*dydx[2]*(dx-xoff_beam)
# dp = (u*np.sqrt(1+u**2)+np.arcsinh(u))/(4*dydx[2])-dp0
# else:
# ## high order shape, numerical integration along trace
# ## (this can be slow)
# xmin = np.minimum((dx-xoff_beam).min(), 0)
# xmax = np.maximum((dx-xoff_beam).max(), 0)
# xfull = np.arange(xmin, xmax)
# dyfull = 0
# for i in range(1, NORDER):
# dyfull += i*dydx[i]*(xfull-0.5)**(i-1)
#
# ## Integrate from 0 to dx / -dx
# dpfull = xfull*0.
# lt0 = xfull <= 0
# if lt0.sum() > 1:
# dpfull[lt0] = np.cumsum(np.sqrt(1+dyfull[lt0][::-1]**2))[::-1]
# dpfull[lt0] *= -1
# #
# gt0 = xfull >= 0
# if gt0.sum() > 0:
# dpfull[gt0] = np.cumsum(np.sqrt(1+dyfull[gt0]**2))
#
# dp = np.interp(dx-xoff_beam, xfull, dpfull)
# Evaluate dldp
lam = dp*0.
for i in range(NORDER):
lam += dldp[i]*dp**i
# NIRISS rotation?
if fwcpos is not None:
if 'FWCPOS_REF' not in self.conf.keys():
print('Parameter fwcpos={0} supplied but no FWCPOS_REF in {1:s}'.format(fwcpos, self.conf_file))
return dy, lam
order = self.conf['DYDX_ORDER_{0}'.format(beam)]
if order != 2:
print('ORDER={0:d} not supported for NIRISS rotation'.format(order))
return dy, lam
theta = (fwcpos - self.conf['FWCPOS_REF'])/180*np.pi*1
theta *= -1 # DMS rotation
# print('DMS')
if theta == 0:
return dy, lam
# For the convention of swapping/inverting axes for GR150C
# if 'GR150C' in self.conf_file:
# theta = -theta
# If theta is small, use a small angle approximation.
# Otherwise, 1./tan(theta) blows up and results in numerical
# noise.
xp = (dx-xoff_beam)/np.cos(theta)
if (1-np.cos(theta) < 5.e-8):
#print('Approximate!', xoff_beam, np.tan(theta))
dy = dy + (dx-xoff_beam)*np.tan(theta)
delta = 0.
# print('Approx')
else:
# Full transformed trace coordinates
c = dydx
#print('Not approx')
beta = c[1]+2*c[2]*xp-1/np.tan(theta)
chi = c[0]+c[1]*xp+c[2]*xp**2
if theta < 0:
psi = (-beta+np.sqrt(beta**2-4*c[2]*chi))
psi *= 1./2/c[2]/np.tan(theta)
delta = psi*np.tan(theta)
dy = dx*np.tan(theta) + psi/np.cos(theta)
else:
psi = (-beta-np.sqrt(beta**2-4*c[2]*chi))
psi *= 1./2/c[2]/np.tan(theta)
delta = psi*np.tan(theta)
dy = dx*np.tan(theta) + psi/np.cos(theta)
            # Evaluate wavelength at the rotated (primed) position along the trace
dp = self.evaluate_dp(xp+delta, dydx)
lam = dp*0.
for i in range(NORDER):
lam += dldp[i]*dp**i
return dy, lam
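    # Minimal usage sketch (assumes the configuration file has been read and
    # that np is the module-level numpy import):
    #   >>> conf = aXeConf('WFC3.IR.G141.V2.5.conf')
    #   >>> dy, lam = conf.get_beam_trace(x=507, y=507,
    #   ...                               dx=np.arange(0, 180), beam='A')
    # `dy` is the trace center offset in pixels and `lam` the effective
    # wavelength at each `dx` (typically Angstroms for the HST config files).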
def show_beams(self, beams=['E', 'D', 'C', 'B', 'A']):
"""
Make a demo plot of the beams of a given configuration file
"""
import matplotlib.pyplot as plt
x0, x1 = 507, 507
dx = np.arange(-800, 1200)
if 'WFC3.UV' in self.conf_file:
x0, x1 = 2073, 250
dx = np.arange(-1200, 1200)
if 'G800L' in self.conf_file:
x0, x1 = 2124, 1024
dx = np.arange(-1200, 1200)
s = 200 # marker size
fig = plt.figure(figsize=[10, 3])
plt.scatter(0, 0, marker='s', s=s, color='black', edgecolor='0.8',
label='Direct')
for beam in beams:
if 'XOFF_{0}'.format(beam) not in self.conf.keys():
continue
xoff = self.field_dependent(x0, x1, self.conf['XOFF_{0}'.format(beam)])
dy, lam = self.get_beam_trace(x0, x1, dx=dx, beam=beam)
xlim = self.conf['BEAM{0}'.format(beam)]
ok = (dx >= xlim[0]) & (dx <= xlim[1])
plt.scatter(dx[ok]+xoff, dy[ok], c=lam[ok]/1.e4, marker='s', s=s,
alpha=0.5, edgecolor='None')
plt.text(np.median(dx[ok]), np.median(dy[ok])+1, beam,
ha='center', va='center', fontsize=14)
print('Beam {0}, lambda=({1:.1f} - {2:.1f})'.format(beam, lam[ok].min(), lam[ok].max()))
plt.grid()
plt.xlabel(r'$\Delta x$')
plt.ylabel(r'$\Delta y$')
cb = plt.colorbar(pad=0.01, fraction=0.05)
cb.set_label(r'$\lambda\,(\mu\mathrm{m})$')
plt.title(self.conf_file)
plt.tight_layout()
plt.savefig('{0}.pdf'.format(self.conf_file))
def get_config_filename(instrume='WFC3', filter='F140W',
grism='G141', module=None, chip=1):
"""Generate a config filename based on the instrument, filter & grism combination.
Config files assumed to be found the directory specified by the `$GRIZLI`
environment variable, i.e., `${GRIZLI}/CONF`.
Parameters
----------
instrume : {'ACS', 'WFC3', 'NIRISS', 'NIRCam', 'WFIRST'}
Instrument used
filter : str
Direct image filter. This is only used for WFC3/IR, where the grism
configuration files have been determined for each direct+grism
combination separately based on the filter wedge offsets of the
filters.
grism : str
Grism name. Valid combinations are the following:
ACS : G800L (assumed)
WFC3 : G102, G141
NIRISS : GR150R, GR150C
NIRCam : F322W2, F356W, F430M, F444W, F460M
WFIRST : (basic assumptions about the WFI grism)
chip : int
For ACS/WFC and UVIS, specifies the chip to use. Note that this
is switched with respect to the header EXTNAME extensions:
EXTVER = 1 is extension 1 / (SCI,1) of the flt/flc files but
corresponds to CCDCHIP = 2 and the ACS.WFC3.CHIP2 config files.
and
EXTVER = 2 is extension 4 / (SCI,2) of the flt/flc files but
corresponds to CCDCHIP = 1 and the ACS.WFC3.CHIP1 config files.
Returns
-------
conf_file : str
String path of the configuration file.
"""
if instrume == 'ACS':
conf_file = os.path.join(GRIZLI_PATH,
'CONF/ACS.WFC.CHIP{0:d}.Stars.conf'.format(chip))
if not os.path.exists(conf_file):
conf_file = os.path.join(GRIZLI_PATH,
'CONF/ACS.WFC.CHIP{0:d}.Cycle13.5.conf'.format(chip))
if instrume == 'WFC3':
if grism == 'G280':
conf_file = os.path.join(GRIZLI_PATH, 'CONF/G280/',
'WFC3.UVIS.G280.cal/WFC3.UVIS.G280.CHIP{0:d}.V2.0.conf'.format(chip))
return conf_file
conf_file = os.path.join(GRIZLI_PATH,
'CONF/{0}.{1}.V4.32.conf'.format(grism, filter))
# When direct + grism combination not found for WFC3 assume F140W
if not os.path.exists(conf_file):
conf_file = os.path.join(GRIZLI_PATH,
'CONF/{0}.{1}.V4.32.conf'.format(grism, 'F140W'))
if instrume == 'NIRISS':
conf_file = os.path.join(GRIZLI_PATH,
'CONF/{0}.{1}.conf'.format(grism, filter))
if not os.path.exists(conf_file):
print('CONF/{0}.{1}.conf'.format(grism, filter))
conf_file = os.path.join(GRIZLI_PATH,
'CONF/NIRISS.{0}.conf'.format(filter))
# if instrume == 'NIRCam':
# conf_file = os.path.join(GRIZLI_PATH,
# 'CONF/aXeSIM_NC_2016May/CONF/NIRCam_LWAR_{0}.conf'.format(grism))
if instrume in ['NIRCAM']:
#conf_file = os.path.join(GRIZLI_PATH,
# f'CONF/NIRCam.A.{filter}.{grism}.conf')
fi = grism
gr = filter[-1] # R, C
conf_file = os.path.join(GRIZLI_PATH,
f'CONF/GRISM_NIRCAM/gNIRCAM.{fi}.mod{module}.{gr}.conf')
elif instrume == 'NIRCAMA':
fi = grism
gr = filter[-1] # R, C
conf_file = os.path.join(GRIZLI_PATH,
f'CONF/GRISM_NIRCAM/gNIRCAM.{fi}.modA.{gr}.conf')
#conf_file = os.path.join(GRIZLI_PATH,
# f'CONF/NIRCam.B.{filter}.{grism}.conf')
elif instrume == 'NIRCAMB':
fi = grism
gr = filter[-1] # R, C
conf_file = os.path.join(GRIZLI_PATH,
f'CONF/GRISM_NIRCAM/gNIRCAM.{fi}.modB.{gr}.conf')
#conf_file = os.path.join(GRIZLI_PATH,
# f'CONF/NIRCam.B.{filter}.{grism}.conf')
if instrume == 'WFIRST':
conf_file = os.path.join(GRIZLI_PATH, 'CONF/WFIRST.conf')
if instrume == 'WFI':
conf_file = os.path.join(GRIZLI_PATH, 'CONF/Roman.G150.conf')
if instrume == 'SYN':
conf_file = os.path.join(GRIZLI_PATH, 'CONF/syn.conf')
# Euclid NISP, config files @
# http://www.astrodeep.eu/euclid-spectroscopic-simulations/
if instrume == 'NISP':
if grism == 'BLUE':
conf_file = os.path.join(GRIZLI_PATH, 'CONF/Euclid.Gblue.0.conf')
else:
conf_file = os.path.join(GRIZLI_PATH, 'CONF/Euclid.Gred.0.conf')
return conf_file
def load_grism_config(conf_file):
"""Load parameters from an aXe configuration file
Parameters
----------
conf_file : str
Filename of the configuration file
Returns
-------
conf : `~grizli.grismconf.aXeConf`
Configuration file object. Runs `conf.get_beams()` to read the
sensitivity curves.
"""
conf = aXeConf(conf_file)
conf.get_beams()
return conf
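# Usage sketch (assumes the relevant files exist under ${GRIZLI}/CONF):
#   >>> conf_file = get_config_filename('WFC3', 'F140W', 'G141')
#   >>> conf = load_grism_config(conf_file)
#   >>> conf.show_beams(beams=['A', 'B'])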
| mit |
dmsuehir/spark-tk | regression-tests/sparktkregtests/testcases/frames/frame_join_test.py | 2 | 8475 | # vim: set encoding=utf-8
# Copyright (c) 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Test column naming and quantity of joins"""
import unittest
import sys
import os
from sparktkregtests.lib import sparktk_test
from sparktk import dtypes
from py4j.protocol import Py4JJavaError
class JoinTest(sparktk_test.SparkTKTestCase):
def setUp(self):
"""Set up frames to be build """
super(JoinTest, self).setUp()
block_data = [[0, "sifuri"],
[1, "moja"],
[2, "mbili"],
[3, "tatu"],
[4, "nne"],
[5, "tano"]]
right_data = [[6, "sita"],
[7, "saba"],
[8, "nane"],
[9, "tisa"],
[10, "kumi"]]
schema = [("idnum", int), ("count", str)]
self.frame = self.context.frame.create(data=block_data, schema=schema)
self.right_frame = self.context.frame.create(data=right_data,
schema=schema)
self.empty_frame = self.context.frame.create(data=[], schema=schema)
def test_name_collision(self):
"""Test joining repeatedly doesn't collide """
# Repeatedly run join to force collisions
frame = self.frame.join_inner(
self.frame, left_on="idnum", right_on="idnum")
frame = frame.join_inner(
self.frame, left_on="idnum", right_on="idnum")
frame = frame.join_inner(
self.frame, left_on="idnum", right_on="idnum")
baseline = [u'idnum', u'count_L', u'count_R',
u'count_L_L', u'count_R_R']
self.assertItemsEqual(frame.column_names, baseline)
def test_type_int32(self):
"""Test join on int32"""
joined_frame = self.frame.join_inner(self.frame, "idnum")
pd_joined_sparktk = joined_frame.to_pandas(joined_frame.count())
pd_df = self.frame.to_pandas(self.frame.count())
joined_pd = pd_df.merge(
pd_df, on='idnum', suffixes=('_L', '_R'), how='inner')
del pd_joined_sparktk['idnum']
del joined_pd['idnum']
self.assertItemsEqual(
joined_pd.values.tolist(), pd_joined_sparktk.values.tolist())
def test_empty_partner_left(self):
"""Join with empty frame Left"""
join_frame = self.empty_frame.join_outer(self.frame, "idnum")
self.assertEquals(6, join_frame.count())
def test_empty_partner_right(self):
"""Join with empty frame Right"""
join_frame = self.frame.join_outer(self.empty_frame, "idnum")
self.assertEquals(6, join_frame.count())
def test_empty_partner_inner(self):
"""Join with empty frame Inner"""
join_frame = self.empty_frame.join_inner(
self.empty_frame, "idnum")
self.assertEquals(0, join_frame.count())
def test_disjoint_outer(self):
"""Test with no overlaps in the join column, outer join"""
join_frame = self.frame.join_outer(self.right_frame, "idnum")
self.assertEquals(11, join_frame.count())
def test_disjoint_left(self):
"""Test with no overlaps in the join column, left join"""
join_frame = self.frame.join_left(self.right_frame, "idnum")
self.assertEquals(6, join_frame.count())
def test_disjoint_right(self):
"""Test with no overlaps in the join column, right join"""
join_frame = self.frame.join_right(self.right_frame, "idnum")
self.assertEquals(5, join_frame.count())
def test_disjoint_inner(self):
"""Test with no overlaps in the join column, inner join"""
join_frame = self.frame.join_inner(self.right_frame, "idnum")
self.assertEquals(0, join_frame.count())
@unittest.skip("Compatibility check is changing")
def test_type_compatible(self):
"""Check compatibility among the numeric types"""
block_data = [
[0, "sifuri"],
[1, "moja"],
[2, "mbili"],
[3, "tatu"],
[4, "nne"],
[5, "tano"]
]
def complete_data_left(row):
return block_data[int(row.idnum)]
# Create a frame indexed by each of the numeric types
int32_frame = frame_utils.build_frame(
block_data,
[("idnum", int), ("count", str)],
self.prefix, file_format="list")
int64_frame = frame_utils.build_frame(
block_data,
[("idnum", int64), ("count", str)],
self.prefix, file_format="list")
flt32_frame = frame_utils.build_frame(
block_data,
[("idnum", float), ("count", str)],
self.prefix, file_format="list")
flt64_frame = frame_utils.build_frame(
block_data,
[("idnum", float), ("count", str)],
self.prefix, file_format="list")
# Try each join combination;
# make sure each type gets to be left & right at least once.
# float32 & float64,
# int32 & int64 are compatible pairs.
join_i32_i64 = int32_frame.join(int64_frame, "idnum")
print join_i32_i64.inspect()
self.assertEquals(int32_frame.count(), join_i32_i64.count())
join_i32_f32 = int32_frame.join(flt32_frame, "idnum")
print join_i32_f32.inspect()
self.assertEquals(int32_frame.count(), join_i32_f32.count())
# int and float are not compatible with each other.
with(self.assertRaisesRegexp(
Exception,
"Join columns must have compatible data types")):
flt32_frame.join(int32_frame, "idnum")
with(self.assertRaisesRegexp(
Exception,
"Join columns must have compatible data types")):
flt32_frame.join(int64_frame, "idnum")
with(self.assertRaisesRegexp(
Exception,
"Join columns must have compatible data types")):
flt64_frame.join(int32_frame, "idnum")
with(self.assertRaisesRegexp(
Exception,
"Join columns must have compatible data types")):
flt64_frame.join(int64_frame, "idnum")
def test_type_fail(self):
"""test join fails on mismatched type"""
bad_schema = [("a", str), ('idnum', str)]
frame2 = self.context.frame.create(data=[], schema=bad_schema)
with(self.assertRaisesRegexp(
Exception,
"Join columns must have compatible data types")):
self.frame.join_inner(frame2, "idnum")
def test_join_no_column_left(self):
""" test join faults on invalid left column"""
with(self.assertRaisesRegexp(
Exception,
"No column named no_such_column")):
self.frame.join_inner(
self.frame, left_on="no_such_column", right_on="idnum")
def test_join_no_column_right(self):
""" test join faults on ivalid right column"""
with(self.assertRaisesRegexp(
Exception,
"No column named no_such_column")):
self.frame.join_inner(
self.frame, left_on="idnum", right_on="no_such_column")
def test_join_no_column_either(self):
""" test join faults on invalid left and right column"""
with(self.assertRaisesRegexp(Exception, "No column named no_column")):
self.frame.join_inner(
self.frame, left_on="no_column", right_on="no_column")
def test_join_empty_column(self):
""" test join faults with empty string for column"""
with(self.assertRaisesRegexp(Exception, "No column named")):
self.frame.join_inner(self.frame, left_on="idnum", right_on="")
if __name__ == "__main__":
unittest.main()
| apache-2.0 |
deeuu/loudness | python/tests/test_PowerSpectrum.py | 1 | 2584 | import matplotlib.pyplot as plt
import numpy as np
import loudness as ln
# Input setup
fs = 32000
frameSizes = np.array([2048, 1024, 512])
nSources = 1 # hard-coded for one source
nEars = 2 # hard-coded for two ears
nBands = frameSizes.size
x = np.random.randn(nSources, nEars, nBands, frameSizes[0])
for frame in range(1, nBands):
x[0, 0, frame, frameSizes[frame]:] = 0
x[0, 1, frame, frameSizes[frame]:] = 0
x /= np.max(np.abs(x), -1).reshape((nSources, nEars, frameSizes.size, 1))
# Initialisation
inputBuf = ln.SignalBank()
inputBuf.initialize(nSources, nEars, nBands, frameSizes[0], fs)
inputBuf.setSignals(x)
# Power spectrum setup
bandFreqs = np.array([10, 500, 5000, 15001])
uniform = True
spectrumModule = ln.PowerSpectrum(bandFreqs,
frameSizes,
uniform,
ln.PowerSpectrum.AVERAGE_POWER,
1.0)
spectrumModule.initialize(inputBuf)
spectrumBank = spectrumModule.getOutput()
spectrumLoudness = spectrumBank.getSignals()
# Processing
spectrumModule.process(inputBuf)
# numpy side
fftSizes = np.zeros(nBands, dtype='int')
for band in range(nBands):
if uniform:
fftSizes[band] = int(2 ** np.ceil(np.log2(frameSizes[0])))
else:
fftSizes[band] = int(2 ** np.ceil(np.log2(frameSizes[band])))
bandBinIndices = np.zeros((nBands, 2), dtype='int')  # integer bin indices for slicing
nBins = 0
for band in range(nBands):
bandBinIndices[band][0] = np.ceil(
bandFreqs[band] * fftSizes[band] / float(fs))
bandBinIndices[band][1] = np.ceil(
bandFreqs[band + 1] * fftSizes[band] / float(fs)) - 1
nBins += bandBinIndices[band][1] - bandBinIndices[band][0] + 1
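# Worked example for the first band, following the setup above:
# fs = 32000 and the uniform FFT size is 2048, so for band edges 10-500 Hz
#   lower bin = ceil(10 * 2048 / 32000) = 1
#   upper bin = ceil(500 * 2048 / 32000) - 1 = 31
# giving 31 bins for band 0.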
spectrumNumpy = np.zeros((nEars, nBins))
idxLoIn = 0
for band in range(nBands):
X = np.fft.fft(x[0, :, band, 0:frameSizes[band]], fftSizes[band])
idxLo = bandBinIndices[band][0]
idxHi = bandBinIndices[band][1] + 1
idxHiIn = idxLoIn + (idxHi - idxLo)
spectrumNumpy[0, idxLoIn:idxHiIn] = (2 * np.abs(X[0, idxLo:idxHi]) ** 2
/ (frameSizes[band] * fftSizes[band]))
spectrumNumpy[1, idxLoIn:idxHiIn] = (2 * np.abs(X[1, idxLo:idxHi]) ** 2
/ (frameSizes[band] * fftSizes[band]))
idxLoIn = idxHiIn
# check
plt.plot(spectrumLoudness[0, 0, :, 0])
plt.plot(spectrumNumpy[0, :])
plt.show()
if np.allclose(spectrumLoudness[0, :, :, 0], spectrumNumpy):
print "Numpy vs loudness power spectrum test: successful"
else:
print "Numpy vs loudness power spectrum test: unsuccessful"
| gpl-3.0 |
silky/sms-tools | lectures/05-Sinusoidal-model/plots-code/synthesis-window.py | 22 | 1725 | import numpy as np
import matplotlib.pyplot as plt
from scipy.signal import hamming, triang, blackmanharris
import sys, os, functools, time
from scipy.fftpack import fft, ifft, fftshift
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../software/models/'))
import dftModel as DFT
import utilFunctions as UF
(fs, x) = UF.wavread('../../../sounds/oboe-A4.wav')
M = 601
w = np.blackman(M)
N = 1024
hN = N/2
Ns = 512
hNs = Ns/2
H = Ns/4
pin = 5000
t = -70
x1 = x[pin:pin+w.size]
mX, pX = DFT.dftAnal(x1, w, N)
ploc = UF.peakDetection(mX, t)
iploc, ipmag, ipphase = UF.peakInterp(mX, pX, ploc)
freqs = iploc*fs/N
Y = UF.genSpecSines(freqs, ipmag, ipphase, Ns, fs)
mY = 20*np.log10(abs(Y[:hNs]))
pY = np.unwrap(np.angle(Y[:hNs]))
y= fftshift(ifft(Y))*sum(blackmanharris(Ns))
sw = np.zeros(Ns)
ow = triang(2*H);
sw[hNs-H:hNs+H] = ow
bh = blackmanharris(Ns)
bh = bh / sum(bh)
sw[hNs-H:hNs+H] = sw[hNs-H:hNs+H] / bh[hNs-H:hNs+H]
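# Descriptive note (added for clarity): sw is the overlap-add synthesis window,
# a triangular window of length 2*H divided by the Blackman-Harris analysis
# window over the central 2*H samples, so that frames hopped by H = Ns/4
# overlap-add to an approximately constant gain.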
plt.figure(1, figsize=(9, 6))
plt.subplot(3,1,1)
plt.plot(np.arange(hNs), mY, 'r', lw=1.5)
plt.axis([0, hNs,-90,max(mY)+2])
plt.title("mY, Blackman-Harris, Ns = 512")
plt.subplot(3,1,2)
plt.plot(np.arange(-hNs,hNs), y, 'b', lw=1.5)
plt.plot(np.arange(-hNs,hNs), max(y)*bh/max(bh), 'k', alpha=.5,lw=1.5)
plt.axis([-hNs, hNs,min(y),max(y)+.1])
plt.title("y, size = Ns = 512 (Blackman-Harris window)")
yw = y * sw / max(sw)
plt.subplot(3,1,3)
plt.plot(np.arange(-hNs,hNs), yw, 'b',lw=1.5)
plt.plot(np.arange(-hNs/2,hNs/2), max(y)*ow/max(ow), 'k', alpha=.5,lw=1.5)
plt.axis([-hNs, hNs,min(yw),max(yw)+.1])
plt.title("yw = y * triangular / Blackman Harris; size = Ns/2 = 256")
plt.tight_layout()
plt.savefig('synthesis-window.png')
plt.show()
| agpl-3.0 |
d00d/quantNotebooks | Notebooks/strategies/from quantopian.algorithm import attach_pipeline,.py | 1 | 4644 | from quantopian.algorithm import attach_pipeline, pipeline_output
from quantopian.pipeline import Pipeline
from quantopian.pipeline.data.builtin import USEquityPricing
from quantopian.pipeline.factors import CustomFactor, SimpleMovingAverage
from quantopian.pipeline.data import morningstar
import pandas as pd
import numpy as np
v = morningstar.valuation
# --- Liquidity Factor ---
class AvgDailyDollarVolumeTraded(CustomFactor):
inputs = [USEquityPricing.close, USEquityPricing.volume]
window_length = 20
def compute(self, today, assets, out, close_price, volume):
out[:] = np.mean(close_price * volume, axis=0)
# --- Value & Growth Factor ---
class Value(CustomFactor):
#EV_To_Sales_SalesGrowth_12M
inputs = [morningstar.income_statement.total_revenue, v.enterprise_value]
window_length = 252
def compute(self, today, assets, out, sales, ev):
out[:] = ev[-1] / ((sales[-1] * 4)/(((sales[-1] * 4) - (sales[0]) * 4) / (sales[0] * 4)))
# --- Momentum Factor ---
# --- 9/13: Modified Momentum factor to include (I/S)*LT scheme (I=50d, S=20d, LT=140d)
class Momentum(CustomFactor):
inputs = [USEquityPricing.close]
window_length = 140
def compute(self, today, assets, out, close):
out[:] = ((close[-1] / close[-50]) / (close[-1] / (close[-20]))* close[-1])
# --- Quality Factor ---
class Quality(CustomFactor):
inputs = [morningstar.operation_ratios.roe]
window_length = 1
def compute(self, today, assets, out, roe):
out[:] = roe[-1]
# --- Volatility Factor ---
#-- 9/13 High Alpha Mean Reversion on 12M & 3M volatility
class Volatility(CustomFactor):
inputs = [USEquityPricing.close]
window_length = 252
def compute(self, today, assets, out, close):
close = pd.DataFrame(data=close, columns=assets)
# Since we are going to rank largest is best we need to invert the sdev.
out[:] = 1 / np.log(close).diff().std()
# Compute final rank and assign long and short baskets.
def before_trading_start(context, data):
results = pipeline_output('factors').dropna()
ranks = results.rank().mean(axis=1).order()
context.shorts = 1 / ranks.head(200)
context.shorts /= context.shorts.sum()
context.longs = ranks.tail(200)
context.longs /= context.longs.sum()
update_universe(context.longs.index + context.shorts.index)
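# Explanatory note (added): securities with the lowest combined factor rank
# receive the largest short weights (weights proportional to 1/rank, then
# normalized to sum to 1), while the 200 highest-ranked securities are held
# long with weights proportional to their rank.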
# Put any initialization logic here. The context object will be passed to
# the other methods in your algorithm.
def initialize(context):
pipe = Pipeline()
pipe = attach_pipeline(pipe, name='factors')
pipe.add(Value(), "value")
pipe.add(Momentum(), "momentum")
pipe.add(Quality(), "quality")
pipe.add(Volatility(), "volatility")
sma_200 = SimpleMovingAverage(inputs=[USEquityPricing.close], window_length=200)
dollar_volume = AvgDailyDollarVolumeTraded()
# Screen out penny stocks and low liquidity securities.
pipe.set_screen((sma_200 > 5) & (dollar_volume > 10**7))
context.spy = sid(8554)
context.shorts = None
context.longs = None
schedule_function(rebalance, date_rules.month_start())
schedule_function(cancel_open_orders, date_rules.every_day(),
time_rules.market_close())
# Will be called on every trade event for the securities you specify.
def handle_data(context, data):
record(lever=context.account.leverage,
exposure=context.account.net_leverage,
num_pos=len(context.portfolio.positions),
oo=len(get_open_orders()))
def cancel_open_orders(context, data):
for security in get_open_orders():
for order in get_open_orders(security):
cancel_order(order)
def rebalance(context, data):
for security in context.shorts.index:
if get_open_orders(security):
continue
if security in data:
order_target_percent(security, -context.shorts[security])
for security in context.longs.index:
if get_open_orders(security):
continue
if security in data:
order_target_percent(security, context.longs[security])
for security in context.portfolio.positions:
if get_open_orders(security):
continue
if security in data:
if security not in (context.longs.index + context.shorts.index):
order_target_percent(security, 0)
| unlicense |
xguse/ggplot | ggplot/tests/test_theme_mpl.py | 12 | 3907 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
import matplotlib as mpl
import six
from nose.tools import assert_true
from ggplot.tests import image_comparison, cleanup
from ggplot import *
def _diff(a, b):
ret = {}
for key, val in a.items():
if key in b:
if b[key] != val:
ret[key] = "%s: %s -> %s" % (key, val, b[key])
else:
ret[key] = "%s: %s -> %s" % (key, val, "--")
for key, val in b.items():
if key not in a:
ret[key] = "%s: %s -> %s" % (key, "--", val)
return ret
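# Doctest-style sketch of _diff (added for illustration):
#   _diff({'a': 1, 'b': 2}, {'a': 1, 'b': 3}) -> {'b': 'b: 2 -> 3'}
#   _diff({'a': 1}, {}) -> {'a': 'a: 1 -> --'}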
@cleanup
def test_theme_matplotlib():
gg = ggplot(aes(x='date', y='beef'), data=meat)
a = mpl.rcParams.copy()
_theme = theme_matplotlib({"font.family": "serif"}, matplotlib_defaults=False)
assert_true(len(_theme._rcParams) < 2, "setting font.family changed more than that in the theme. %s" % list(six.iterkeys(_theme._rcParams))[:5])
gg = gg + _theme
b = mpl.rcParams.copy()
assert_true(len(_diff(a,b)) < 2, "setting font.family changed more than that in ggplot object: %s" % list(six.iterkeys(_diff(a,b)))[:5])
@image_comparison(baseline_images=['theme_clean', 'theme_mpl_completly'])
def test_theme_matplotlib2():
gg = ggplot(aes(x='date', y='beef'), data=meat) + \
geom_point(color='lightblue') + \
stat_smooth(span=.15, color='black', se=True) + \
ggtitle("Beef: It's What's for Dinner") + \
xlab("Date") + \
ylab("Head of Cattle Slaughtered")
a = mpl.rcParams.copy()
print(gg)
b = mpl.rcParams.copy()
assert_true(len(_diff(a,b)) < 1, "Just plotting changed something in the ggplot object: %s" % list(six.iterkeys(_diff(a,b)))[:5])
print(gg + theme_matplotlib())
@image_comparison(baseline_images=['theme_clean2', 'theme_mpl_only_one'])
def test_theme_matplotlib3():
gg = ggplot(aes(x='date', y='beef'), data=meat) + \
geom_point(color='lightblue') + \
stat_smooth(span=.15, color='black', se=True) + \
ggtitle("Beef: It's What's for Dinner") + \
xlab("Date") + \
ylab("Head of Cattle Slaughtered")
a = mpl.rcParams.copy()
print(gg)
b = mpl.rcParams.copy()
assert_true(len(_diff(a,b)) < 1, "Just plotting changed something in the ggplot object: %s" % list(six.iterkeys(_diff(a,b)))[:5])
_theme = theme_matplotlib({"font.family": "serif"}, matplotlib_defaults=False)
gg = gg + _theme
b = mpl.rcParams.copy()
assert_true(len(_diff(a,b)) < 2, "Setting just one param changed more in the ggplot object: %s" % list(six.iterkeys(_diff(a,b)))[:5])
print(gg)
b = mpl.rcParams.copy()
assert_true(len(_diff(a,b)) < 2, "Plotting after setting just one param changed more in the ggplot object: %s" % list(six.iterkeys(_diff(a,b)))[:5])
@image_comparison(baseline_images=['theme_mpl_all_before', 'theme_mpl_all_after'])
def test_theme_matplotlib4():
gg = ggplot(aes(x='date', y='beef'), data=meat) + \
geom_point(color='lightblue') + \
stat_smooth(span=.15, color='black', se=True) + \
ggtitle("Beef: It's What's for Dinner") + \
xlab("Date") + \
ylab("Head of Cattle Slaughtered")
print(gg + theme_matplotlib())
print(gg + theme_matplotlib({"font.family": "serif"}, matplotlib_defaults=False))
@image_comparison(baseline_images=['theme_mpl_all_before'])
def test_theme_matplotlib5():
# Make sure the last complete theme wins.
gg = ggplot(aes(x='date', y='beef'), data=meat) + \
geom_point(color='lightblue') + \
stat_smooth(span=.15, color='black', se=True) + \
ggtitle("Beef: It's What's for Dinner") + \
xlab("Date") + \
ylab("Head of Cattle Slaughtered")
print(gg + theme_gray() + theme_matplotlib())
def test_theme_matplotlib6():
tmpl = theme_matplotlib()
assert_true(tmpl.complete)
| bsd-2-clause |
amjames/psi4 | psi4/driver/qcdb/util/mpl.py | 1 | 2263 | #
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2018 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This file is part of Psi4.
#
# Psi4 is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, version 3.
#
# Psi4 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with Psi4; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
import numpy as np
def plot_coord(ref, cand=None, orig=None, comment=None):
"""Display target geometry `ref` as black dots in 3D plot. If present, also
plot candidate geometry `cand` as red dots and starting geometry `orig` as
pale blue dots. Plot has text `comment`. For assessing alignment, red and
black should overlap and pale blue shows where red started.
"""
try:
from matplotlib import pyplot
except ImportError:
raise ImportError("""Install matplotlib. `conda install matplotlib` or https://matplotlib.org/faq/installing_faq.html""")
from mpl_toolkits.mplot3d import Axes3D
fig = pyplot.figure()
ax = fig.add_subplot(111, projection='3d')
bound = max(np.amax(ref), -1 * np.amin(ref))
ax.scatter(ref[:, 0], ref[:, 1], ref[:, 2], c='k', label='goal')
if cand is not None:
ax.scatter(cand[:, 0], cand[:, 1], cand[:, 2], c='r', label='post-align')
if orig is not None:
ax.scatter(orig[:, 0], orig[:, 1], orig[:, 2], c='lightsteelblue', label='pre-align')
if comment is not None:
ax.text2D(0.05, 0.95, comment, transform=ax.transAxes)
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.set_zlabel('z')
ax.set_xlim(-bound, bound)
ax.set_ylim(-bound, bound)
ax.set_zlim(-bound, bound)
ax.legend()
pyplot.show()
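# Usage sketch (hypothetical arrays):
#   >>> import numpy as np
#   >>> ref = np.random.rand(5, 3) - 0.5
#   >>> plot_coord(ref, cand=ref + 0.01, comment='alignment check')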
| lgpl-3.0 |
ContinuumIO/chaco | chaco/scales/formatters.py | 3 | 23402 | """
Classes for formatting labels for values or times.
"""
from math import ceil, floor, fmod, log10
from numpy import abs, all, array, asarray, amax, amin
from safetime import strftime, time, safe_fromtimestamp, localtime
import warnings
__all__ = ['NullFormatter', 'BasicFormatter', 'IntegerFormatter',
'OffsetFormatter', 'TimeFormatter', 'strftimeEx']
class NullFormatter(object):
""" Formatter for empty labels.
"""
    def format(self, ticks, numlabels=None, char_width=None):
""" Returns a list containing an empty label for each item in *ticks*.
"""
return [""] * len(ticks)
    def estimate_width(self, start, end, numlabels=None, char_width=None):
""" Returns 0 for width and 0 for number of labels.
"""
return 0, 0
class BasicFormatter(object):
""" Formatter for numeric labels.
"""
# This is a class-level default that is related to the algorithm in format()
avg_label_width = 7.0
# Toggles whether or not to use scientific notation when the values exceed
# scientific_limits
use_scientific = True
# Any number smaller than 10 ** limits[0] or larger than 10 ** limits[1]
    # will be represented using scientific notation.
scientific_limits = (-3, 5)
def __init__(self, **kwds):
# Allow the user to override the class-level defaults.
self.__dict__.update(kwds)
def oldformat(self, ticks, numlabels=None, char_width=None):
""" This function is adapted from matplotlib's "OldScalarFormatter".
Parameters
----------
ticks : array of numbers
The tick values to be formatted.
numlabels
Not used.
char_width
Not used.
Returns
-------
List of formatted labels.
"""
labels = []
if len(ticks) == 0:
return []
d = abs(ticks[-1] - ticks[0])
for x in ticks:
if abs(x)<1e4 and x==int(x):
labels.append('%d' % x)
continue
if d < 1e-2: fmt = '%1.3e'
elif d < 1e-1: fmt = '%1.3f'
elif d > 1e5: fmt = '%1.1e'
elif d > 10 : fmt = '%1.1f'
elif d > 1 : fmt = '%1.2f'
else: fmt = '%1.3f'
s = fmt % x
tup = s.split('e')
if len(tup)==2:
mantissa = tup[0].rstrip('0').rstrip('.')
sign = tup[1][0].replace('+', '')
exponent = tup[1][1:].lstrip('0')
if sign or exponent:
s = '%se%s%s' %(mantissa, sign, exponent)
else:
s = mantissa
else:
s = s.rstrip('0').rstrip('.')
labels.append(s)
return labels
def format(self, ticks, numlabels=None, char_width=None, fill_ratio=0.3):
""" Does "nice" formatting of floating-point numbers. *numlabels* is
ignored in this method.
"""
if len(ticks) == 0:
return []
ticks = asarray(ticks)
if self.use_scientific:
scientific = (((ticks % 10 ** self.scientific_limits[1]) == 0) |
(abs(ticks) <= 10 ** self.scientific_limits[0])).all()
else:
scientific = False
if scientific:
if char_width is not None:
# We need to determine how many digits we can use in the
# mantissa based on the order of magnitude of the exponent.
chars_per_label = int(char_width * fill_ratio / len(ticks))
maxtick = amax(abs(ticks))
if maxtick > 0:
exp_oom = str(int(floor(log10(maxtick))))
else:
exp_oom = "0"
emax = len(exp_oom)
if chars_per_label < emax:
# We're sort of hosed. Use a minimum 3 chars for the mantissa.
mmax = 3
else:
mmax = chars_per_label - emax - 1
else:
mmax = -1
labels = [self._nice_sci(x, mmax) for x in ticks]
else:
# For decimal mode,
if not (ticks % 1).any():
labels = map(str, ticks.astype(int))
else:
labels = map(str, ticks)
return labels
def _nice_sci(self, val, mdigits, force_sign=False):
""" Formats *val* nicely using scientific notation. *mdigits* is the
max number of digits to use for the mantissa. If *force_sign* is True,
then always show the sign of the mantissa, otherwise only show the sign
if *val* is negative.
"""
if val != 0:
e = int(floor(log10(abs(val))))
else:
e = 0
m = val / float(10**e)
m_str = str(m)
# Safely truncating the mantissa is somewhat tricky. The minimum
# length of the mantissa is everything up to (but not including) the
# period. If the m_str doesn't have a decimal point, then we have to
# ignore mdigits.
if mdigits > 0 and "." in m_str:
max_len = max(m_str.index("."), mdigits)
m_str = m_str[:max_len]
# Strip off a trailing decimal
if m_str[-1] == ".":
m_str = m_str[:-1]
# It's not sufficient just to truncate the string; we need to
# handle proper rounding
else:
# Always strip off a trailing decimal
if m_str[-1] == ".":
m_str = m_str[:-1]
if force_sign and not m_str.startswith("-"):
m_str = "+" + m_str
if e != 0:
# Clean up the exponent
e_str = str(e)
if e_str.startswith("+") and not force_sign:
e_str = e_str[1:]
m_str += "e" + e_str
return m_str
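    # Hedged examples of _nice_sci (added for illustration):
    #   _nice_sci(1234.0, 3) -> '1.2e3'  (mantissa truncated to ~3 characters)
    #   _nice_sci(5.0, 3)    -> '5.0'    (zero exponent is omitted)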
def estimate_width(self, start, end, numlabels=None, char_width=None,
fill_ratio=0.3, ticker=None):
""" Returns an estimate of the total number of characters used by the
the labels for the given set of inputs, as well as the number of labels.
Parameters
----------
start : number
The beginning of the interval.
end : number
The end of the interval.
numlabels : number
The ideal number of labels to generate on the interval.
char_width : number
The total character width available for labelling the interval.
fill_ratio : 0.0 < float <= 1.0
Ratio of the available width that will be occupied by label text.
ticker : AbstractScale object
Object that can calculate the number of labels needed.
Returns
-------
(numlabels, total label width)
"""
if numlabels == 0 or char_width == 0:
return 0, 0
# use the start and end points as ticks and average their label sizes
labelsizes = map(len, self.format([start, end]))
avg_size = sum(labelsizes) / 2.0
if ticker:
if numlabels:
initial_estimate = numlabels
elif char_width:
initial_estimate = round(fill_ratio * char_width / avg_size)
est_ticks = ticker.num_ticks(start, end, initial_estimate)
elif numlabels:
est_ticks = numlabels
elif char_width:
est_ticks = round(fill_ratio * char_width / avg_size)
return est_ticks, est_ticks * avg_size
class IntegerFormatter(BasicFormatter):
""" Format integer tick labels as integers.
"""
def format(self, ticks, numlabels=None, char_width=None, fill_ratio=0.3):
""" Formats integer tick labels.
"""
return map(str, map(int, ticks))
class OffsetFormatter(BasicFormatter):
""" This formatter is like BasicFormatter, but it supports formatting
ticks using an offset. This is useful for viewing small ranges within
big numbers.
"""
# Whether or not to use offsets when labelling the ticks. Note that
# even if this is true, offset are only used when the ratio of the data
# range to the average data value is smaller than a threshold.
use_offset = False
# The threshold ratio of the data range to the average data value, below
# which "offset" display mode will be used if use_offset is True.
offset_threshold = 1e-3
# Determines which ticks to display the offset value at. Can be "all",
# "firstlast", or "none".
offset_display = "firstlast"
# Determines which format to use to display the end labels. Can be
# "offset" or "sci".
end_label_format = "offset"
# Specifies the threshold values
offset_limits = (-3, 4)
# There are two possible formats for the offset.
#
# "sci"
# uses scientific notation for the offset
# "decimal"
# pads with zeroes left or right until the decimal
#
# The following table shows some example ranges and how an intermediate
# tick will be displayed. These all assume an offset_display value of
# "none" or "firstlast".
#
# ============ ========== ========= =========
# start end sci decimal
# ============ ========== ========= =========
# 90.0004 90.0008 5.0e-4 .0005
# 90.0004 90.0015 1.2e-3 .0012
# -1200015 -1200003 12 12
# 2300015000 2300015030 1.502e4 15020
# ============ ========== ========= =========
#
offset_format = "sci"
# The offset generated by the last call to format()
offset = None
def _compute_offset(self, ticks):
first, last = ticks[0], ticks[-1]
data_range = ticks[-1] - ticks[0]
range_oom = int(ceil(log10(data_range)))
pow_of_ten = 10 ** range_oom
if all(asarray(ticks) < 0):
return ceil(amax(ticks) / pow_of_ten) * pow_of_ten
else:
return floor(amin(ticks) / pow_of_ten) * pow_of_ten
def format(self, ticks, numlabels=None, char_width=None):
if len(ticks) == 0:
return []
data_range = ticks[-1] - ticks[0]
avg_data = sum(abs(ticks)) / len(ticks)
if self.use_offset and data_range/avg_data < self.offset_threshold:
offset = self._compute_offset(ticks)
intermed_ticks = asarray(ticks) - offset
if self.offset_format == "sci":
labels = BasicFormatter.format(self, intermed_ticks)
else:
# have to decide between %d and %f here. also have to
# strip trailing "0"s.. test with %g.
labels = ["%g" % i for i in intermed_ticks]
if offset > 0:
sign = "+"
else:
sign = ""
offset_str = BasicFormatter.format(self, [offset])[0] + sign
if self.offset_display == "firstlast":
if self.end_label_format == "offset":
labels[0] = offset_str + labels[0]
labels[-1] = offset_str + labels[-1]
else:
labels[0] = BasicFormatter.format(self, [ticks[0]])[0]
labels[-1] = BasicFormatter.format(self, [ticks[-1]])[0]
elif self.offset_display == "all":
labels = [offset_str + label for label in labels]
return labels
else:
return BasicFormatter.format(self, ticks, numlabels, char_width)
def estimate_width(self, start, end, numlabels=None, char_width=None,
fill_ratio=0.3, ticker=None):
if numlabels == 0 or char_width == 0:
return (0, 0)
if ticker:
if numlabels:
initial_estimate = numlabels
elif char_width:
avg_size = len("%g%g" % (start, end)) / 2.0
initial_estimate = round(fill_ratio * char_width / avg_size)
est_ticks = int(ticker.num_ticks(start, end, initial_estimate))
elif numlabels:
est_ticks = numlabels
elif char_width:
            avg_size = len("%g%g" % (start, end)) / 2.0
            est_ticks = round(fill_ratio * char_width / avg_size)
start, mid, end = map(len, self.format([start, (start+end)/2.0, end]))
if est_ticks > 2:
size = start + end + (est_ticks-2) * mid
else:
size = start + end
return est_ticks, size
def strftimeEx(fmt, t, timetuple=None):
"""
Extends time.strftime() to format milliseconds and microseconds.
Expects input to be a floating-point number of seconds since epoch.
The additional formats are:
- ``%(ms)``: milliseconds (uses round())
- ``%(ms_)``: milliseconds (uses floor())
- ``%(us)``: microseconds (uses round())
The format may also be a callable which will bypass time.strftime() entirely.
"""
if callable(fmt):
return fmt(t)
if "%(ms)" in fmt:
# Assume that fmt does not also contain %(ms_) and %(us).
# (It really doesn't make sense to mix %(ms) with those.)
secs, frac = divmod(round(t,3), 1)
ms = int(round(1e3*frac))
fmt = fmt.replace("%(ms)", "%03d" % ms)
else:
# Assume fmt contains %(ms_) and %(us).
secs, frac = divmod(round(t,6), 1)
ms = int(round(1e3*frac))
ms_, us = divmod(int(round(1e6*frac)),1000)
fmt = fmt.replace("%(ms_)", "%03d" % ms_)
fmt = fmt.replace("%(us)", "%03d" % us)
if not timetuple:
timetuple = localtime(secs)
return strftime(fmt, timetuple)
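# Hedged example: strftimeEx("%S.%(ms)", 1.234) gives "01.234" in most
# timezones (the %(ms) field is filled from the fractional seconds before
# the remaining directives are passed to time.strftime).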
def _two_digit_year(t):
""" Round to the nearest Jan 1, roughly.
"""
dt = safe_fromtimestamp(t)
year = dt.year
if dt.month >= 7:
year += 1
return "'%02d" % (year % 100)
def _four_digit_year(t):
""" Round to the nearest Jan 1, roughly.
"""
dt = safe_fromtimestamp(t)
year = dt.year
if dt.month >= 7:
year += 1
return str(year)
class TimeFormatter(object):
""" Formatter for time values.
"""
# This table of format is convert into the 'formats' dict. Each tuple of
# formats must be ordered from shortest to longest.
_formats = {
'microseconds': ('%(us)us', '%(ms_).%(us)ms'),
'milliseconds': ('%(ms)ms', '%S.%(ms)s'),
'seconds': (':%S', '%Ss'),
'minsec': ('%M:%S',), # '%Mm%S', '%Mm%Ss'),
'minutes': ('%Mm',),
'hourmin': ('%H:%M',), #'%Hh%M', '%Hh%Mm', '%H:%M:%S','%Hh %Mm %Ss'),
'hours': ('%Hh', '%H:%M'),
'days': ('%m/%d', '%a%d',),
'months': ('%m/%Y', '%b%y'),
'years': (_two_digit_year, _four_digit_year),
}
# Labels of time units, from finest to coarsest.
format_order = ['microseconds', 'milliseconds', 'seconds', 'minsec', 'minutes',
'hourmin', 'hours', 'days', 'months', 'years']
# A dict whose are keys are the strings in **format_order**; each value is
# two arrays, (widths, format strings/functions).
formats = {}
# Whether or not to strip the leading zeros on tick labels.
strip_leading_zeros = True
def __init__(self, **kwds):
self.__dict__.update(kwds)
self._compute_format_weights()
def _compute_format_weights(self):
if self.formats:
return
for fmt_name, fmt_strings in self._formats.items():
sizes = []
tmptime = time()
for s in fmt_strings:
size = len(strftimeEx(s, tmptime))
sizes.append(size)
self.formats[fmt_name] = (array(sizes), fmt_strings)
return
def _get_resolution(self, resolution, interval):
r = resolution
span = interval
if r < 5e-4:
resol = "microseconds"
elif r < 0.5:
resol = "milliseconds"
elif r < 60:
if span > 60:
resol = "minsec"
else:
resol = "seconds"
elif r < 3600:
if span > 3600:
resol = "hourmin"
else:
resol = "minutes"
elif r < 24*3600:
resol = "hours"
elif r < 30*24*3600:
resol = "days"
elif r < 365*24*3600:
resol = "months"
else:
resol = "years"
return resol
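    # Examples of the resolution selection above (added for illustration):
    #   _get_resolution(30, 7200)            -> 'minsec' (30 s ticks over 2 h)
    #   _get_resolution(7200, 2 * 24 * 3600) -> 'hours'  (2 h ticks over 2 days)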
def format(self, ticks, numlabels=None, char_width=None, fill_ratio = 0.3,
ticker=None):
""" Formats a set of time values.
Parameters
----------
ticks : array of numbers
The tick values to be formatted
numlabels
Not used.
char_width : number
The total character width available for labelling the interval.
fill_ratio : 0.0 < float <= 1.0
Ratio of the available width that will be occupied by label text.
ticker : AbstractScale object
Object that can calculate the number of labels needed.
Returns
-------
List of formatted labels.
"""
# In order to pick the right set of labels, we need to determine
# the resolution of the ticks. We can do this using a ticker if
# it's provided, or by computing the resolution from the actual
# ticks we've been given.
if len(ticks) == 0:
return []
span = abs(ticks[-1] - ticks[0])
if ticker:
r = ticker.resolution
else:
r = span / (len(ticks) - 1)
resol = self._get_resolution(r, span)
widths, formats = self.formats[resol]
format = formats[0]
if char_width:
# If a width is provided, then we pick the most appropriate scale,
# otherwise just use the widest format
good_formats = array(formats)[widths * len(ticks) < fill_ratio * char_width]
if len(good_formats) > 0:
format = good_formats[-1]
# Apply the format to the tick values
labels = []
resol_ndx = self.format_order.index(resol)
# This dictionary maps the name of a time resolution (in self.format_order)
# to its index in a time.localtime() timetuple. The default is to map
# everything to index 0, which is year. This is not ideal; it might cause
# a problem with the tick at midnight, january 1st, 0 a.d. being incorrectly
# promoted at certain tick resolutions.
time_tuple_ndx_for_resol = dict.fromkeys(self.format_order, 0)
time_tuple_ndx_for_resol.update( {
"seconds" : 5,
"minsec" : 4,
"minutes" : 4,
"hourmin" : 3,
"hours" : 3,
})
# As we format each tick, check to see if we are at a boundary of the
# next higher unit of time. If so, replace the current format with one
# from that resolution. This is not the best heuristic in the world,
# but it works! There is some trickiness here due to having to deal
# with hybrid formats in a reasonable manner.
for t in ticks:
try:
tm = localtime(t)
s = strftimeEx(format, t, tm)
except ValueError, e:
warnings.warn("Unable to convert tick for timestamp " + str(t))
labels.append("ERR")
continue
hybrid_handled = False
next_ndx = resol_ndx
# The way to check that we are at the boundary of the next unit of
# time is by checking that we have 0 units of the resolution, i.e.
# we are at zero minutes, so display hours, or we are at zero seconds,
# so display minutes (and if that is zero as well, then display hours).
while tm[ time_tuple_ndx_for_resol[self.format_order[next_ndx]] ] == 0:
next_ndx += 1
if next_ndx == len(self.format_order):
break
if resol in ("minsec", "hourmin") and not hybrid_handled:
if (resol == "minsec" and tm.tm_min == 0 and tm.tm_sec != 0) or \
(resol == "hourmin" and tm.tm_hour == 0 and tm.tm_min != 0):
next_format = self.formats[self.format_order[resol_ndx-1]][1][0]
s = strftimeEx(next_format, t, tm)
break
else:
hybrid_handled = True
next_format = self.formats[self.format_order[next_ndx]][1][0]
s = strftimeEx(next_format, t, tm)
if self.strip_leading_zeros:
ss = s.lstrip('0')
if ss != s and (ss == '' or not ss[0].isdigit()):
# A label such as '000ms' should leave one zero.
ss = '0' + ss
labels.append(ss)
else:
labels.append(s)
return labels
def estimate_width(self, start, end, numlabels=None, char_width=None,
fill_ratio = 0.2, ticker=None):
""" Returns an estimate of the total number of characters used by the
the labels for the given set of inputs, as well as the number of labels.
Parameters
----------
start : number
The beginning of the interval.
end : number
The end of the interval.
numlabels : number
The ideal number of labels to generate on the interval.
char_width : number
The total character width available for labelling the interval.
fill_ratio : 0.0 < float <= 1.0
Ratio of the available width that will be occupied by label text.
ticker : AbstractScale object
Object that can calculate the number of labels needed.
Returns
-------
(numlabels, total label width)
"""
if numlabels == 0 or char_width == 0:
return 0, 0
if ticker is None or not hasattr(ticker, "unit"):
raise ValueError("TimeFormatter requires a scale.")
if not numlabels:
numlabels = ticker.num_ticks(start, end)
span = abs(end - start)
if ticker:
r = ticker.resolution
else:
r = span / numlabels
unit = self._get_resolution(r, span)
if unit == "milliseconds":
return numlabels, numlabels * 6
widths, strings = self.formats[unit]
if char_width:
# Find an appropriate resolution in self.formats and pick between
# the various format strings
good_widths = widths[widths * numlabels < fill_ratio * char_width]
if len(good_widths) == 0:
# All too big, pick the first label
width = widths[0]
else:
# Pick the largest label that fits
width = good_widths[-1]
width *= numlabels
else:
# Just pick the middle of the pack of format widths
width = widths[ int(len(widths) / 2) ] * numlabels
return numlabels, width
| bsd-3-clause |
anirudhjayaraman/scikit-learn | sklearn/linear_model/tests/test_passive_aggressive.py | 169 | 8809 | import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_array_almost_equal, assert_array_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.base import ClassifierMixin
from sklearn.utils import check_random_state
from sklearn.datasets import load_iris
from sklearn.linear_model import PassiveAggressiveClassifier
from sklearn.linear_model import PassiveAggressiveRegressor
iris = load_iris()
random_state = check_random_state(12)
indices = np.arange(iris.data.shape[0])
random_state.shuffle(indices)
X = iris.data[indices]
y = iris.target[indices]
X_csr = sp.csr_matrix(X)
class MyPassiveAggressive(ClassifierMixin):
def __init__(self, C=1.0, epsilon=0.01, loss="hinge",
fit_intercept=True, n_iter=1, random_state=None):
self.C = C
self.epsilon = epsilon
self.loss = loss
self.fit_intercept = fit_intercept
self.n_iter = n_iter
def fit(self, X, y):
n_samples, n_features = X.shape
self.w = np.zeros(n_features, dtype=np.float64)
self.b = 0.0
for t in range(self.n_iter):
for i in range(n_samples):
p = self.project(X[i])
if self.loss in ("hinge", "squared_hinge"):
loss = max(1 - y[i] * p, 0)
else:
loss = max(np.abs(p - y[i]) - self.epsilon, 0)
sqnorm = np.dot(X[i], X[i])
if self.loss in ("hinge", "epsilon_insensitive"):
step = min(self.C, loss / sqnorm)
elif self.loss in ("squared_hinge",
"squared_epsilon_insensitive"):
step = loss / (sqnorm + 1.0 / (2 * self.C))
if self.loss in ("hinge", "squared_hinge"):
step *= y[i]
else:
step *= np.sign(y[i] - p)
self.w += step * X[i]
if self.fit_intercept:
self.b += step
def project(self, X):
return np.dot(X, self.w) + self.b
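# Note on the update rule implemented above (explanatory comment): with loss l
# and squared sample norm ||x||^2,
#   hinge / epsilon_insensitive losses use  step = min(C, l / ||x||^2)      (PA-I)
#   the squared variants use                step = l / (||x||^2 + 1/(2*C))  (PA-II)
# The step is then signed by y (classification) or sign(y - p) (regression)
# and applied as w <- w + step * x, b <- b + step.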
def test_classifier_accuracy():
for data in (X, X_csr):
for fit_intercept in (True, False):
clf = PassiveAggressiveClassifier(C=1.0, n_iter=30,
fit_intercept=fit_intercept,
random_state=0)
clf.fit(data, y)
score = clf.score(data, y)
assert_greater(score, 0.79)
def test_classifier_partial_fit():
classes = np.unique(y)
for data in (X, X_csr):
clf = PassiveAggressiveClassifier(C=1.0,
fit_intercept=True,
random_state=0)
for t in range(30):
clf.partial_fit(data, y, classes)
score = clf.score(data, y)
assert_greater(score, 0.79)
def test_classifier_refit():
# Classifier can be retrained on different labels and features.
clf = PassiveAggressiveClassifier().fit(X, y)
assert_array_equal(clf.classes_, np.unique(y))
clf.fit(X[:, :-1], iris.target_names[y])
assert_array_equal(clf.classes_, iris.target_names)
def test_classifier_correctness():
y_bin = y.copy()
y_bin[y != 1] = -1
for loss in ("hinge", "squared_hinge"):
clf1 = MyPassiveAggressive(C=1.0,
loss=loss,
fit_intercept=True,
n_iter=2)
clf1.fit(X, y_bin)
for data in (X, X_csr):
clf2 = PassiveAggressiveClassifier(C=1.0,
loss=loss,
fit_intercept=True,
n_iter=2, shuffle=False)
clf2.fit(data, y_bin)
assert_array_almost_equal(clf1.w, clf2.coef_.ravel(), decimal=2)
def test_classifier_undefined_methods():
clf = PassiveAggressiveClassifier()
for meth in ("predict_proba", "predict_log_proba", "transform"):
assert_raises(AttributeError, lambda x: getattr(clf, x), meth)
def test_class_weights():
# Test class weights.
X2 = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y2 = [1, 1, 1, -1, -1]
clf = PassiveAggressiveClassifier(C=0.1, n_iter=100, class_weight=None,
random_state=100)
clf.fit(X2, y2)
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([1]))
# we give a small weights to class 1
clf = PassiveAggressiveClassifier(C=0.1, n_iter=100,
class_weight={1: 0.001},
random_state=100)
clf.fit(X2, y2)
# now the hyperplane should rotate clock-wise and
# the prediction on this point should shift
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([-1]))
def test_partial_fit_weight_class_balanced():
# partial_fit with class_weight='balanced' not supported
clf = PassiveAggressiveClassifier(class_weight="balanced")
assert_raises(ValueError, clf.partial_fit, X, y, classes=np.unique(y))
def test_equal_class_weight():
X2 = [[1, 0], [1, 0], [0, 1], [0, 1]]
y2 = [0, 0, 1, 1]
clf = PassiveAggressiveClassifier(C=0.1, n_iter=1000, class_weight=None)
clf.fit(X2, y2)
# Already balanced, so "balanced" weights should have no effect
clf_balanced = PassiveAggressiveClassifier(C=0.1, n_iter=1000,
class_weight="balanced")
clf_balanced.fit(X2, y2)
clf_weighted = PassiveAggressiveClassifier(C=0.1, n_iter=1000,
class_weight={0: 0.5, 1: 0.5})
clf_weighted.fit(X2, y2)
# should be similar up to some epsilon due to learning rate schedule
assert_almost_equal(clf.coef_, clf_weighted.coef_, decimal=2)
assert_almost_equal(clf.coef_, clf_balanced.coef_, decimal=2)
def test_wrong_class_weight_label():
# ValueError due to wrong class_weight label.
X2 = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y2 = [1, 1, 1, -1, -1]
clf = PassiveAggressiveClassifier(class_weight={0: 0.5})
assert_raises(ValueError, clf.fit, X2, y2)
def test_wrong_class_weight_format():
# ValueError due to wrong class_weight argument type.
X2 = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y2 = [1, 1, 1, -1, -1]
clf = PassiveAggressiveClassifier(class_weight=[0.5])
assert_raises(ValueError, clf.fit, X2, y2)
clf = PassiveAggressiveClassifier(class_weight="the larch")
assert_raises(ValueError, clf.fit, X2, y2)
def test_regressor_mse():
y_bin = y.copy()
y_bin[y != 1] = -1
for data in (X, X_csr):
for fit_intercept in (True, False):
reg = PassiveAggressiveRegressor(C=1.0, n_iter=50,
fit_intercept=fit_intercept,
random_state=0)
reg.fit(data, y_bin)
pred = reg.predict(data)
assert_less(np.mean((pred - y_bin) ** 2), 1.7)
def test_regressor_partial_fit():
y_bin = y.copy()
y_bin[y != 1] = -1
for data in (X, X_csr):
reg = PassiveAggressiveRegressor(C=1.0,
fit_intercept=True,
random_state=0)
for t in range(50):
reg.partial_fit(data, y_bin)
pred = reg.predict(data)
assert_less(np.mean((pred - y_bin) ** 2), 1.7)
def test_regressor_correctness():
y_bin = y.copy()
y_bin[y != 1] = -1
for loss in ("epsilon_insensitive", "squared_epsilon_insensitive"):
reg1 = MyPassiveAggressive(C=1.0,
loss=loss,
fit_intercept=True,
n_iter=2)
reg1.fit(X, y_bin)
for data in (X, X_csr):
reg2 = PassiveAggressiveRegressor(C=1.0,
loss=loss,
fit_intercept=True,
n_iter=2, shuffle=False)
reg2.fit(data, y_bin)
assert_array_almost_equal(reg1.w, reg2.coef_.ravel(), decimal=2)
def test_regressor_undefined_methods():
reg = PassiveAggressiveRegressor()
for meth in ("transform",):
assert_raises(AttributeError, lambda x: getattr(reg, x), meth)
| bsd-3-clause |
arhik/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/dates.py | 15 | 33969 | """
Matplotlib provides sophisticated date plotting capabilities, standing
on the shoulders of python :mod:`datetime` and the add-on modules
:mod:`pytz` and :mod:`dateutil`. :class:`datetime` objects are
converted to floating point numbers which represent the number of days
since 0001-01-01 UTC. The helper functions :func:`date2num`,
:func:`num2date` and :func:`drange` are used to facilitate easy
conversion to and from :mod:`datetime` and numeric ranges.
A wide range of specific and general purpose date tick locators and
formatters are provided in this module. See
:mod:`matplotlib.ticker` for general information on tick locators
and formatters. These are described below.
All the matplotlib date converters, tickers and formatters are
timezone aware, and the default timezone is given by the timezone
parameter in your :file:`matplotlibrc` file. If you leave out a
:class:`tz` timezone instance, the default from your rc file will be
assumed. If you want to use a custom time zone, pass a
:class:`pytz.timezone` instance with the tz keyword argument to
:func:`num2date`, :func:`plot_date`, and any custom date tickers or
locators you create. See `pytz <http://pytz.sourceforge.net>`_ for
information on :mod:`pytz` and timezone handling.
The `dateutil module <http://labix.org/python-dateutil>`_ provides
additional code to handle date ticking, making it easy to place ticks
on any kind of date. See examples below.
Date tickers
------------
Most of the date tickers can locate single or multiple values. For
example::
# tick on mondays every week
loc = WeekdayLocator(byweekday=MO, tz=tz)
# tick on mondays and saturdays
loc = WeekdayLocator(byweekday=(MO, SA))
In addition, most of the constructors take an interval argument::
# tick on mondays every second week
loc = WeekdayLocator(byweekday=MO, interval=2)
The rrule locator allows completely general date ticking::
# tick every 5th easter
rule = rrulewrapper(YEARLY, byeaster=1, interval=5)
loc = RRuleLocator(rule)
Here are all the date tickers:
* :class:`MinuteLocator`: locate minutes
* :class:`HourLocator`: locate hours
* :class:`DayLocator`: locate specified days of the month
* :class:`WeekdayLocator`: locate days of the week, e.g. MO, TU
* :class:`MonthLocator`: locate months, e.g. 7 for July
* :class:`YearLocator`: locate years that are multiples of base
* :class:`RRuleLocator`: locate using a
:class:`matplotlib.dates.rrulewrapper`. The
:class:`rrulewrapper` is a simple wrapper around a
  :class:`dateutil.rrule` (`dateutil
  <https://moin.conectiva.com.br/DateUtil>`_) which allows almost
arbitrary date tick specifications. See `rrule example
<../examples/pylab_examples/date_demo_rrule.html>`_.
Date formatters
---------------
Here are all the date formatters:
* :class:`DateFormatter`: use :func:`strftime` format strings
* :class:`IndexDateFormatter`: date plots with implicit *x*
indexing.
"""
import re, time, math, datetime
import pytz
# compatibility with pytz 2008c and older versions
try:
import pytz.zoneinfo
except ImportError:
pytz.zoneinfo = pytz.tzinfo
pytz.zoneinfo.UTC = pytz.UTC
import matplotlib
import numpy as np
import matplotlib.units as units
import matplotlib.cbook as cbook
import matplotlib.ticker as ticker
from pytz import timezone
from dateutil.rrule import rrule, MO, TU, WE, TH, FR, SA, SU, YEARLY, \
MONTHLY, WEEKLY, DAILY, HOURLY, MINUTELY, SECONDLY
from dateutil.relativedelta import relativedelta
import dateutil.parser
__all__ = ( 'date2num', 'num2date', 'drange', 'epoch2num',
'num2epoch', 'mx2num', 'DateFormatter',
'IndexDateFormatter', 'DateLocator', 'RRuleLocator',
'YearLocator', 'MonthLocator', 'WeekdayLocator',
'DayLocator', 'HourLocator', 'MinuteLocator',
'SecondLocator', 'rrule', 'MO', 'TU', 'WE', 'TH', 'FR',
'SA', 'SU', 'YEARLY', 'MONTHLY', 'WEEKLY', 'DAILY',
'HOURLY', 'MINUTELY', 'SECONDLY', 'relativedelta',
'seconds', 'minutes', 'hours', 'weeks')
UTC = pytz.timezone('UTC')
def _get_rc_timezone():
s = matplotlib.rcParams['timezone']
return pytz.timezone(s)
HOURS_PER_DAY = 24.
MINUTES_PER_DAY = 60.*HOURS_PER_DAY
SECONDS_PER_DAY = 60.*MINUTES_PER_DAY
MUSECONDS_PER_DAY = 1e6*SECONDS_PER_DAY
SEC_PER_MIN = 60
SEC_PER_HOUR = 3600
SEC_PER_DAY = SEC_PER_HOUR * 24
SEC_PER_WEEK = SEC_PER_DAY * 7
MONDAY, TUESDAY, WEDNESDAY, THURSDAY, FRIDAY, SATURDAY, SUNDAY = (
MO, TU, WE, TH, FR, SA, SU)
WEEKDAYS = (MONDAY, TUESDAY, WEDNESDAY, THURSDAY, FRIDAY, SATURDAY, SUNDAY)
def _to_ordinalf(dt):
"""
Convert :mod:`datetime` to the Gregorian date as UTC float days,
preserving hours, minutes, seconds and microseconds. Return value
is a :func:`float`.
"""
if hasattr(dt, 'tzinfo') and dt.tzinfo is not None:
delta = dt.tzinfo.utcoffset(dt)
if delta is not None:
dt -= delta
base = float(dt.toordinal())
if hasattr(dt, 'hour'):
base += (dt.hour/HOURS_PER_DAY + dt.minute/MINUTES_PER_DAY +
dt.second/SECONDS_PER_DAY + dt.microsecond/MUSECONDS_PER_DAY
)
return base
def _from_ordinalf(x, tz=None):
"""
    Convert a Gregorian float date (the inverse of :func:`_to_ordinalf`),
    preserving hours, minutes, seconds and microseconds. Return value is a
    :class:`datetime` in timezone *tz*.
"""
if tz is None: tz = _get_rc_timezone()
ix = int(x)
dt = datetime.datetime.fromordinal(ix)
remainder = float(x) - ix
hour, remainder = divmod(24*remainder, 1)
minute, remainder = divmod(60*remainder, 1)
second, remainder = divmod(60*remainder, 1)
microsecond = int(1e6*remainder)
if microsecond<10: microsecond=0 # compensate for rounding errors
dt = datetime.datetime(
dt.year, dt.month, dt.day, int(hour), int(minute), int(second),
microsecond, tzinfo=UTC).astimezone(tz)
if microsecond>999990: # compensate for rounding errors
dt += datetime.timedelta(microseconds=1e6-microsecond)
return dt
class strpdate2num:
"""
Use this class to parse date strings to matplotlib datenums when
you know the date format string of the date you are parsing. See
:file:`examples/load_demo.py`.
"""
def __init__(self, fmt):
""" fmt: any valid strptime format is supported """
self.fmt = fmt
def __call__(self, s):
"""s : string to be converted
return value: a date2num float
"""
return date2num(datetime.datetime(*time.strptime(s, self.fmt)[:6]))
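# Illustrative sketch (editor's addition, not part of the original module):
# strpdate2num turns a date string with a known format into a matplotlib
# datenum.  The format string and sample date below are arbitrary examples.
def _example_strpdate2num():
    parse = strpdate2num('%Y-%m-%d')
    return parse('2008-10-01')   # float days since 0001-01-01 00:00:00 UTC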
def datestr2num(d):
"""
Convert a date string to a datenum using
:func:`dateutil.parser.parse`. *d* can be a single string or a
sequence of strings.
"""
if cbook.is_string_like(d):
dt = dateutil.parser.parse(d)
return date2num(dt)
else:
return date2num([dateutil.parser.parse(s) for s in d])
def date2num(d):
"""
*d* is either a :class:`datetime` instance or a sequence of datetimes.
Return value is a floating point number (or sequence of floats)
which gives number of days (fraction part represents hours,
minutes, seconds) since 0001-01-01 00:00:00 UTC.
"""
if not cbook.iterable(d): return _to_ordinalf(d)
else: return np.asarray([_to_ordinalf(val) for val in d])
def julian2num(j):
'Convert a Julian date (or sequence) to a matplotlib date (or sequence).'
if cbook.iterable(j): j = np.asarray(j)
return j + 1721425.5
def num2julian(n):
'Convert a matplotlib date (or sequence) to a Julian date (or sequence).'
if cbook.iterable(n): n = np.asarray(n)
return n - 1721425.5
def num2date(x, tz=None):
"""
*x* is a float value which gives number of days (fraction part
represents hours, minutes, seconds) since 0001-01-01 00:00:00 UTC.
Return value is a :class:`datetime` instance in timezone *tz* (default to
rcparams TZ value).
If *x* is a sequence, a sequence of :class:`datetime` objects will
be returned.
"""
if tz is None: tz = _get_rc_timezone()
if not cbook.iterable(x): return _from_ordinalf(x, tz)
else: return [_from_ordinalf(val, tz) for val in x]
def drange(dstart, dend, delta):
"""
Return a date range as float Gregorian ordinals. *dstart* and
*dend* are :class:`datetime` instances. *delta* is a
:class:`datetime.timedelta` instance.
"""
step = (delta.days + delta.seconds/SECONDS_PER_DAY +
delta.microseconds/MUSECONDS_PER_DAY)
f1 = _to_ordinalf(dstart)
f2 = _to_ordinalf(dend)
return np.arange(f1, f2, step)
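# Illustrative sketch (editor's addition, not part of the original module):
# round-tripping a datetime through date2num/num2date and building a
# 6-hourly range with drange.  The concrete dates are arbitrary examples.
def _example_date_conversions():
    d1 = datetime.datetime(2004, 2, 1, tzinfo=UTC)
    d2 = datetime.datetime(2004, 2, 5, tzinfo=UTC)
    x1 = date2num(d1)                     # float days since 0001-01-01 UTC
    back = num2date(x1, tz=UTC)           # datetime again, in UTC
    every6h = drange(d1, d2, datetime.timedelta(hours=6))
    return x1, back, every6h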
### date tickers and formatters ###
class DateFormatter(ticker.Formatter):
"""
    Tick location is a matplotlib date value (floating point days since
    0001-01-01 UTC). Use a :func:`strftime` format string.
Python only supports :mod:`datetime` :func:`strftime` formatting
for years greater than 1900. Thanks to Andrew Dalke, Dalke
Scientific Software who contributed the :func:`strftime` code
below to include dates earlier than this year.
"""
illegal_s = re.compile(r"((^|[^%])(%%)*%s)")
def __init__(self, fmt, tz=None):
"""
*fmt* is an :func:`strftime` format string; *tz* is the
:class:`tzinfo` instance.
"""
if tz is None: tz = _get_rc_timezone()
self.fmt = fmt
self.tz = tz
def __call__(self, x, pos=0):
dt = num2date(x, self.tz)
return self.strftime(dt, self.fmt)
def set_tzinfo(self, tz):
self.tz = tz
def _findall(self, text, substr):
# Also finds overlaps
sites = []
i = 0
while 1:
j = text.find(substr, i)
if j == -1:
break
sites.append(j)
i=j+1
return sites
# Dalke: I hope I did this math right. Every 28 years the
# calendar repeats, except through century leap years excepting
# the 400 year leap years. But only if you're using the Gregorian
# calendar.
def strftime(self, dt, fmt):
fmt = self.illegal_s.sub(r"\1", fmt)
fmt = fmt.replace("%s", "s")
if dt.year > 1900:
return cbook.unicode_safe(dt.strftime(fmt))
year = dt.year
# For every non-leap year century, advance by
# 6 years to get into the 28-year repeat cycle
delta = 2000 - year
off = 6*(delta // 100 + delta // 400)
year = year + off
# Move to around the year 2000
year = year + ((2000 - year)//28)*28
timetuple = dt.timetuple()
s1 = time.strftime(fmt, (year,) + timetuple[1:])
sites1 = self._findall(s1, str(year))
s2 = time.strftime(fmt, (year+28,) + timetuple[1:])
sites2 = self._findall(s2, str(year+28))
sites = []
for site in sites1:
if site in sites2:
sites.append(site)
s = s1
syear = "%4d" % (dt.year,)
for site in sites:
s = s[:site] + syear + s[site+4:]
return cbook.unicode_safe(s)
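# Illustrative sketch (editor's addition, not part of the original module):
# formatting a matplotlib datenum with DateFormatter.  The format string and
# the (pre-1900) date are arbitrary examples; early dates exercise the
# strftime() workaround implemented above.
def _example_date_formatter():
    fmt = DateFormatter('%Y-%m-%d %H:%M', tz=UTC)
    x = date2num(datetime.datetime(1850, 7, 4, 6, 30, tzinfo=UTC))
    return fmt(x)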
class IndexDateFormatter(ticker.Formatter):
"""
Use with :class:`~matplotlib.ticker.IndexLocator` to cycle format
strings by index.
"""
def __init__(self, t, fmt, tz=None):
"""
*t* is a sequence of dates (floating point days). *fmt* is a
:func:`strftime` format string.
"""
if tz is None: tz = _get_rc_timezone()
self.t = t
self.fmt = fmt
self.tz = tz
def __call__(self, x, pos=0):
'Return the label for time *x* at position *pos*'
ind = int(round(x))
if ind>=len(self.t) or ind<=0: return ''
dt = num2date(self.t[ind], self.tz)
return cbook.unicode_safe(dt.strftime(self.fmt))
class AutoDateFormatter(ticker.Formatter):
"""
This class attempts to figure out the best format to use. This is
most useful when used with the :class:`AutoDateLocator`.
"""
# This can be improved by providing some user-level direction on
# how to choose the best format (precedence, etc...)
# Perhaps a 'struct' that has a field for each time-type where a
# zero would indicate "don't show" and a number would indicate
# "show" with some sort of priority. Same priorities could mean
# show all with the same priority.
# Or more simply, perhaps just a format string for each
# possibility...
def __init__(self, locator, tz=None):
self._locator = locator
self._formatter = DateFormatter("%b %d %Y %H:%M:%S %Z", tz)
self._tz = tz
def __call__(self, x, pos=0):
scale = float( self._locator._get_unit() )
if ( scale == 365.0 ):
self._formatter = DateFormatter("%Y", self._tz)
elif ( scale == 30.0 ):
self._formatter = DateFormatter("%b %Y", self._tz)
elif ( (scale == 1.0) or (scale == 7.0) ):
self._formatter = DateFormatter("%b %d %Y", self._tz)
elif ( scale == (1.0/24.0) ):
self._formatter = DateFormatter("%H:%M:%S %Z", self._tz)
elif ( scale == (1.0/(24*60)) ):
self._formatter = DateFormatter("%H:%M:%S %Z", self._tz)
elif ( scale == (1.0/(24*3600)) ):
self._formatter = DateFormatter("%H:%M:%S %Z", self._tz)
else:
self._formatter = DateFormatter("%b %d %Y %H:%M:%S %Z", self._tz)
return self._formatter(x, pos)
class rrulewrapper:
def __init__(self, freq, **kwargs):
self._construct = kwargs.copy()
self._construct["freq"] = freq
self._rrule = rrule(**self._construct)
def set(self, **kwargs):
self._construct.update(kwargs)
self._rrule = rrule(**self._construct)
def __getattr__(self, name):
if name in self.__dict__:
return self.__dict__[name]
return getattr(self._rrule, name)
class DateLocator(ticker.Locator):
hms0d = {'byhour':0, 'byminute':0,'bysecond':0}
def __init__(self, tz=None):
"""
*tz* is a :class:`tzinfo` instance.
"""
if tz is None: tz = _get_rc_timezone()
self.tz = tz
def set_tzinfo(self, tz):
self.tz = tz
def datalim_to_dt(self):
dmin, dmax = self.axis.get_data_interval()
return num2date(dmin, self.tz), num2date(dmax, self.tz)
def viewlim_to_dt(self):
vmin, vmax = self.axis.get_view_interval()
return num2date(vmin, self.tz), num2date(vmax, self.tz)
def _get_unit(self):
"""
Return how many days a unit of the locator is; used for
intelligent autoscaling.
"""
return 1
def nonsingular(self, vmin, vmax):
unit = self._get_unit()
vmin -= 2*unit
vmax += 2*unit
return vmin, vmax
class RRuleLocator(DateLocator):
# use the dateutil rrule instance
def __init__(self, o, tz=None):
DateLocator.__init__(self, tz)
self.rule = o
def __call__(self):
# if no data have been set, this will tank with a ValueError
try: dmin, dmax = self.viewlim_to_dt()
except ValueError: return []
if dmin>dmax:
dmax, dmin = dmin, dmax
delta = relativedelta(dmax, dmin)
self.rule.set(dtstart=dmin-delta, until=dmax+delta)
dates = self.rule.between(dmin, dmax, True)
return date2num(dates)
def _get_unit(self):
"""
Return how many days a unit of the locator is; used for
intelligent autoscaling.
"""
freq = self.rule._rrule._freq
if ( freq == YEARLY ):
return 365
elif ( freq == MONTHLY ):
return 30
elif ( freq == WEEKLY ):
return 7
elif ( freq == DAILY ):
return 1
elif ( freq == HOURLY ):
return (1.0/24.0)
elif ( freq == MINUTELY ):
return (1.0/(24*60))
elif ( freq == SECONDLY ):
return (1.0/(24*3600))
else:
# error
return -1 #or should this just return '1'?
def autoscale(self):
"""
Set the view limits to include the data range.
"""
dmin, dmax = self.datalim_to_dt()
if dmin>dmax:
dmax, dmin = dmin, dmax
delta = relativedelta(dmax, dmin)
self.rule.set(dtstart=dmin-delta, until=dmax+delta)
dmin, dmax = self.datalim_to_dt()
vmin = self.rule.before(dmin, True)
if not vmin: vmin=dmin
vmax = self.rule.after(dmax, True)
if not vmax: vmax=dmax
vmin = date2num(vmin)
vmax = date2num(vmax)
return self.nonsingular(vmin, vmax)
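# Illustrative sketch (editor's addition, not part of the original module):
# an RRuleLocator that ticks on every 2nd Monday.  A locator still needs an
# axis attached (normally done by the Axis machinery) before it can be
# called; only the construction is shown here.
def _example_rrule_locator():
    rule = rrulewrapper(WEEKLY, byweekday=MO, interval=2)
    return RRuleLocator(rule, tz=UTC)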
class AutoDateLocator(DateLocator):
"""
On autoscale, this class picks the best
    :class:`RRuleLocator` to set the view limits and the tick
locations.
"""
def __init__(self, tz=None):
DateLocator.__init__(self, tz)
self._locator = YearLocator()
self._freq = YEARLY
def __call__(self):
'Return the locations of the ticks'
self.refresh()
return self._locator()
def set_axis(self, axis):
DateLocator.set_axis(self, axis)
self._locator.set_axis(axis)
def refresh(self):
'Refresh internal information based on current limits.'
dmin, dmax = self.viewlim_to_dt()
self._locator = self.get_locator(dmin, dmax)
def _get_unit(self):
if ( self._freq == YEARLY ):
return 365.0
elif ( self._freq == MONTHLY ):
return 30.0
elif ( self._freq == WEEKLY ):
return 7.0
elif ( self._freq == DAILY ):
return 1.0
elif ( self._freq == HOURLY ):
return 1.0/24
elif ( self._freq == MINUTELY ):
return 1.0/(24*60)
elif ( self._freq == SECONDLY ):
return 1.0/(24*3600)
else:
# error
return -1
def autoscale(self):
'Try to choose the view limits intelligently.'
dmin, dmax = self.datalim_to_dt()
self._locator = self.get_locator(dmin, dmax)
return self._locator.autoscale()
def get_locator(self, dmin, dmax):
'Pick the best locator based on a distance.'
delta = relativedelta(dmax, dmin)
numYears = (delta.years * 1.0)
numMonths = (numYears * 12.0) + delta.months
numDays = (numMonths * 31.0) + delta.days
numHours = (numDays * 24.0) + delta.hours
numMinutes = (numHours * 60.0) + delta.minutes
numSeconds = (numMinutes * 60.0) + delta.seconds
numticks = 5
# self._freq = YEARLY
interval = 1
bymonth = 1
bymonthday = 1
byhour = 0
byminute = 0
bysecond = 0
if ( numYears >= numticks ):
self._freq = YEARLY
elif ( numMonths >= numticks ):
self._freq = MONTHLY
bymonth = range(1, 13)
if ( (0 <= numMonths) and (numMonths <= 14) ):
interval = 1 # show every month
elif ( (15 <= numMonths) and (numMonths <= 29) ):
interval = 3 # show every 3 months
elif ( (30 <= numMonths) and (numMonths <= 44) ):
interval = 4 # show every 4 months
else: # 45 <= numMonths <= 59
interval = 6 # show every 6 months
elif ( numDays >= numticks ):
self._freq = DAILY
bymonth = None
bymonthday = range(1, 32)
if ( (0 <= numDays) and (numDays <= 9) ):
interval = 1 # show every day
elif ( (10 <= numDays) and (numDays <= 19) ):
interval = 2 # show every 2 days
elif ( (20 <= numDays) and (numDays <= 49) ):
interval = 3 # show every 3 days
elif ( (50 <= numDays) and (numDays <= 99) ):
interval = 7 # show every 1 week
else: # 100 <= numDays <= ~150
interval = 14 # show every 2 weeks
elif ( numHours >= numticks ):
self._freq = HOURLY
bymonth = None
bymonthday = None
byhour = range(0, 24) # show every hour
if ( (0 <= numHours) and (numHours <= 14) ):
interval = 1 # show every hour
elif ( (15 <= numHours) and (numHours <= 30) ):
interval = 2 # show every 2 hours
elif ( (30 <= numHours) and (numHours <= 45) ):
interval = 3 # show every 3 hours
elif ( (45 <= numHours) and (numHours <= 68) ):
interval = 4 # show every 4 hours
elif ( (68 <= numHours) and (numHours <= 90) ):
interval = 6 # show every 6 hours
else: # 90 <= numHours <= 120
interval = 12 # show every 12 hours
elif ( numMinutes >= numticks ):
self._freq = MINUTELY
bymonth = None
bymonthday = None
byhour = None
byminute = range(0, 60)
if ( numMinutes > (10.0 * numticks) ):
interval = 10
# end if
elif ( numSeconds >= numticks ):
self._freq = SECONDLY
bymonth = None
bymonthday = None
byhour = None
byminute = None
bysecond = range(0, 60)
if ( numSeconds > (10.0 * numticks) ):
interval = 10
# end if
else:
# do what?
# microseconds as floats, but floats from what reference point?
pass
rrule = rrulewrapper( self._freq, interval=interval, \
dtstart=dmin, until=dmax, \
bymonth=bymonth, bymonthday=bymonthday, \
byhour=byhour, byminute = byminute, \
bysecond=bysecond )
locator = RRuleLocator(rrule, self.tz)
locator.set_axis(self.axis)
locator.set_view_interval(*self.axis.get_view_interval())
locator.set_data_interval(*self.axis.get_data_interval())
return locator
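# Illustrative sketch (editor's addition, not part of the original module):
# pairing an AutoDateLocator with an AutoDateFormatter, which is the usual
# combination when the tick frequency should adapt to the view limits.
def _example_auto_date_axis():
    loc = AutoDateLocator(tz=UTC)
    fmt = AutoDateFormatter(loc, tz=UTC)
    return loc, fmt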
class YearLocator(DateLocator):
"""
Make ticks on a given day of each year that is a multiple of base.
Examples::
# Tick every year on Jan 1st
locator = YearLocator()
# Tick every 5 years on July 4th
locator = YearLocator(5, month=7, day=4)
"""
def __init__(self, base=1, month=1, day=1, tz=None):
"""
        Mark years that are multiples of base on a given month and day
        (default Jan 1).
"""
DateLocator.__init__(self, tz)
self.base = ticker.Base(base)
self.replaced = { 'month' : month,
'day' : day,
'hour' : 0,
'minute' : 0,
'second' : 0,
'tzinfo' : tz
}
def _get_unit(self):
"""
Return how many days a unit of the locator is; used for
intelligent autoscaling.
"""
return 365
def __call__(self):
dmin, dmax = self.viewlim_to_dt()
ymin = self.base.le(dmin.year)
ymax = self.base.ge(dmax.year)
ticks = [dmin.replace(year=ymin, **self.replaced)]
while 1:
dt = ticks[-1]
if dt.year>=ymax: return date2num(ticks)
year = dt.year + self.base.get_base()
ticks.append(dt.replace(year=year, **self.replaced))
def autoscale(self):
"""
Set the view limits to include the data range.
"""
dmin, dmax = self.datalim_to_dt()
ymin = self.base.le(dmin.year)
ymax = self.base.ge(dmax.year)
vmin = dmin.replace(year=ymin, **self.replaced)
vmax = dmax.replace(year=ymax, **self.replaced)
vmin = date2num(vmin)
vmax = date2num(vmax)
return self.nonsingular(vmin, vmax)
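# Illustrative sketch (editor's addition, not part of the original module):
# a YearLocator ticking every 5th year on July 4th, paired with a matching
# DateFormatter.  Attaching them to an axis (e.g. via
# ax.xaxis.set_major_locator / set_major_formatter) is the usual next step
# and is assumed rather than shown here.
def _example_year_locator():
    loc = YearLocator(5, month=7, day=4, tz=UTC)
    fmt = DateFormatter('%Y', tz=UTC)
    return loc, fmt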
class MonthLocator(RRuleLocator):
"""
    Make ticks on occurrences of each month, e.g. 1, 3, 12.
"""
def __init__(self, bymonth=None, bymonthday=1, interval=1, tz=None):
"""
Mark every month in *bymonth*; *bymonth* can be an int or
sequence. Default is ``range(1,13)``, i.e. every month.
*interval* is the interval between each iteration. For
        example, if ``interval=2``, mark every second occurrence.
"""
if bymonth is None: bymonth=range(1,13)
o = rrulewrapper(MONTHLY, bymonth=bymonth, bymonthday=bymonthday,
interval=interval, **self.hms0d)
RRuleLocator.__init__(self, o, tz)
def _get_unit(self):
"""
Return how many days a unit of the locator is; used for
intelligent autoscaling.
"""
return 30
class WeekdayLocator(RRuleLocator):
"""
    Make ticks on occurrences of each weekday.
"""
def __init__(self, byweekday=1, interval=1, tz=None):
"""
Mark every weekday in *byweekday*; *byweekday* can be a number or
sequence.
Elements of *byweekday* must be one of MO, TU, WE, TH, FR, SA,
        SU, the constants from :mod:`dateutil.rrule`.
*interval* specifies the number of weeks to skip. For example,
``interval=2`` plots every second week.
"""
o = rrulewrapper(DAILY, byweekday=byweekday,
interval=interval, **self.hms0d)
RRuleLocator.__init__(self, o, tz)
def _get_unit(self):
"""
        Return how many days a unit of the locator is; used for
intelligent autoscaling.
"""
return 7
class DayLocator(RRuleLocator):
"""
    Make ticks on occurrences of each day of the month. For example,
1, 15, 30.
"""
def __init__(self, bymonthday=None, interval=1, tz=None):
"""
Mark every day in *bymonthday*; *bymonthday* can be an int or
sequence.
Default is to tick every day of the month: ``bymonthday=range(1,32)``
"""
if bymonthday is None: bymonthday=range(1,32)
o = rrulewrapper(DAILY, bymonthday=bymonthday,
interval=interval, **self.hms0d)
RRuleLocator.__init__(self, o, tz)
def _get_unit(self):
"""
Return how many days a unit of the locator is; used for
intelligent autoscaling.
"""
return 1
class HourLocator(RRuleLocator):
"""
    Make ticks on occurrences of each hour.
"""
def __init__(self, byhour=None, interval=1, tz=None):
"""
Mark every hour in *byhour*; *byhour* can be an int or sequence.
Default is to tick every hour: ``byhour=range(24)``
*interval* is the interval between each iteration. For
example, if ``interval=2``, mark every second occurrence.
"""
if byhour is None: byhour=range(24)
rule = rrulewrapper(HOURLY, byhour=byhour, interval=interval,
byminute=0, bysecond=0)
RRuleLocator.__init__(self, rule, tz)
def _get_unit(self):
"""
        Return how many days a unit of the locator is; used for
        intelligent autoscaling.
"""
return 1/24.
class MinuteLocator(RRuleLocator):
"""
    Make ticks on occurrences of each minute.
"""
def __init__(self, byminute=None, interval=1, tz=None):
"""
Mark every minute in *byminute*; *byminute* can be an int or
sequence. Default is to tick every minute: ``byminute=range(60)``
*interval* is the interval between each iteration. For
example, if ``interval=2``, mark every second occurrence.
"""
if byminute is None: byminute=range(60)
rule = rrulewrapper(MINUTELY, byminute=byminute, interval=interval,
bysecond=0)
RRuleLocator.__init__(self, rule, tz)
def _get_unit(self):
"""
Return how many days a unit of the locator is; used for
intelligent autoscaling.
"""
return 1./(24*60)
class SecondLocator(RRuleLocator):
"""
    Make ticks on occurrences of each second.
"""
def __init__(self, bysecond=None, interval=1, tz=None):
"""
Mark every second in *bysecond*; *bysecond* can be an int or
sequence. Default is to tick every second: ``bysecond = range(60)``
*interval* is the interval between each iteration. For
example, if ``interval=2``, mark every second occurrence.
"""
if bysecond is None: bysecond=range(60)
rule = rrulewrapper(SECONDLY, bysecond=bysecond, interval=interval)
RRuleLocator.__init__(self, rule, tz)
def _get_unit(self):
"""
Return how many days a unit of the locator is; used for
intelligent autoscaling.
"""
return 1./(24*60*60)
def _close_to_dt(d1, d2, epsilon=5):
'Assert that datetimes *d1* and *d2* are within *epsilon* microseconds.'
delta = d2-d1
mus = abs(delta.days*MUSECONDS_PER_DAY + delta.seconds*1e6 +
delta.microseconds)
assert(mus<epsilon)
def _close_to_num(o1, o2, epsilon=5):
'Assert that float ordinals *o1* and *o2* are within *epsilon* microseconds.'
delta = abs((o2-o1)*MUSECONDS_PER_DAY)
assert(delta<epsilon)
def epoch2num(e):
"""
    Convert an epoch (seconds since 1970-01-01 UTC), or sequence of epochs,
    to the matplotlib date format, that is days since 0001-01-01 UTC.
"""
spd = 24.*3600.
return 719163 + np.asarray(e)/spd
def num2epoch(d):
"""
Convert days since 0001 to epoch. *d* can be a number or sequence.
"""
spd = 24.*3600.
return (np.asarray(d)-719163)*spd
def mx2num(mxdates):
"""
Convert mx :class:`datetime` instance (or sequence of mx
instances) to the new date format.
"""
scalar = False
if not cbook.iterable(mxdates):
scalar = True
mxdates = [mxdates]
ret = epoch2num([m.ticks() for m in mxdates])
if scalar: return ret[0]
else: return ret
def date_ticker_factory(span, tz=None, numticks=5):
"""
Create a date locator with *numticks* (approx) and a date formatter
for *span* in days. Return value is (locator, formatter).
"""
if span==0: span = 1/24.
minutes = span*24*60
hours = span*24
days = span
weeks = span/7.
months = span/31. # approx
years = span/365.
if years>numticks:
locator = YearLocator(int(years/numticks), tz=tz) # define
fmt = '%Y'
elif months>numticks:
locator = MonthLocator(tz=tz)
fmt = '%b %Y'
elif weeks>numticks:
locator = WeekdayLocator(tz=tz)
fmt = '%a, %b %d'
elif days>numticks:
locator = DayLocator(interval=int(math.ceil(days/numticks)), tz=tz)
fmt = '%b %d'
elif hours>numticks:
locator = HourLocator(interval=int(math.ceil(hours/numticks)), tz=tz)
fmt = '%H:%M\n%b %d'
elif minutes>numticks:
locator = MinuteLocator(interval=int(math.ceil(minutes/numticks)), tz=tz)
fmt = '%H:%M:%S'
else:
locator = MinuteLocator(tz=tz)
fmt = '%H:%M:%S'
formatter = DateFormatter(fmt, tz=tz)
return locator, formatter
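# Illustrative sketch (editor's addition, not part of the original module):
# letting date_ticker_factory pick a locator/formatter pair for a span of
# roughly 90 days with about 5 ticks.
def _example_ticker_factory():
    locator, formatter = date_ticker_factory(span=90, tz=UTC, numticks=5)
    return locator, formatter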
def seconds(s):
'Return seconds as days.'
return float(s)/SEC_PER_DAY
def minutes(m):
'Return minutes as days.'
return float(m)/MINUTES_PER_DAY
def hours(h):
'Return hours as days.'
return h/24.
def weeks(w):
'Return weeks as days.'
return w*7.
class DateConverter(units.ConversionInterface):
def axisinfo(unit):
'return the unit AxisInfo'
if unit=='date':
majloc = AutoDateLocator()
majfmt = AutoDateFormatter(majloc)
return units.AxisInfo(
majloc = majloc,
majfmt = majfmt,
label='',
)
else: return None
axisinfo = staticmethod(axisinfo)
def convert(value, unit):
if units.ConversionInterface.is_numlike(value): return value
return date2num(value)
convert = staticmethod(convert)
def default_units(x):
'Return the default unit for *x* or None'
return 'date'
default_units = staticmethod(default_units)
units.registry[datetime.date] = DateConverter()
units.registry[datetime.datetime] = DateConverter()
if __name__=='__main__':
#tz = None
tz = pytz.timezone('US/Pacific')
#tz = UTC
dt = datetime.datetime(1011, 10, 9, 13, 44, 22, 101010, tzinfo=tz)
x = date2num(dt)
_close_to_dt(dt, num2date(x, tz))
#tz = _get_rc_timezone()
d1 = datetime.datetime( 2000, 3, 1, tzinfo=tz)
d2 = datetime.datetime( 2000, 3, 5, tzinfo=tz)
#d1 = datetime.datetime( 2002, 1, 5, tzinfo=tz)
#d2 = datetime.datetime( 2003, 12, 1, tzinfo=tz)
delta = datetime.timedelta(hours=6)
dates = drange(d1, d2, delta)
# MGDTODO: Broken on transforms branch
#print 'orig', d1
#print 'd2n and back', num2date(date2num(d1), tz)
from _transforms import Value, Interval
v1 = Value(date2num(d1))
v2 = Value(date2num(d2))
dlim = Interval(v1,v2)
vlim = Interval(v1,v2)
#locator = HourLocator(byhour=(3,15), tz=tz)
#locator = MinuteLocator(byminute=(15,30,45), tz=tz)
#locator = YearLocator(base=5, month=7, day=4, tz=tz)
#locator = MonthLocator(bymonthday=15)
locator = DayLocator(tz=tz)
locator.set_data_interval(dlim)
locator.set_view_interval(vlim)
dmin, dmax = locator.autoscale()
vlim.set_bounds(dmin, dmax)
ticks = locator()
fmt = '%Y-%m-%d %H:%M:%S %Z'
formatter = DateFormatter(fmt, tz)
#for t in ticks: print formatter(t)
for t in dates: print formatter(t)
| agpl-3.0 |