repo_name (stringlengths 6-112) | path (stringlengths 4-204) | copies (stringlengths 1-3) | size (stringlengths 4-6) | content (stringlengths 714-810k) | license (stringclasses 15 values)
---|---|---|---|---|---|
ibab/tensorflow | tensorflow/contrib/learn/python/learn/estimators/estimator.py | 2 | 20385 | # Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base Estimator class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import os
import tempfile
import time
import six
from tensorflow.contrib import framework as contrib_framework
from tensorflow.contrib import layers
from tensorflow.contrib import losses
from tensorflow.contrib.learn.python.learn.estimators import _sklearn as sklearn
from tensorflow.contrib.learn.python.learn.estimators import run_config
from tensorflow.contrib.learn.python.learn.estimators import tensor_signature
from tensorflow.contrib.learn.python.learn.graph_actions import evaluate
from tensorflow.contrib.learn.python.learn.graph_actions import infer
from tensorflow.contrib.learn.python.learn.graph_actions import train
from tensorflow.contrib.learn.python.learn.io import data_feeder
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import device_setter
from tensorflow.python.training import saver
# Default metrics for evaluation.
_EVAL_METRICS = {
'regression': {
'mean_squared_error': losses.sum_of_squares,
},
'classification': {
'logistic': losses.sigmoid_cross_entropy,
},}
class ModeKeys(object):
"""Standard names for model modes.
The following standard keys are defined:
* `TRAIN`: training mode.
* `EVAL`: evaluation mode.
* `INFER`: inference mode.
"""
TRAIN = 'train'
EVAL = 'eval'
INFER = 'infer'
def _get_input_fn(x, y, batch_size):
# TODO(ipoloshukin): Remove this when refactor of data_feeder is done
if hasattr(x, 'create_graph') and hasattr(y, 'create_graph'):
def input_fn():
return x.create_graph(), y.create_graph()
return input_fn, None
df = data_feeder.setup_train_data_feeder(x, y,
n_classes=None,
batch_size=batch_size)
return df.input_builder, df.get_feed_dict_fn()
def _get_predict_input_fn(x, batch_size):
# TODO(ipoloshukin): Remove this when refactor of data_feeder is done
if hasattr(x, 'create_graph'):
def input_fn():
return x.create_graph()
return input_fn, None
df = data_feeder.setup_train_data_feeder(x, None,
n_classes=None,
batch_size=batch_size)
return df.input_builder, df.get_feed_dict_fn()
class BaseEstimator(sklearn.BaseEstimator):
"""Abstract BaseEstimator class to train and evaluate TensorFlow models.
Concrete implementations of this class should provide the following functions:
* _get_train_ops
* _get_eval_ops
* _get_predict_ops
It may override _get_default_metric_functions.
`Estimator` implemented below is a good example of how to use this class.
Parameters:
model_dir: Directory to save model parameters, graph, etc.
"""
__metaclass__ = abc.ABCMeta
# TODO(wicke): Remove this once launcher takes over config functionality
_Config = run_config.RunConfig # pylint: disable=invalid-name
def __init__(self, model_dir=None):
# Model directory.
self._model_dir = model_dir
if self._model_dir is None:
self._model_dir = tempfile.mkdtemp()
logging.info('Using temporary folder as model directory: %s',
self._model_dir)
# Create a run configuration
self._config = BaseEstimator._Config()
# Set device function depending on whether there are replicas or not.
if self._config.num_ps_replicas > 0:
ps_ops = ['Variable', 'AutoReloadVariable']
self._device_fn = device_setter.replica_device_setter(
ps_tasks=self._config.num_ps_replicas,
merge_devices=False, ps_ops=ps_ops)
else:
self._device_fn = None
# Features and targets TensorSignature objects.
self._features_info = None
self._targets_info = None
@abc.abstractmethod
def _get_train_ops(self, features, targets):
"""Method that builds model graph and returns trainer ops.
Expected to be overridden by sub-classes that require custom support.
Args:
features: `Tensor` or `dict` of `Tensor` objects.
targets: `Tensor` or `dict` of `Tensor` objects.
Returns:
Tuple of train `Operation` and loss `Tensor`.
"""
pass
@abc.abstractmethod
def _get_predict_ops(self, features):
"""Method that builds model graph and returns prediction ops.
Args:
features: `Tensor` or `dict` of `Tensor` objects.
Returns:
predictions: `Tensor` or `dict` of `Tensor` objects.
"""
pass
def _get_eval_ops(self, features, targets, metrics):
"""Method that builds model graph and returns evaluation ops.
Args:
features: `Tensor` or `dict` of `Tensor` objects.
targets: `Tensor` or `dict` of `Tensor` objects.
metrics: `dict` of functions that take predictions and targets.
Returns:
metrics: `dict` of `Tensor` objects.
"""
predictions = self._get_predict_ops(features)
result = {}
for name, metric in six.iteritems(metrics):
result[name] = metric(predictions, targets)
return result
def _get_feature_ops_from_example(self, examples_batch):
"""Method that returns features given the batch of examples.
This method will be used to export the model to a server.
Args:
examples_batch: batch of tf.Example
Returns:
features: `Tensor` or `dict` of `Tensor` objects.
"""
raise NotImplementedError('_get_feature_ops_from_example not implemented '
'in BaseEstimator')
def _get_default_metric_functions(self):
"""Method that provides default metric operations.
This function is intended to be overridden by sub-classes.
Returns:
`dict` of functions that take predictions and targets `Tensor` objects and
return `Tensor`.
"""
return {}
def fit(self, x, y, steps, batch_size=32, monitor=None):
"""Trains a model given training data X and y.
Args:
x: matrix or tensor of shape [n_samples, n_features...]. Can be
iterator that returns arrays of features. The training input
samples for fitting the model.
y: vector or matrix [n_samples] or [n_samples, n_outputs]. Can be
iterator that returns array of targets. The training target values
(class labels in classification, real numbers in regression).
steps: number of steps to train model for.
batch_size: minibatch size to use on the input, defaults to 32.
monitor: monitor object to print training progress and invoke
early stopping.
Returns:
Returns self.
"""
input_fn, feed_fn = _get_input_fn(x, y, batch_size)
return self._train_model(input_fn=input_fn,
feed_fn=feed_fn,
steps=steps,
monitor=monitor)
def train(self, input_fn, steps, monitor=None):
"""Trains a model given input builder function.
Args:
input_fn: Input builder function, returns tuple of dicts or
dict and Tensor.
steps: number of steps to train model for.
monitor: monitor object to print training progress and invoke
early stopping.
Returns:
Returns self.
"""
return self._train_model(input_fn=input_fn, steps=steps, monitor=monitor)
def partial_fit(self, x, y, steps=1, batch_size=32, monitor=None):
"""Incremental fit on a batch of samples.
This method is expected to be called several times consecutively
on different or the same chunks of the dataset. This can implement either
iterative training or out-of-core/online training.
This is especially useful when the whole dataset is too big to
fit in memory at the same time, or when the model is taking a long time
to converge and you want to split up training into subparts.
Args:
x: matrix or tensor of shape [n_samples, n_features...]. Can be
iterator that returns arrays of features. The training input
samples for fitting the model.
y: vector or matrix [n_samples] or [n_samples, n_outputs]. Can be
iterator that returns array of targets. The training target values
(class label in classification, real numbers in regression).
steps: number of steps to train model for.
batch_size: minibatch size to use on the input, defaults to 32.
monitor: Monitor object to print training progress and invoke
early stopping.
Returns:
Returns self.
"""
input_fn, feed_fn = _get_input_fn(x, y, batch_size)
return self._train_model(input_fn=input_fn,
feed_fn=feed_fn,
steps=steps,
monitor=monitor)
def evaluate(self, x=None, y=None, input_fn=None, feed_fn=None,
batch_size=32, steps=100, metrics=None):
"""Evaluates given model with provided evaluation data.
Args:
x: features.
y: targets.
input_fn: Input function. If set, x and y must be None.
feed_fn: Function creating a feed dict every time it is called. Called
once per iteration.
batch_size: minibatch size to use on the input, defaults to 32. Ignored
if input_fn is set.
steps: Number of steps to evaluate for.
metrics: Dict of metric ops to run.
Returns:
Returns `dict` with evaluation results.
Raises:
ValueError: If x or y are not None while input_fn or feed_fn is not None.
"""
if (x is not None or y is not None) and input_fn is not None:
raise ValueError('Either x and y or input_fn must be None.')
if input_fn is None:
assert x is not None
input_fn, feed_fn = _get_input_fn(x, y, batch_size)
return self._evaluate_model(input_fn=input_fn, feed_fn=feed_fn,
steps=steps, metrics=metrics)
def predict(self, x, axis=None, batch_size=None):
"""Returns predictions for given features.
Args:
x: features.
axis: Axis on which to argmax. (for classification).
batch_size: Override default batch size.
Returns:
Numpy array of predicted classes or regression values.
"""
return self._infer_model(x=x, batch_size=batch_size, axis=axis)
def predict_proba(self, x, batch_size=None):
"""Returns prediction probabilities for given features (classification).
Args:
x: features.
batch_size: Override default batch size.
Returns:
Numpy array of predicted probabilities.
"""
return self._infer_model(x=x, batch_size=batch_size, proba=True)
def _check_inputs(self, features, targets):
if self._features_info is not None:
if not tensor_signature.tensors_compatible(features, self._features_info):
raise ValueError('Features are incompatible with given information. '
'Given features: %s, required signatures: %s.' %
(str(features), str(self._features_info)))
else:
self._features_info = tensor_signature.create_signatures(features)
if self._targets_info is not None:
if not tensor_signature.tensors_compatible(targets, self._targets_info):
raise ValueError('Targets are incompatible with given information. '
'Given targets: %s, required signatures: %s.' %
(str(targets), str(self._targets_info)))
else:
self._targets_info = tensor_signature.create_signatures(targets)
def _train_model(self,
input_fn,
steps,
feed_fn=None,
device_fn=None,
monitor=None,
log_every_steps=100,
fail_on_nan_loss=True):
if self._config.execution_mode not in ('all', 'train'):
return
# Stagger startup of worker sessions based on task id.
sleep_secs = min(self._config.training_worker_max_startup_secs,
self._config.task *
self._config.training_worker_session_startup_stagger_secs)
if sleep_secs:
logging.info('Waiting %d secs before starting task %d.', sleep_secs,
self._config.task)
time.sleep(sleep_secs)
# Device allocation
device_fn = device_fn or self._device_fn
with ops.Graph().as_default() as g, g.device(device_fn):
random_seed.set_random_seed(self._config.tf_random_seed)
global_step = contrib_framework.create_global_step(g)
features, targets = input_fn()
self._check_inputs(features, targets)
train_op, loss_op = self._get_train_ops(features, targets)
return train(
graph=g,
output_dir=self._model_dir,
train_op=train_op,
loss_op=loss_op,
global_step_tensor=global_step,
log_every_steps=log_every_steps,
supervisor_is_chief=(self._config.task == 0),
supervisor_master=self._config.master,
feed_fn=feed_fn,
max_steps=steps,
fail_on_nan_loss=fail_on_nan_loss)
def _evaluate_model(self, input_fn, steps, feed_fn=None, metrics=None):
if self._config.execution_mode not in ('all', 'evaluate', 'eval_evalset'):
return
checkpoint_path = saver.latest_checkpoint(self._model_dir)
eval_dir = os.path.join(self._model_dir, 'eval')
with ops.Graph().as_default() as g:
random_seed.set_random_seed(self._config.tf_random_seed)
global_step = contrib_framework.create_global_step(g)
features, targets = input_fn()
self._check_inputs(features, targets)
eval_dict = self._get_eval_ops(features, targets, metrics or
self._get_default_metric_functions())
eval_results, _ = evaluate(
graph=g,
output_dir=eval_dir,
checkpoint_path=checkpoint_path,
eval_dict=eval_dict,
global_step_tensor=global_step,
supervisor_master=self._config.master,
feed_fn=feed_fn,
max_steps=steps)
return eval_results
def _infer_model(self, x, batch_size=None, axis=None, proba=False):
# Converts inputs into tf.DataFrame / tf.Series.
batch_size = -1 if batch_size is None else batch_size
input_fn, feed_fn = _get_predict_input_fn(x, batch_size)
checkpoint_path = saver.latest_checkpoint(self._model_dir)
with ops.Graph().as_default() as g:
random_seed.set_random_seed(self._config.tf_random_seed)
contrib_framework.create_global_step(g)
features, _ = input_fn()
feed_dict = feed_fn() if feed_fn is not None else None
predictions = self._get_predict_ops(features)
if not isinstance(predictions, dict):
predictions = {'predictions': predictions}
# TODO(ipolosukhin): Support batching
return infer(checkpoint_path, predictions, feed_dict=feed_dict)
class Estimator(BaseEstimator):
"""Estimator class is the basic TensorFlow model trainer/evaluator.
Parameters:
model_fn: Model function, takes features and targets tensors or dicts of
tensors and returns predictions and loss tensors.
E.g. `(features, targets) -> (predictions, loss)`.
model_dir: Directory to save model parameters, graph, etc.
classification: boolean, true if classification problem.
learning_rate: learning rate for the model.
optimizer: optimizer for the model, can be:
string: name of optimizer, like 'SGD', 'Adam', 'Adagrad', 'Ftrl',
'Momentum' or 'RMSProp'.
Full list in contrib/layers/optimizers.py
class: sub-class of Optimizer
(like tf.train.GradientDescentOptimizer).
clip_gradients: clip_norm value for call to `clip_by_global_norm`. None
denotes no gradient clipping.
"""
def __init__(self,
model_fn=None,
model_dir=None,
classification=True,
learning_rate=0.01,
optimizer='SGD',
clip_gradients=None):
super(Estimator, self).__init__(model_dir=model_dir)
self._model_fn = model_fn
self._classification = classification
if isinstance(optimizer, six.string_types):
if optimizer not in layers.OPTIMIZER_CLS_NAMES:
raise ValueError(
'Optimizer name should be one of [%s], you provided %s.' %
(', '.join(layers.OPTIMIZER_CLS_NAMES), optimizer))
self.optimizer = optimizer
self.learning_rate = learning_rate
self.clip_gradients = clip_gradients
def _get_train_ops(self, features, targets):
"""Method that builds model graph and returns trainer ops.
Expected to be overridden by sub-classes that require custom support.
This implementation uses `model_fn` passed as parameter to constructor to
build model.
Args:
features: `Tensor` or `dict` of `Tensor` objects.
targets: `Tensor` or `dict` of `Tensor` objects.
Returns:
Tuple of train `Operation` and loss `Tensor`.
"""
_, loss = self._model_fn(features, targets, ModeKeys.TRAIN)
train_op = layers.optimize_loss(
loss,
contrib_framework.get_global_step(),
learning_rate=self.learning_rate,
optimizer=self.optimizer,
clip_gradients=self.clip_gradients)
return train_op, loss
def _get_eval_ops(self, features, targets, metrics):
"""Method that builds model graph and returns evaluation ops.
Expected to be overridden by sub-classes that require custom support.
This implementation uses `model_fn` passed as parameter to constructor to
build model.
Args:
features: `Tensor` or `dict` of `Tensor` objects.
targets: `Tensor` or `dict` of `Tensor` objects.
metrics: `dict` of functions that take predictions and targets.
Returns:
metrics: `dict` of `Tensor` objects.
"""
predictions, loss = self._model_fn(features, targets, ModeKeys.EVAL)
result = {'loss': loss}
if isinstance(targets, dict) and len(targets) == 1:
# Unpack single target into just tensor.
targets = targets[list(targets.keys())[0]]
for name, metric in six.iteritems(metrics):
# TODO(ipolosukhin): Add support for multi-head metrics.
result[name] = metric(predictions, targets)
return result
def _get_predict_ops(self, features):
"""Method that builds model graph and returns prediction ops.
Expected to be overridden by sub-classes that require custom support.
This implementation uses `model_fn` passed as parameter to constructor to
build model.
Args:
features: `Tensor` or `dict` of `Tensor` objects.
Returns:
predictions: `Tensor` or `dict` of `Tensor` objects.
"""
targets = tensor_signature.create_placeholders_from_signatures(
self._targets_info)
predictions, _ = self._model_fn(features, targets, ModeKeys.INFER)
return predictions
def _get_default_metric_functions(self):
"""Method that provides default metric operations.
Returns:
a dictionary of metric operations.
"""
return _EVAL_METRICS[
'classification' if self._classification else 'regression']
def _get_feature_ops_from_example(self, examples_batch):
"""Unimplemented.
TODO(vihanjain): We need a way to parse tf.Example into features.
Args:
examples_batch: batch of tf.Example
Returns:
features: `Tensor` or `dict` of `Tensor` objects.
Raises:
Exception: Unimplemented
"""
raise NotImplementedError('_get_feature_ops_from_example not yet '
'implemented')
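# Hedged usage sketch (added for illustration; not part of the original file):
# how an Estimator might be wired up with a user-defined model_fn. The toy
# model_fn, the arrays x_train/y_train/x_test/y_test, and the parameter values
# are hypothetical placeholders. Kept as a comment so importing this module
# stays side-effect free.
#
#   def my_model_fn(features, targets, mode):
#     predictions = layers.fully_connected(features, 1, activation_fn=None)
#     loss = losses.sum_of_squares(predictions, targets)
#     return predictions, loss
#
#   estimator = Estimator(model_fn=my_model_fn, classification=False,
#                         learning_rate=0.1, optimizer='SGD')
#   estimator.fit(x_train, y_train, steps=100)
#   results = estimator.evaluate(x=x_test, y=y_test)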
| apache-2.0 |
nan86150/ImageFusion | lib/python2.7/site-packages/matplotlib/tests/test_image.py | 10 | 10783 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import numpy as np
from matplotlib.testing.decorators import image_comparison, knownfailureif, cleanup
from matplotlib.image import BboxImage, imread
from matplotlib.transforms import Bbox
from matplotlib import rcParams
import matplotlib.pyplot as plt
from nose.tools import assert_raises
from numpy.testing import assert_array_equal, assert_array_almost_equal
import io
import os
try:
from PIL import Image
HAS_PIL = True
except ImportError:
HAS_PIL = False
@image_comparison(baseline_images=['image_interps'])
def test_image_interps():
'make the basic nearest, bilinear and bicubic interps'
X = np.arange(100)
X = X.reshape(5, 20)
fig = plt.figure()
ax1 = fig.add_subplot(311)
ax1.imshow(X, interpolation='nearest')
ax1.set_title('three interpolations')
ax1.set_ylabel('nearest')
ax2 = fig.add_subplot(312)
ax2.imshow(X, interpolation='bilinear')
ax2.set_ylabel('bilinear')
ax3 = fig.add_subplot(313)
ax3.imshow(X, interpolation='bicubic')
ax3.set_ylabel('bicubic')
@image_comparison(baseline_images=['interp_nearest_vs_none'],
extensions=['pdf', 'svg'], remove_text=True)
def test_interp_nearest_vs_none():
'Test the effect of "nearest" and "none" interpolation'
# Setting dpi to something really small makes the difference very
# visible. This works fine with pdf, since the dpi setting doesn't
# affect anything but images, but the agg output becomes unusably
# small.
rcParams['savefig.dpi'] = 3
X = np.array([[[218, 165, 32], [122, 103, 238]],
[[127, 255, 0], [255, 99, 71]]], dtype=np.uint8)
fig = plt.figure()
ax1 = fig.add_subplot(121)
ax1.imshow(X, interpolation='none')
ax1.set_title('interpolation none')
ax2 = fig.add_subplot(122)
ax2.imshow(X, interpolation='nearest')
ax2.set_title('interpolation nearest')
@image_comparison(baseline_images=['figimage-0', 'figimage-1'], extensions=['png'])
def test_figimage():
'test the figimage method'
for suppressComposite in False, True:
fig = plt.figure(figsize=(2,2), dpi=100)
fig.suppressComposite = suppressComposite
x,y = np.ix_(np.arange(100.0)/100.0, np.arange(100.0)/100.0)
z = np.sin(x**2 + y**2 - x*y)
c = np.sin(20*x**2 + 50*y**2)
img = z + c/5
fig.figimage(img, xo=0, yo=0, origin='lower')
fig.figimage(img[::-1,:], xo=0, yo=100, origin='lower')
fig.figimage(img[:,::-1], xo=100, yo=0, origin='lower')
fig.figimage(img[::-1,::-1], xo=100, yo=100, origin='lower')
@cleanup
def test_image_python_io():
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot([1,2,3])
buffer = io.BytesIO()
fig.savefig(buffer)
buffer.seek(0)
plt.imread(buffer)
@knownfailureif(not HAS_PIL)
def test_imread_pil_uint16():
img = plt.imread(os.path.join(os.path.dirname(__file__),
'baseline_images', 'test_image', 'uint16.tif'))
assert (img.dtype == np.uint16)
assert np.sum(img) == 134184960
# def test_image_unicode_io():
# fig = plt.figure()
# ax = fig.add_subplot(111)
# ax.plot([1,2,3])
# fname = u"\u0a3a\u0a3a.png"
# fig.savefig(fname)
# plt.imread(fname)
# os.remove(fname)
def test_imsave():
# The goal here is that the user can specify an output logical DPI
# for the image, but this will not actually add any extra pixels
# to the image, it will merely be used for metadata purposes.
# So we do the traditional case (dpi == 1), and the new case (dpi
# == 100) and read the resulting PNG files back in and make sure
# the data is 100% identical.
from numpy import random
random.seed(1)
data = random.rand(256, 128)
buff_dpi1 = io.BytesIO()
plt.imsave(buff_dpi1, data, dpi=1)
buff_dpi100 = io.BytesIO()
plt.imsave(buff_dpi100, data, dpi=100)
buff_dpi1.seek(0)
arr_dpi1 = plt.imread(buff_dpi1)
buff_dpi100.seek(0)
arr_dpi100 = plt.imread(buff_dpi100)
assert arr_dpi1.shape == (256, 128, 4)
assert arr_dpi100.shape == (256, 128, 4)
assert_array_equal(arr_dpi1, arr_dpi100)
def test_imsave_color_alpha():
# Test that imsave accept arrays with ndim=3 where the third dimension is
# color and alpha without raising any exceptions, and that the data is
# acceptably preserved through a save/read roundtrip.
from numpy import random
random.seed(1)
data = random.rand(256, 128, 4)
buff = io.BytesIO()
plt.imsave(buff, data)
buff.seek(0)
arr_buf = plt.imread(buff)
# Recreate the float -> uint8 -> float32 conversion of the data
data = (255*data).astype('uint8').astype('float32')/255
# Wherever alpha values were rounded down to 0, the rgb values all get set
# to 0 during imsave (this is reasonable behaviour).
# Recreate that here:
for j in range(3):
data[data[:, :, 3] == 0, j] = 1
assert_array_equal(data, arr_buf)
@image_comparison(baseline_images=['image_clip'])
def test_image_clip():
from math import pi
fig = plt.figure()
ax = fig.add_subplot(111, projection='hammer')
d = [[1,2],[3,4]]
im = ax.imshow(d, extent=(-pi,pi,-pi/2,pi/2))
@image_comparison(baseline_images=['image_cliprect'])
def test_image_cliprect():
import matplotlib.patches as patches
fig = plt.figure()
ax = fig.add_subplot(111)
d = [[1,2],[3,4]]
im = ax.imshow(d, extent=(0,5,0,5))
rect = patches.Rectangle(xy=(1,1), width=2, height=2, transform=im.axes.transData)
im.set_clip_path(rect)
@image_comparison(baseline_images=['imshow'], remove_text=True)
def test_imshow():
import numpy as np
import matplotlib.pyplot as plt
fig = plt.figure()
arr = np.arange(100).reshape((10, 10))
ax = fig.add_subplot(111)
ax.imshow(arr, interpolation="bilinear", extent=(1,2,1,2))
ax.set_xlim(0,3)
ax.set_ylim(0,3)
@image_comparison(baseline_images=['no_interpolation_origin'], remove_text=True)
def test_no_interpolation_origin():
fig = plt.figure()
ax = fig.add_subplot(211)
ax.imshow(np.arange(100).reshape((2, 50)), origin="lower", interpolation='none')
ax = fig.add_subplot(212)
ax.imshow(np.arange(100).reshape((2, 50)), interpolation='none')
@image_comparison(baseline_images=['image_shift'], remove_text=True,
extensions=['pdf', 'svg'])
def test_image_shift():
from matplotlib.colors import LogNorm
imgData = [[1.0/(x) + 1.0/(y) for x in range(1,100)] for y in range(1,100)]
tMin=734717.945208
tMax=734717.946366
fig = plt.figure()
ax = fig.add_subplot(111)
ax.imshow(imgData, norm=LogNorm(), interpolation='none',
extent=(tMin, tMax, 1, 100))
ax.set_aspect('auto')
@cleanup
def test_image_edges():
f = plt.figure(figsize=[1, 1])
ax = f.add_axes([0, 0, 1, 1], frameon=False)
data = np.tile(np.arange(12), 15).reshape(20, 9)
im = ax.imshow(data, origin='upper',
extent=[-10, 10, -10, 10], interpolation='none',
cmap='gray'
)
x = y = 2
ax.set_xlim([-x, x])
ax.set_ylim([-y, y])
ax.set_xticks([])
ax.set_yticks([])
buf = io.BytesIO()
f.savefig(buf, facecolor=(0, 1, 0))
buf.seek(0)
im = plt.imread(buf)
r, g, b, a = sum(im[:, 0])
r, g, b, a = sum(im[:, -1])
assert g != 100, 'Expected a non-green edge - but sadly, it was.'
@image_comparison(baseline_images=['image_composite_background'], remove_text=True)
def test_image_composite_background():
fig = plt.figure()
ax = fig.add_subplot(111)
arr = np.arange(12).reshape(4, 3)
ax.imshow(arr, extent=[0, 2, 15, 0])
ax.imshow(arr, extent=[4, 6, 15, 0])
ax.set_axis_bgcolor((1, 0, 0, 0.5))
ax.set_xlim([0, 12])
@image_comparison(baseline_images=['image_composite_alpha'], remove_text=True)
def test_image_composite_alpha():
"""
Tests that the alpha value is recognized and correctly applied in the
process of compositing images together.
"""
fig = plt.figure()
ax = fig.add_subplot(111)
arr = np.zeros((11, 21, 4))
arr[:, :, 0] = 1
arr[:, :, 3] = np.concatenate((np.arange(0, 1.1, 0.1), np.arange(0, 1, 0.1)[::-1]))
arr2 = np.zeros((21, 11, 4))
arr2[:, :, 0] = 1
arr2[:, :, 1] = 1
arr2[:, :, 3] = np.concatenate((np.arange(0, 1.1, 0.1), np.arange(0, 1, 0.1)[::-1]))[:, np.newaxis]
ax.imshow(arr, extent=[1, 2, 5, 0], alpha=0.3)
ax.imshow(arr, extent=[2, 3, 5, 0], alpha=0.6)
ax.imshow(arr, extent=[3, 4, 5, 0])
ax.imshow(arr2, extent=[0, 5, 1, 2])
ax.imshow(arr2, extent=[0, 5, 2, 3], alpha=0.6)
ax.imshow(arr2, extent=[0, 5, 3, 4], alpha=0.3)
ax.set_axis_bgcolor((0, 0.5, 0, 1))
ax.set_xlim([0, 5])
ax.set_ylim([5, 0])
@image_comparison(baseline_images=['rasterize_10dpi'], extensions=['pdf','svg'], tol=5e-2, remove_text=True)
def test_rasterize_dpi():
# This test should check rasterized rendering with high output resolution.
# It plots a rasterized line and a normal image with imshow, so it will catch
# cases where images end up in the wrong place with non-standard dpi settings.
# Instead of high-res rasterization I use low-res. Therefore the fact that the
# resolution is non-standard is easily checked by image_comparison.
import numpy as np
import matplotlib.pyplot as plt
img = np.asarray([[1, 2], [3, 4]])
fig, axes = plt.subplots(1, 3, figsize = (3, 1))
axes[0].imshow(img)
axes[1].plot([0,1],[0,1], linewidth=20., rasterized=True)
axes[1].set(xlim = (0,1), ylim = (-1, 2))
axes[2].plot([0,1],[0,1], linewidth=20.)
axes[2].set(xlim = (0,1), ylim = (-1, 2))
# Low-dpi PDF rasterization errors prevent proper image comparison tests.
# Hide detailed structures like the axes spines.
for ax in axes:
ax.set_xticks([])
ax.set_yticks([])
for spine in ax.spines.values():
spine.set_visible(False)
rcParams['savefig.dpi'] = 10
@image_comparison(baseline_images=['bbox_image_inverted'],
extensions=['png', 'pdf'])
def test_bbox_image_inverted():
# This is just used to produce an image to feed to BboxImage
fig = plt.figure()
axes = fig.add_subplot(111)
axes.plot([1, 2, 3])
im_buffer = io.BytesIO()
fig.savefig(im_buffer)
im_buffer.seek(0)
image = imread(im_buffer)
bbox_im = BboxImage(Bbox([[100, 100], [0, 0]]))
bbox_im.set_data(image)
axes.add_artist(bbox_im)
if __name__=='__main__':
import nose
nose.runmodule(argv=['-s','--with-doctest'], exit=False)
| mit |
michaelaye/planetpy | planetarypy/pdstools/scraper.py | 1 | 1520 | from pathlib import Path
from string import Template
import pandas as pd
class CTXIndex:
volumes_url = "https://pds-imaging.jpl.nasa.gov/volumes/mro.html"
release_url_template = \
Template(
"https://pds-imaging.jpl.nasa.gov/volumes/mro/release${release}.html")
volume_url_template = \
Template(
"https://pds-imaging.jpl.nasa.gov/data/mro/mars_reconnaissance_orbiter/ctx/mrox_${volume}/")
@property
def web_tables_list(self):
print("Scraping volumes page ...")
return pd.read_html(self.volumes_url)
@property
def release_number(self):
l = self.web_tables_list
# The last item of last table looks like "Release XX"
return l[-1].iloc[-1, 0].split()[-1]
@property
def release_url(self):
return self.release_url_template.substitute(release=self.release_number)
@property
def latest_volume_url(self):
print("Scraping latest release page ...")
l = pd.read_html(self.release_url)
# get last row of 4th table
row = l[3].iloc[-1]
number = None
# first number that is NAN breaks the loop over last row of table
for elem in row.values:
try:
number = int(elem.split()[-1])
except AttributeError:
break
return self.volume_url_template.substitute(volume=number)
@property
def latest_index_label_url(self):
return self.latest_volume_url + 'index/cumindex.lbl'
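# Hedged usage sketch (added for illustration; not part of the original file):
# each property above scrapes the PDS pages lazily, so every access below
# triggers HTTP requests. Kept as a comment to avoid network traffic on import.
#
#   index = CTXIndex()
#   print(index.release_number)          # e.g. '57' (hypothetical value)
#   print(index.latest_volume_url)       # URL of the newest mrox_NNNN volume
#   print(index.latest_index_label_url)  # cumulative index label inside it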
| bsd-3-clause |
cwu2011/scikit-learn | sklearn/linear_model/tests/test_perceptron.py | 378 | 1815 | import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_raises
from sklearn.utils import check_random_state
from sklearn.datasets import load_iris
from sklearn.linear_model import Perceptron
iris = load_iris()
random_state = check_random_state(12)
indices = np.arange(iris.data.shape[0])
random_state.shuffle(indices)
X = iris.data[indices]
y = iris.target[indices]
X_csr = sp.csr_matrix(X)
X_csr.sort_indices()
class MyPerceptron(object):
def __init__(self, n_iter=1):
self.n_iter = n_iter
def fit(self, X, y):
n_samples, n_features = X.shape
self.w = np.zeros(n_features, dtype=np.float64)
self.b = 0.0
for t in range(self.n_iter):
for i in range(n_samples):
if self.predict(X[i])[0] != y[i]:
self.w += y[i] * X[i]
self.b += y[i]
def project(self, X):
return np.dot(X, self.w) + self.b
def predict(self, X):
X = np.atleast_2d(X)
return np.sign(self.project(X))
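# Added illustrative test (not in the original file): a minimal sanity check of
# the reference MyPerceptron on a tiny, linearly separable toy problem. It only
# uses names already defined above (np, MyPerceptron, assert_array_almost_equal)
# and follows the style of the surrounding nose tests.
def test_my_perceptron_toy():
    X_toy = np.array([[2.0, 1.0], [1.5, 2.0], [-1.0, -2.0], [-2.0, -1.5]])
    y_toy = np.array([1, 1, -1, -1])
    clf = MyPerceptron(n_iter=5)
    clf.fit(X_toy, y_toy)
    # after a few epochs the toy data should be perfectly separated
    assert_array_almost_equal(np.sign(clf.project(X_toy)), y_toy)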
def test_perceptron_accuracy():
for data in (X, X_csr):
clf = Perceptron(n_iter=30, shuffle=False)
clf.fit(data, y)
score = clf.score(data, y)
assert_true(score >= 0.7)
def test_perceptron_correctness():
y_bin = y.copy()
y_bin[y != 1] = -1
clf1 = MyPerceptron(n_iter=2)
clf1.fit(X, y_bin)
clf2 = Perceptron(n_iter=2, shuffle=False)
clf2.fit(X, y_bin)
assert_array_almost_equal(clf1.w, clf2.coef_.ravel())
def test_undefined_methods():
clf = Perceptron()
for meth in ("predict_proba", "predict_log_proba"):
assert_raises(AttributeError, lambda x: getattr(clf, x), meth)
| bsd-3-clause |
DavidBreuer/CytoSeg | CytoSeg/utils.py | 1 | 74513 | # -*- coding: utf-8 -*-
################################################################################
# Module: utils.py
# Description: Test imports and network extraction
# License: GPL3, see full license in LICENSE.txt
# Web: https://github.com/DavidBreuer/CytoSeg
################################################################################
#%%############################################################################# imports
import itertools
import matplotlib.pyplot as plt
import networkx as nx
import numpy as np
import os
import pandas as pd
import random
import scipy as sp
import scipy.misc
import scipy.ndimage
import scipy.optimize
import scipy.spatial
import scipy.stats
import scipy.cluster
import skimage
import skimage.filters
import skimage.morphology
import skimage.feature
import skimage.segmentation
import shapely
import shapely.geometry
import sys
import xml
import xml.dom
import xml.dom.minidom
import utils
#%%############################################################################# help functions
def xmlread(name,threed=0):
"""Read Fiji-Trackmate xml file to Python list of lists.
Parameters
----------
name : name and directory of xml file
threed : set to 1 for three-dimensional data
Returns
-------
T : list of tracks
"""
xmldoc=xml.dom.minidom.parse(name)
spots=xmldoc.getElementsByTagName('Spot')
tracs=xmldoc.getElementsByTagName('Track')
S=[]
N=[]
for spot in spots:
n=int(spot.attributes['ID'].value)
t=float(spot.attributes['POSITION_T'].value)
x=float(spot.attributes['POSITION_X'].value)
y=float(spot.attributes['POSITION_Y'].value)
if(threed): z=float(spot.attributes['POSITION_Z'].value)
else: z=0
mi=float(spot.attributes['MEAN_INTENSITY'].value)
mt=float(spot.attributes['TOTAL_INTENSITY'].value)
mq=float(spot.attributes['QUALITY'].value)
md=float(spot.attributes['ESTIMATED_DIAMETER'].value)
N.append(n)
S.append([n,t,x,y,z,mi,mt,mq,md])
T=[]
for trac in tracs:
n=int(trac.attributes['TRACK_ID'].value)
dur=int(float(trac.attributes['TRACK_DURATION'].value))
dis=float(trac.attributes['TRACK_DISPLACEMENT'].value)
edges=trac.getElementsByTagName('Edge')
E=[]
for edge in edges:
id0=int(edge.attributes['SPOT_SOURCE_ID'].value)
id1=float(edge.attributes['SPOT_TARGET_ID'].value)
vel=float(edge.attributes['VELOCITY'].value)
n0=N.index(id0)
n1=N.index(id1)
m0,t0,x0,y0,z0,mi0,mt0,mq0,md0=S[n0]
m1,t1,x1,y1,z1,mi1,mt1,mq1,md1=S[n1]
E.append([t0,x0,y0,z0,mi0,mt0,mq0,md0,t1,x1,y1,z1,mi1,mt1,mq1,md1])
E=np.array(E)
if(len(E)>0):
E=E[E[:,0].argsort()]
T.append(E)
return T
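# Hedged usage sketch (added for illustration; not part of the original module):
# reading a TrackMate export and summarizing the tracks. The file name
# 'tracks.xml' is a hypothetical placeholder; kept as a comment so the module
# has no side effects on import.
#
#   tracks = xmlread('tracks.xml', threed=0)
#   print('number of tracks:', len(tracks))
#   for track in tracks:
#       # each row: t0,x0,y0,z0,mi0,mt0,mq0,md0,t1,x1,y1,z1,mi1,mt1,mq1,md1
#       steps = np.linalg.norm(track[:, 9:11] - track[:, 1:3], axis=1)
#       print('mean step displacement:', steps.mean())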
def angle360(dxy):
"""Compute angle of two-dimensional vector relative to y-axis in degrees.
Parameters
----------
dxy : two-dimensional vector
Returns
-------
angle : angle in degrees
"""
dx,dy=dxy
rad2deg=180.0/np.pi
angle=np.mod(np.arctan2(-dx,-dy)*rad2deg+180.0,360.0)
return angle
def im2d3d(im):
"""Convert two-dimensional array to three-dimensional array.
Parameters
----------
im : array or image
Returns
-------
im : array or image
"""
if(len(im.shape)==2):
im=im[:,:,np.newaxis]
else:
im=im
return im
def remove_duplicates(points):
"""Remove duplicates from list.
Parameters
----------
points : list
Returns
-------
pointz : list without duplicates
"""
pointz=pd.DataFrame(points).drop_duplicates().values
return pointz
def tube_filter(imO,sigma):
"""Apply tubeness filter to image.
Parameters
----------
imO : original two-dimensional image
sigma : width parameter of tube-like structures
Returns
-------
imT : filtered and rescaled image
"""
imH=skimage.feature.hessian_matrix(imO,sigma=sigma,mode='reflect')
imM=skimage.feature.hessian_matrix_eigvals(imH[0],imH[1],imH[2])
imR=-1.0*imM[1]
imT=255.0*(imR-imR.min())/(imR.max()-imR.min())
return imT
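# Hedged usage sketch (added for illustration; not part of the original module):
# tube_filter enhances ridge-like structures of width ~sigma and rescales the
# response to [0,255]. The synthetic test image below is purely illustrative.
#
#   im_test = np.zeros((64, 64))
#   im_test[30:33, :] = 1.0                      # a 3-pixel-wide "filament"
#   im_enhanced = tube_filter(im_test, sigma=2.0)
#   print(im_enhanced.min(), im_enhanced.max())  # 0.0 255.0 by construction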
def cell_sample(mask,R):
"""Sample random points uniformly across masked area.
Parameters
----------
mask : sampling area
R : number of sampling points
Returns
-------
coords : sampled random points
"""
wh=np.array(np.where(mask)).T
W=len(wh)
idx=sp.random.randint(0,W,R)
coords=wh[idx]+sp.rand(R,2)
return coords
def multi_line_intersect(seg,segs):
"""Check intersections of line segments.
Parameters
----------
seg : single line segment
sigma : multiple line segments
Returns
-------
intersects : Boolean array indicating intersects
"""
intersects=np.array([False])
if(len(segs)>0):
d3=segs[:,1,:]-segs[:,0,:]
d1=seg[1,:]-seg[0,:]
c1x=np.cross(d3,seg[0,:]-segs[:,0,:])
c1y=np.cross(d3,seg[1,:]-segs[:,0,:])
c3x=np.cross(d1,segs[:,0,:]-seg[0,:])
c3y=np.cross(d1,segs[:,1,:]-seg[0,:])
intersects=np.logical_and(c1x*c1y<0,c3x*c3y<0)
return intersects
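# Hedged usage sketch (added for illustration; not part of the original module):
# a segment is a 2x2 array of endpoint coordinates; the return value holds one
# Boolean per segment in `segs`.
#
#   seg_a = np.array([[0.0, 0.0], [2.0, 2.0]])
#   others = np.array([[[0.0, 2.0], [2.0, 0.0]],   # crosses seg_a
#                      [[3.0, 3.0], [4.0, 4.0]]])  # does not cross seg_a
#   print(multi_line_intersect(seg_a, others))     # expected: [ True False]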
def bounds(x,xmin,xmax):
"""Restrict number to interval.
Parameters
----------
x : number
xmin : lower bound
xmax : upper bound
Returns
-------
x : bounded number
"""
if(x<xmin):
x=xmin
elif(x>xmax):
x=xmax
return x
def node_condense(imM,imG,ones):
"""Condense neighboring to single node located at center of mass.
Parameters
----------
imM : binary node array (0 = background; 1 = nodes)
imG : gray-scale intensity image
ones : array defining neighborhood structure
Returns
-------
imL : condensed and labeled node array (0 = background; 1-N = nodes)
"""
imL,N=sp.ndimage.label(imM,structure=ones) # label nodes
sizes=sp.ndimage.sum(imL>0,imL,range(1,N+1)) # compute size of nodes (clusters)
coms=sp.ndimage.center_of_mass(imG,imL,range(1,N+1)) # compute center of mass of nodes (clusters)
for n in range(N): # for each node...
if(sizes[n]>1): # if cluster...
idx=(imL==n+1) # get cluster coordinates
idm=tuple(np.add(coms[n],0.5).astype('int')) # get center of mass coordinates
imL[idx]=0 # remove node cluster
imL[idm]=n+1 # set node at center of mass
imL,N=sp.ndimage.label(imL>0,structure=ones) # label nodes
imL=imL.astype('int')
return imL
def node_find(im):
"""Find nodes in binary filament image.
Parameters
----------
im : section of binary filament image
Returns
-------
val : central pixel of image section (0 = not a node; 1 = node)
"""
ims=np.reshape(im,(3,3,3)) # convert image section of 3x3x3 array
val=0
if(ims[1,1,1]==1): # if central pixel lies on filament...
ims[1,1,1]=0 # remove central pixel
iml,L=sp.ndimage.label(ims) # label remaining filaments
if(L!=0 and L!=2): # if there is one (set end node) or more than two filaments (set crossing node)...
val=1 # set node
return val
def connected_components(graph):
"""Compute connected components of graph after removal of edges with capacities below 50th percentile.
Parameters
----------
graph : original graph
Returns
-------
ca : list of sizes of connected components
"""
gc=graph.copy()
edges=gc.edges(data=True)
ec=1.0*np.array([d['capa'] for u,v,d in edges])
perc=np.percentile(ec,50.0)
for u,v,d in edges:
if d['capa']<=perc:
gc.remove_edge(u,v)
cc=nx.connected_components(gc)
ca=np.array([len(c) for c in cc])
return ca
def path_lengths(graph):
"""Compute shortest path lengths.
Parameters
----------
graph : original graph
Returns
-------
dist : array of shortest path lengths
"""
dists=nx.all_pairs_dijkstra_path_length(graph,weight='lgth')
dist=np.array([[v for v in u.values()] for u in dists.values()])
dist=np.tril(dist)
dist[dist==0]=np.nan
return dist
def edge_angles(graph,pos,mask):
"""Compute distribution of angles between network edges and cell axis.
Parameters
----------
graph : original graph
pos : node positions
mask : binary array of cellular region of interest
Returns
-------
degs : list of angles between edges and cell axis
"""
c0,c1,vc,vd,an,rot=utils.mask2rot(mask) # compute angle of cell axis
degs=[]
for u,v,d in graph.edges(data=True): # for each edge...
degs.append(np.mod(utils.angle360(1.0*(pos[u]-pos[v]))+360.0-an,180.0)) # compute angle between edge and cell axis
return degs
def crossing_number(graph,pos):
"""Compute number of edge intersections per edge.
Parameters
----------
graph : original graph
pos : node positions
Returns
-------
cns : list of edge crossing numbers
"""
ee=np.array(graph.edges()) # get edge edges
edges=[]
cns=[]
for i,(n1,n2) in enumerate(graph.edges_iter()): # for each edge...
edge=np.array([[pos[n1][0],pos[n1][1]],[pos[n2][0],pos[n2][1]]]) # append edge as line segment
edges.append(edge)
for i,(n1,n2) in enumerate(graph.edges_iter()): # for each edge...
idx=(ee[:,0]!=n1)*(ee[:,1]!=n1)*(ee[:,0]!=n2)*(ee[:,1]!=n2) # exclude edges that share a node with the selected edge
idx[i]=False # exclude selected edge itself
edge=np.array([[pos[n1][0],pos[n1][1]],[pos[n2][0],pos[n2][1]]]) # treat edge as line segment
cross=utils.multi_line_intersect(np.array(edge),np.array(edges)[idx]) # check intersections of selected edge with remaining edges
cns.append(cross.sum()) # append crossing number of selected edge
return cns
#%%############################################################################# graph functions
def skeletonize_graph(imO,mask,sigma,block,small,factr):
"""Filter and skeletonize image of filament structures.
Parameters
----------
imO : original image
mask : binary array of cellular region of interest
sigma : width of tubeness filter and filament structures
block : block size of adaptive median filter
small : size of smallest components
factr : fraction of average intensity below which components are removed
Returns
-------
imR : image after application of tubeness filter
imA : filtered and skeletonized image
"""
imO-=imO[mask].min()
imO*=255.0/imO.max()
ly,lx,lz=imO.shape
imR=imO.copy()*0
imT=imO.copy()*0
for z in range(lz):
imR[:,:,z]=tube_filter(imO[:,:,z],sigma)
imT[:,:,z]=skimage.filters.threshold_adaptive(imR[:,:,z],block)
imS=skimage.morphology.skeletonize_3d(imT>0)
ones=np.ones((3,3,3))
imC=skimage.morphology.remove_small_objects(imS,small,connectivity=2)>0
for z in range(lz):
imC[:,:,z]=imC[:,:,z]*mask
imC=imC>0
imL,N=sp.ndimage.label(imC,structure=ones)
mean=imO[imC].mean()
means=[np.mean(imO[imL==n]) for n in range(1,N+1)]
imA=1.0*imC.copy()
for n in range(1,N+1):
if(means[n-1]<mean*factr):
imA[imL==n]=0
imA=skimage.morphology.remove_small_objects(imA>0,2,connectivity=8)
return imR,imA
def node_graph(imA,imG):
"""Construct image indicating background (=0), filaments (=1), and labeled nodes (>1).
Parameters
----------
imA : skeletonized image of filament structures
imG : Gaussian filtered image of filament structures
Returns
-------
imE : image indicating background, filaments, and nodes
"""
ones=np.ones((3,3,3)) # neighborhood structure of pixel
imM=sp.ndimage.generic_filter(imA,utils.node_find,footprint=ones,mode='constant',cval=0) # find nodes as endpoints or crossings of filaments
imN=utils.node_condense(imM,imG,ones) # condense neighboring nodes
imL=skimage.segmentation.relabel_sequential(imN)[0] # relabel nodes
imB,B=sp.ndimage.label(imA,structure=ones) # label components of skeletoninzed image
for b in range(1,B+1): # for each component...
no=np.max((imB==b)*(imL>0)) # if component does not contain node...
if(no==0):
imA[imB==b]=0 # remove component
imE=1*((imA+imL)>0)+imL # construct image indicating background (=0) filaments (=1) and labeled nodes (>1).
return imE
def make_graph(imE,imG):
"""Construct network representation from image of filament structures.
Parameters
----------
imE : image indicating background (=0), filaments (=1), and labeled nodes (>1)
imG : Gaussian filtered image of filament structures
Returns
-------
graph : network representation of filament structures
pos : node positions
"""
N=imE.max()-1 # number of nodes
sq2=np.sqrt(2.0) # distance between diagonal pixels
sq3=np.sqrt(3.0) # distance between space-diagonal pixels
diag=np.array([[[sq3,sq2,sq3],[sq2,1,sq2],[sq3,sq2,sq3]],[[sq2,1,sq2],[1,0,1],[sq2,1,sq2]],[[sq3,sq2,sq3],[sq2,1,sq2],[sq3,sq2,sq3]]]) # distance matrix of 3x3x3 neighborhood
pos=np.array(np.where(imE>1)).T[:,::-1].astype('int') # node positions
pos=pos[:,[1,2,0]] # change order of node positions (x,y,z)
imY=imE.copy() # array to propagate nodes
imL=1.0*(imE.copy()>0) # array to remember summed length of filament up to current position
imS=1.0*(imE.copy()>0) # array to remember summed intensity of filament up to current position
ly,lx,lz=imE.shape # get image dimensions
ys=(imY==1).sum() # get points in image which are neither background (=0), nor nodes (>1), but filament (=1)
while(ys>0): # while there is still "filament" in the image
c=np.transpose(np.where(imY>1)) # positions of node pixels (>1)
for y,x,z in c: # for each node pixel (>1)...
xmin,xmax=utils.bounds(x-1,0,lx),utils.bounds(x+2,0,lx) # consider 3x3x3 neighborhood around our pixel of interest which is cropped at the borders of the image
ymin,ymax=utils.bounds(y-1,0,ly),utils.bounds(y+2,0,ly)
zmin,zmax=utils.bounds(z-1,0,lz),utils.bounds(z+2,0,lz)
sec=imY[ymin:ymax,xmin:xmax,zmin:zmax] # get 3x3x3 neighborhood of node array
lgt=imL[ymin:ymax,xmin:xmax,zmin:zmax] # get 3x3x3 neighborhood of filament length array
stg=imS[ymin:ymax,xmin:xmax,zmin:zmax] # get 3x3x3 neighborhood of filament intensity array
imY[ymin:ymax,xmin:xmax,zmin:zmax]=np.where(sec==1,imY[y,x,z],sec) # if 3x3x3 neighborhood contains node (>1) set all filament pixels to this node index
imL[ymin:ymax,xmin:xmax,zmin:zmax]=np.where(lgt==1,diag[0:ymax-ymin,0:xmax-xmin,0:zmax-zmin]+imL[y,x,z],lgt) # if 3x3x3 neighborhood contains filament, increase straight/diagonal/space-diagonal surrounding pixels in length array by 1/sqrt(2)/sqrt(3), respectively
imS[ymin:ymax,xmin:xmax,zmin:zmax]=np.where(stg==1,imG[y,x,z]+imS[y,x,z],stg) # if 3x3x3 neighborhood contains filament, increase intensity array by intensity of the original image
ys=(imY==1).sum() # compute remaining amout of filament
graph=nx.empty_graph(N,nx.MultiGraph()) # create empty multi graph
ys,xs,zs=np.where(imY>1) # get all labeled filament pixels
for y,x,z in zip(ys,xs,zs): # for each labeled filament pixel...
xy=imY[y,x,z] # get node index
xmin,xmax=utils.bounds(x-1,0,lx),utils.bounds(x+2,0,lx) # consider 3x3x3 neighborhood around our pixel of interest which is cropped at the borders of the image
ymin,ymax=utils.bounds(y-1,0,ly),utils.bounds(y+2,0,ly)
zmin,zmax=utils.bounds(z-1,0,lz),utils.bounds(z+2,0,lz)
sec=imY[ymin:ymax,xmin:xmax,zmin:zmax].flatten() # get 3x3x3 neighborhood of filament image
lgt=imL[ymin:ymax,xmin:xmax,zmin:zmax].flatten()
stg=imS[ymin:ymax,xmin:xmax,zmin:zmax].flatten()
for idx,i in enumerate(sec): # check all pixels in 3x3x3 neighborhood...
if(i!=xy and i>1): # if the center and neighboring pixels have different labels...
u,v=np.sort([xy-2,i-2]) # sort nodes to avoid adding bidirectional edges (A->B and B->A)
edist=sp.linalg.norm(pos[u]-pos[v]) # compute Euklidean distance between the corresponding nodes
fdist=imL[y,x,z]+lgt[idx] # compute sum of the two partial filament lengths
weight=imS[y,x,z]+stg[idx] # compute sum of the two partial filament intensities
weight=max(1e-9,weight) # set minimum edge weight
capa=1.0*weight/fdist # compute edge capacity as ratio of filament weight and length
lgth=1.0*fdist/weight # compute edge length as inverse capacity
conn=0 # set edge connectivity variable indicating that edge belongs to original, non-connected network
jump=0 # set edge jump variable indicating that edge belongs to original, non-periodic network
graph.add_edge(u,v,edist=edist,fdist=fdist,weight=weight,capa=capa,lgth=lgth,conn=conn,jump=jump) # add edge to network
return graph,pos
def unify_graph(graph):
"""Project multigraph to simple graph.
Parameters
----------
graph : original graph
Returns
-------
graphz : simple graph
"""
graphz=nx.empty_graph(graph.number_of_nodes()) # construct new empty graph with the same number of nodes
for u,v,d in graph.edges(data=True): # for each edge in the multigraph...
edist=d['edist'] # get edge properties
fdist=d['fdist']
weight=d['weight']
capa=d['capa']
lgth=d['lgth']
conn=d['conn']
jump=d['jump']
multi=1 # set edge multiplicity to one
if graphz.has_edge(u,v): # if simple graph already contains the edge in question...
graphz[u][v]['multi']+=1.0 # increase edge multiplicity by one
graphz[u][v]['capa']+=capa # compute sum of edge capacities
if(graphz[u][v]['lgth']>lgth): # compute minimum of edge lengths
graphz[u][v]['lgth']=lgth
else:
graphz.add_edge(u,v,edist=edist,fdist=fdist,weight=weight,capa=capa,lgth=lgth,conn=conn,jump=jump,multi=multi) # add edge to simple graph otherwise
return graphz
def connect_graph(graph,pos,imG):
"""Connect graph by adding edges of minimum edge length.
Parameters
----------
graph : original graph
pos : node positions
imG : Gaussian filtered image of filament structures
Returns
-------
graphz : connect graph
"""
dists=sp.spatial.distance_matrix(pos,pos) # compute distance matrix between all node positions
graphz=graph.copy() # copy original graph
N=graphz.number_of_nodes() # get number of nodes
comp=nx.connected_components(graphz) # compute connected components
comp=sorted(comp,key=len)[::-1] # sort connected components in descending order according to size
while len(comp)>1: # while network is disconnected...
compo=comp[0] # get nodes in largest component
compl=list(compo)
compi=list(set(range(N)).difference(compo)) # get remaining nodes
dist=dists[compl][:,compi] # get distance matrix between nodes of largest component and remaining network
n0,ni=np.unravel_index(dist.argmin(),dist.shape) # find pair of nodes with minimum distance
p0,pi=pos[compl][n0],pos[compi][ni]
edist=sp.linalg.norm(p0-pi) # compute distance between nodes
edist=max(1.0,edist) # set minimum distance between nodes
fdist=1.0*np.ceil(edist) # approximate filament length by rounding node distance
aa=np.array([p0[0],p0[1],pi[0],pi[1]]) # draw line between nodes
yy,xx=skimage.draw.line(*aa.astype('int'))
zz=(np.linspace(p0[2],pi[2],len(xx))).astype('int')
weight=np.sum(imG[xx,yy,zz]) # compute edge weight as image intensity along line
weight=max(1e-9,weight) # set minimum edge weight
capa=1.0*weight/fdist # compute edge capacity as ratio of filament weight and length
lgth=1.0*fdist/weight # compute edge length as inverse capacity
conn=1 # set edge connectivity variable indicating that edge belongs to new, connected network
jump=0 # set edge jump variable indicating that edge belongs to original, non-periodic network
multi=1 # set edge multiplicity variable
graphz.add_edge(compi[ni],compl[n0],edist=edist,fdist=fdist,weight=weight,capa=capa,lgth=lgth,conn=conn,jump=jump,multi=multi) # add edge to network
comp=nx.connected_components(graphz) # compute connected components
comp=sorted(comp,key=len)[::-1] # sort connected components in descending order according to size
return graphz
def randomize_graph(graph,pos,mask,planar=0,weights=0,iterations=1000):
"""Randomize graph by shuffling node positions and edges or edge capacities only.
Parameters
----------
graph : original graph
pos : node positions
mask : binary array of cellular region of interest
planar : ignore edge crossings (=0) or favor planar graph by reducing number of edge crossings (=1)
weights : shuffle only edge capacities (=0) or node positions and edges (=1)
iterations : number of iterations before returning original graph
Returns
-------
graphz : randomized graph
poz : randomized node positions
"""
if(weights==0): # if shuffling of edge capacities only...
ec=np.array([d for u,v,d in graph.edges(data=True)]) # get edge properties
random.shuffle(ec) # shuffle edge capacities
graphz=graph.copy() # copy graph
for j,(u,v,d) in enumerate(graphz.edges(data=True)): # for each edge...
for k in d.keys(): # copy shuffled edge properties
d[k]=ec[j][k]
poz=pos # copy node positions
else: # shuffling of node positions and edges otherwise
N=graph.number_of_nodes() # get node number
E=graph.number_of_edges() # get edge number
graphz=nx.empty_graph(N,nx.MultiGraph()) # create new, empty multigraph
diste=np.array([d['edist'] for u,v,d in graph.edges(data=True)]) # get Euclidean edge lengths
bins=[0,10,20,30,40,50,60,70,80,90,100,110,120,130,140,150,160,170,180,190,200,9999] # set bin boundaries for edge lengths
B=len(bins)-1 # get number of bins
dibse=np.zeros(E).astype('int') # create array for assigning bin numbers to edges
for i,(b1,b2) in enumerate(zip(bins[:-1],bins[1:])): # for each bin...
ide=(diste>=b1)*(diste<b2) # get edges with Euclidean lengths in the given bin
dibse[ide]=i # assign bin number to edges
eweight=np.array([d['weight'] for u,v,d in graph.edges(data=True)]) # get edge weights
ecapa=np.array([d['capa'] for u,v,d in graph.edges(data=True)]) # get edge capacities
redo=1 # variable indicating that no suitable randomization was obtained yet
iteration=0 # number of iterations
while(redo==1 and iteration<iterations): # while neither a suitable randomization nor the number of allowed iterations were reached yet...
iteration+=1 # increase iteration by one
poz=utils.cell_sample(mask,N)[:,::-1].astype('int') # shuffle xy-components of node positions
zzz=pos[:,2] # keep z-component of node positions
poz=np.vstack([poz.T,zzz]).T # merge xyz-components of node positions
dista=scipy.spatial.distance_matrix(poz,poz) # compute distance matrix between new node positions
dibsa=np.zeros((N,N)).astype('int') # assign bin numbers to all new, potential edges
for i,(b1,b2) in enumerate(zip(bins[:-1],bins[1:])):
ida=(dista>=b1)*(dista<b2)
dibsa[ida]=i
dibsa[np.tri(N)>0]=-9999 # set lower part of the bin number matrix to negativ number to exclude loops (A->A) and bidirectional edges (A->B and B->A)
redo=1*np.max([(dibsa==b).sum()<(dibse==b).sum() for b in range(B)]) # check that each original edge can be accommodated given the new node positions
if(iteration<iterations): # if the number of allowed iterations was not reached yet...
isort=np.argsort(diste)[::-1] # sort bin assignments, edge weights, and edge capacities by Euclidean length
diste=diste[isort]
dibse=dibse[isort]
eweight=eweight[isort]
ecapa=ecapa[isort]
edges=[] # list of added edges
for e in range(E): # for each edge...
candidates=np.where(dibsa==dibse[e]) # get candidate pairs of new nodes whose distance matches the Euclidean length of the selected edge
C=len(candidates[0]) # get number of candidate pairs
cromm=9999 # dummy variable for number of edge crossings
ii=random.sample(range(C),min(50,C)) # select up to 50 candidate pairs
for i in ii: # for each candidate pair...
n1=candidates[0][i] # get nodes
n2=candidates[1][i]
edge=np.array([[poz[n1][0],poz[n2][0]],[poz[n1][1],poz[n2][1]]]).T # create line segment between candidate nodes
cross=planar*utils.multi_line_intersect(np.array(edge),np.array(edges)).sum() # compute number of line segment crossings with existing edges
if(cross<cromm and dibsa[n1,n2]>=0): # if edge is allowed and number of crossings is smaller than for previous candidates...
cromm=cross # store crossing number
edgem=edge # store edge
m1,m2=n1,n2 # store nodes
edges.append(edgem) # add edge to list of edges
edist=dista[m1,m2] # set Euclidean distance
fdist=1.0*np.ceil(edist) # approximate filament length by rounding node distance
weight=eweight[e] # set edge weight
capa=ecapa[e] # set edge capacity
lgth=1.0*fdist/weight # compute edge length as inverse capacity
conn=0 # set edge connectivity variable indicating that edge belongs to randomized, non-connected network
jump=0 # set edge jump variable indicating that edge belongs to randomized, non-periodic network
multi=1 # set edge multiplicity variable
graphz.add_edge(m1,m2,edist=edist,fdist=fdist,weight=weight,capa=capa,lgth=lgth,conn=conn,jump=jump,multi=multi) # add edge to network
dibsa[m1,m2]=-9999 # remove edge from allowed edges
dibsa[m2,m1]=-9999
else:
graphz,poz=graph,pos # copy original network and node positions otherwise
return graphz,poz
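# Hedged usage sketch (added for illustration; not part of the original module):
# building randomized null models for a previously extracted network. `graph`,
# `pos`, and `mask` are assumed to come from the extraction pipeline below.
#
#   rand_capa, _ = randomize_graph(graph, pos, mask, weights=0)  # shuffle edge capacities only
#   rand_full, rand_pos = randomize_graph(graph, pos, mask, planar=1, weights=1)  # reshuffle nodes and edges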
def centralize_graph(graph,epb='lgth',efb='capa',ndg='capa',nec='capa',npr='capa'):
"""Compute edge centralities.
Parameters
----------
graph : original graph
epb : edge property used for computation of edge path betweenness
efb : " flow betweenness
ndg : " degree centrality
nec : " eigenvector centrality
npr : " page rank
Returns
-------
graphz : graph with computed edge centralities
"""
graphz=graph.copy() # copy graph
edges=graphz.edges(data=True) # get edge capacities
ec=1.0*np.array([d['capa'] for u,v,d in edges])
ec/=ec.sum() # normalize edge capacities
el=1.0/ec
for i,(u,v,d) in enumerate(edges): # update edge capacities and lengths
d['capa']=ec[i]
d['lgth']=el[i]
epb=nx.edge_betweenness_centrality(graphz,weight=epb) # compute edge path betweenness
efb=nx.edge_current_flow_betweenness_centrality(graphz,weight=efb) # compute edge flow betweenness
lineg=nx.line_graph(graphz) # compute line graph
degree=graphz.degree(weight=ndg) # get capacity weighted edge degree
for u,v,d in lineg.edges(data=True): # set edge capacity of linegraph to node degree of original graph
n=list(set(u).intersection(v))[0]
d[ndg]=degree[n]
nec=nx.eigenvector_centrality_numpy(lineg,weight=ndg) # compute edge degree, eigenvector, and page rank centrality
npr=nx.pagerank(lineg,weight=ndg)
ndg=lineg.degree(weight=ndg)
for i,(u,v,d) in enumerate(edges): # set edge centralities
e=(u,v)
if(e in epb.keys()):
d['epb']=epb[e]
else:
d['epb']=epb[e[::-1]]
if(e in efb.keys()):
d['efb']=efb[e]
else:
d['efb']=efb[e[::-1]]
if(e in ndg.keys()):
d['ndg']=ndg[e]
else:
d['ndg']=ndg[e[::-1]]
if(e in nec.keys()):
d['nec']=nec[e]
else:
d['nec']=nec[e[::-1]]
if(e in npr.keys()):
d['npr']=npr[e]
else:
d['npr']=npr[e[::-1]]
return graphz
def normalize_graph(graph):
"""Normalize edge properties.
Parameters
----------
graph : original graph
Returns
-------
graph : graph with normalized edge properties
"""
ec=1.0*np.array([d['capa'] for u,v,d in graph.edges(data=True)])
ec/=ec.sum()
el=1.0/ec
el/=el.sum()
epb=1.0*np.array([d['epb'] for u,v,d in graph.edges(data=True)])
epb/=epb.sum()
efb=1.0*np.array([d['efb'] for u,v,d in graph.edges(data=True)])
efb/=efb.sum()
ndg=1.0*np.array([d['ndg'] for u,v,d in graph.edges(data=True)])
ndg/=ndg.sum()
nec=1.0*np.array([d['nec'] for u,v,d in graph.edges(data=True)])
nec/=nec.sum()
npr=1.0*np.array([d['npr'] for u,v,d in graph.edges(data=True)])
npr/=npr.sum()
for i,(u,v,d) in enumerate(graph.edges(data=True)):
d['capa']=ec[i]
d['lgth']=el[i]
d['epb']=epb[i]
d['efb']=efb[i]
d['ndg']=ndg[i]
d['nec']=nec[i]
d['npr']=npr[i]
return graph
def boundary_graph(jnet,graph,pos,SP,SL,JV,JH,imG,dthres=10.0,jthres=2.5):
"""Generate graph with periodic boundary conditions.
Parameters
----------
jnet : jump network
graph : original graph
pos : node positions
SP : shortest paths
SL : shortest path lengths
JV : number of vertical jumps along shortest path
JH : number of horizontal jumps along shortest path
imG : Gaussian filtered image of filament structures
Returns
-------
graphz : graph with periodic boundary conditions
"""
B=jnet.number_of_nodes() # get number of nodes of jump network
C=np.tril((SL<dthres)*((JV+JH)>0)*((JV+JH)<jthres))[B:,B:] # get pairs of original nodes that are less than dthres apart and whose shortest path uses more than zero but fewer than jthres boundary jumps
wh=np.array(np.where(C)).T
graphz=nx.MultiGraph(graph.copy()) # create new, empty multigraph
for idx,(w1,w2) in enumerate(wh): # for each pair of nodes, i.e., each potential edge...
path=SP[B+w1][B+w2] # get shortest path between selected nodes
pairs=zip(path[0:],path[1:])
weight=0.0
for n0,n1 in pairs: # for each edge along path...
if(jnet[n0][n1]['jump']==0): # if it is not a jump edge...
rr,cc=skimage.draw.line(pos[n0][1],pos[n0][0],pos[n1][1],pos[n1][0]) # draw line along edge
weight+=imG[cc,rr].sum() # add edge weight as sum of intensities in the underlying image along the line
edist=SL[B+w1,B+w2] # set edge Euclidean length
edist=max(1.0,edist)
fdist=1.0*np.ceil(edist) # approximate filament arc length
weight=max(1e-9,weight)
capa=1.0*weight/fdist # compute edge capacity
lgth=1.0*fdist/weight # compute edge length as inverse capacity
conn=0 # set edge connectivity variable indicating that the edge belongs to the periodic, not necessarily connected network
jump=1 # set edge jump variable indicating that the edge belongs to the periodic network
multi=1 # set edge multiplicity variable
graphz.add_edge(w2,w1,edist=edist,fdist=fdist,weight=weight,capa=capa,lgth=lgth,conn=conn,jump=jump,multi=multi) # add edge
return graphz
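# A minimal usage sketch for boundary_graph; all inputs are hypothetical here and would normally come from
# pbc_jnet_border/pbc_jnet_interior (jnet, SP, SL, JV, JH) and from the image pipeline (G, pos, imG):
#   jnet, SP, SL, JV, JH = pbc_jnet_interior(pos, polya, jborder)
#   Gp = boundary_graph(jnet, G, pos, SP, SL, JV, JH, imG, dthres=10.0, jthres=2.5)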
def compute_graph(graph,pos,mask):
"""Compute graph properties.
Parameters
----------
graph : original graph
pos : node positions
mask : binary array of cellular region of interest
Returns
-------
quanta : list of graph properties
"""
N=graph.number_of_nodes() # number of nodes
E=graph.number_of_edges() # number of edges
ca=utils.connected_components(graph) # compute sizes of connected components
C=len(ca) # number of connected components
ec=1.0*np.array([d['capa'] for u,v,d in graph.edges(data=True)]) # get edge capacities
bund=np.nanmean(ec) # compute average edge capacity ('bundling')
assort=nx.degree_pearson_correlation_coefficient(graph,weight='capa') # compute assortativity ('heterogeneity')
dist=utils.path_lengths(graph) # compute shortest path lengths
distMU=np.nanmean(dist) # compute average path length ('reachability')
distSD=np.nanstd(dist) # compute standard deviation of path lengths
distCV=1.0*distSD/distMU # compute coefficient of variation of path lengths ('dispersal')
ac=np.sort(nx.laplacian_spectrum(graph,weight='capa'))[1] # compute algebraic connectivity ('robustness')
degs=utils.edge_angles(graph,pos[:,:2],mask) # compute edge angles relative to cell axis
angleMU=np.nanmean(degs) # compute average angle
angleSD=np.nanstd(degs) # compute standard deviation of angles
angleCV=1.0*angleSD/angleMU # compute coefficient of variation of angles ('contortion')
cns=utils.crossing_number(graph,pos[:,:2]) # compute number of edge crossings per edge
crossing=np.nanmean(cns) # compute average crossing number
quants=['# nodes','# edges','# connected components','avg. edge capacity','assortativity','avg. path length','CV path length','algebraic connectivity','CV edge angles','crossing number'] # list of graph property names
quanta=[N,E,C,bund,assort,distMU,distCV,ac,angleCV,crossing] # list of graph properties
return quanta
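# A minimal usage sketch for compute_graph (hypothetical graph `G`, node positions `pos`, and binary `mask`);
# the `quants` list above names the entries of the returned list, so the two can be zipped for reporting:
#   quanta = compute_graph(G, pos, mask)
#   report = dict(zip(['# nodes','# edges','# connected components','avg. edge capacity','assortativity',
#                      'avg. path length','CV path length','algebraic connectivity','CV edge angles',
#                      'crossing number'], quanta))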
#%%############################################################################# periodic functions
def mask2rot(mask):
"""Compute main axis of cellular region of interest.
Parameters
----------
mask : binary array of cellular region of interest
Returns
-------
c0,c1 : coordinates along cell axis
vc,vd : center point and direction vector of cell axis
angle : angle between y-axis and main cell axis
rot : rotation matrix
"""
line=skimage.morphology.skeletonize(mask) # skeletonize mask
co=np.array(np.where(line>0)).T[:,::-1] # get coordinates of skeleton line
L=int(len(co)*0.2) # get points 20% and 80% along the cell axis
c0=co[L]
c1=co[-L]
vc=co[int(len(co)*0.5)] # get center point and direction vector of cell axis
vd=c0-c1
angle=utils.angle360(vd) # compute angle of cell axis
angli=angle*np.pi/180.0 # convert angle to radian
rot=np.array([[np.cos(angli),-np.sin(angli)],[np.sin(angli),np.cos(angli)]]) # compute rotation matrix
return c0,c1,vc,vd,angle,rot
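# A minimal usage sketch for mask2rot (hypothetical binary `mask` of the cell region); the returned rotation
# matrix aligns coordinates with the main cell axis:
#   c0, c1, vc, vd, angle, rot = mask2rot(mask)
#   polyn = np.dot(polya, rot)   # rotate an assumed (N,2) coordinate array, as done in mask2poly below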
def mask2poly(mask):
"""Convert cellular region of interest to polygon.
Parameters
----------
mask : binary array of cellular region of interest
Returns
-------
polya : original polygon
polyn : rotated polygon aligned with y-axis
"""
maski=sp.ndimage.minimum_filter(mask,3,mode='constant',cval=0) # shrink mask
polya=skimage.measure.find_contours(maski,0)[0] # find contours
polya=skimage.measure.approximate_polygon(polya,tolerance=0.0) # approximate polygon
polya=1.0*remove_duplicates(polya) # remove duplicate points
c0,c1,vc,vd,an,rot=mask2rot(maski) # compute cell axis
polyn=np.dot(polya,rot) # rotate polygon
return polya[:,::-1],polyn[:,::-1]
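# A minimal usage sketch for mask2poly (hypothetical binary `mask`); `polya` is the original cell contour and
# `polyn` the same contour rotated onto the main cell axis:
#   polya, polyn = mask2poly(mask)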
def pbc_jnet_border(polyn):
"""Compute border of jump network.
Parameters
----------
polyn : rotated polygon of cellular region of interest
Returns
-------
graph : border of jump network
"""
polyi=1.0*polyn.astype('int') # round coordinates down to integer values (kept as floats)
polys=shapely.geometry.Polygon(polyi) # convert polygon to shapely polygon
B=len(polyi) # get number of polygon points
graph=nx.empty_graph(B) # create new, empty graph
for i in range(2): # for both x- and y-components...
bx=polyi[:,i] # get coordinate
for idx,x in enumerate(set(bx)): # for each distinct coordinate value
yy=np.sort(np.where(x==bx)[0]) # get all polygon points sharing that coordinate value
Y=len(yy)
for y in range(Y-1): # for each other point with same coordinate
y1,y2=yy[y],yy[y+1]
line=shapely.geometry.LineString([polyi[y1],polyi[y2]]) # draw line between the two selected points
if(line.within(polys)): # if the line is fully contained within the polygon...
graph.add_edge(y1,y2,weight=0.0,jump=0.001**i) # add the line to the network (jump=0.001**i marks the wrap direction and is later decoded into vertical/horizontal jump counts)
distb=sp.spatial.distance_matrix(polyn,polyn) # compute distance matrix between the points of the polygon
for b1 in range(B): # for each point along polygon
b2=np.mod(b1+1,B)
graph.add_edge(b1,b2,weight=distb[b1,b2],jump=0.0) # add edge to the neighboring point along the polygon
return graph
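# A minimal usage sketch for pbc_jnet_border (hypothetical rotated polygon `polyn`, e.g. from mask2poly);
# border nodes are indexed 0..len(polyn)-1 and interior chords carry the jump attribute set above:
#   jborder = pbc_jnet_border(polyn)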
def pbc_jnet_interior(pos,polya,jborder,cthres=10.0):
"""Compute interier of jump network.
Parameters
----------
pos : node positions
polya : original polygon of cellular region of interest
jborder : border of jump network
cthres : maximum edge length between nodes of original network and border of jump network
Returns
-------
jnet : complete jump network
SP : shortest paths
SL : array of shortest path lengths
JV : number of vertical jumps
JH : number of horizontal jumps
"""
jnet=jborder.copy() # copy border of jump network
B=jnet.number_of_nodes() # get number of nodes
distn=sp.spatial.distance_matrix(pos,polya) # compute distances between node positions and border of jump network
for n in range(len(pos)): # for each node...
jnet.add_node(B+n) # add node to jump network
for e in np.where(distn[n]<cthres)[0]: # add edge if node is close enough to border of jump network
jnet.add_edge(B+n,e,weight=distn[n,e],jump=0.0)
for n in range(len(pos)): # for each node...
if(jnet.degree(B+n)==0): # add dummy edge to make network connected if node is disconnected
jnet.add_edge(B+n,0,weight=9999.0,jump=0.0)
SP=utils.all_pairs_dijkstra_path(jnet,weight='weight',jump='jump') # compute all shortest paths in jump network
SX=utils.all_pairs_dijkstra_path_length(jnet,weight='weight',jump='jump') # compute all shortest path lengths in jump network
SL=1.0*np.array([[d1 for d1 in d2[0].values()] for d2 in SX.values()]) # array of shortest path lengths
SJ=1.0*np.array([[d1 for d1 in d2[1].values()] for d2 in SX.values()]) # array of jump sizes
JV=np.floor(SJ+0.5) # get number of vertical jumps
JH=np.floor(np.mod(SJ,1.0)*1000.0+0.5) # get number of horizontal jumps
return jnet,SP,SL,JV,JH
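# A minimal usage sketch for pbc_jnet_interior (hypothetical node positions `pos`, original polygon `polya`,
# and border network `jborder` from pbc_jnet_border); the original nodes are appended after the B border nodes,
# so entry [B+i, B+j] of SL/JV/JH refers to original nodes i and j:
#   jnet, SP, SL, JV, JH = pbc_jnet_interior(pos, polya, jborder, cthres=10.0)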
#%%############################################################################# NetworkX: shortest path algorithms for weighted graphs
# -*- coding: utf-8 -*-
#"""
#Shortest path algorithms for weighted graphs.
#"""
#__author__ = """\n""".join(['Aric Hagberg <[email protected]>',
# 'Loïc Séguin-C. <[email protected]>',
# 'Dan Schult <[email protected]>'])
## Copyright (C) 2004-2011 by
## Aric Hagberg <[email protected]>
## Dan Schult <[email protected]>
## Pieter Swart <[email protected]>
## All rights reserved.
## BSD license.
#
#__all__ = ['dijkstra_path',
# 'dijkstra_path_length',
# 'bidirectional_dijkstra',
# 'single_source_dijkstra',
# 'single_source_dijkstra_path',
# 'single_source_dijkstra_path_length',
# 'all_pairs_dijkstra_path',
# 'all_pairs_dijkstra_path_length',
# 'dijkstra_predecessor_and_distance',
# 'bellman_ford','negative_edge_cycle']
import heapq
import networkx as nx
from networkx.utils import generate_unique_node
def dijkstra_path(G, source, target, weight='weight',jump= 'jump'):
"""Returns the shortest path from source to target in a weighted graph G.
Parameters
----------
G : NetworkX graph
source : node
Starting node
target : node
Ending node
weight: string, optional (default='weight')
Edge data key corresponding to the edge weight
Returns
-------
path : list
List of nodes in a shortest path.
Raises
------
NetworkXNoPath
If no path exists between source and target.
Examples
--------
>>> G=nx.path_graph(5)
>>> print(nx.dijkstra_path(G,0,4))
[0, 1, 2, 3, 4]
Notes
------
Edge weight attributes must be numerical.
Distances are calculated as sums of weighted edges traversed.
See Also
--------
bidirectional_dijkstra()
"""
(length,path)=single_source_dijkstra(G, source, target=target,
weight=weight,jump=jump)
try:
return path[target]
except KeyError:
raise nx.NetworkXNoPath("node %s not reachable from %s"%(source,target))
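# Hypothetical usage of the jump-aware variant above (graph `G` is assumed to carry 'weight' and 'jump'
# edge attributes, and `u`, `v` are assumed node labels):
#   path = dijkstra_path(G, u, v, weight='weight', jump='jump')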
def dijkstra_path_length(G, source, target, weight='weight',jump= 'jump'):
"""Returns the shortest path length from source to target
in a weighted graph.
Parameters
----------
G : NetworkX graph
source : node label
starting node for path
target : node label
ending node for path
weight: string, optional (default='weight')
Edge data key corresponding to the edge weight
Returns
-------
length : number
Shortest path length.
Raises
------
NetworkXNoPath
If no path exists between source and target.
Examples
--------
>>> G=nx.path_graph(5)
>>> print(nx.dijkstra_path_length(G,0,4))
4
Notes
-----
Edge weight attributes must be numerical.
Distances are calculated as sums of weighted edges traversed.
See Also
--------
bidirectional_dijkstra()
"""
length=single_source_dijkstra_path_length(G, source, weight=weight,jump= jump)
try:
return length[target]
except KeyError:
raise nx.NetworkXNoPath("node %s not reachable from %s"%(source,target))
def single_source_dijkstra_path(G,source, cutoff=None, weight='weight',jump= 'jump'):
"""Compute shortest path between source and all other reachable
nodes for a weighted graph.
Parameters
----------
G : NetworkX graph
source : node
Starting node for path.
weight: string, optional (default='weight')
Edge data key corresponding to the edge weight
cutoff : integer or float, optional
Depth to stop the search. Only paths of length <= cutoff are returned.
Returns
-------
paths : dictionary
Dictionary of shortest path lengths keyed by target.
Examples
--------
>>> G=nx.path_graph(5)
>>> path=nx.single_source_dijkstra_path(G,0)
>>> path[4]
[0, 1, 2, 3, 4]
Notes
-----
Edge weight attributes must be numerical.
Distances are calculated as sums of weighted edges traversed.
See Also
--------
single_source_dijkstra()
"""
(length,path)=single_source_dijkstra(G,source, weight = weight,jump= jump)
return path
def single_source_dijkstra_path_length(G, source, cutoff= None,
weight= 'weight',jump= 'jump'):
"""Compute the shortest path length between source and all other
reachable nodes for a weighted graph.
Parameters
----------
G : NetworkX graph
source : node label
Starting node for path
weight: string, optional (default='weight')
Edge data key corresponding to the edge weight.
jump: string, optional (default='jump')
Edge data key corresponding to the edge jump attribute accumulated along the path.
cutoff : integer or float, optional
Depth to stop the search. Only paths of length <= cutoff are returned.
Returns
-------
dist,jumq : dictionaries
Dictionaries, keyed by target, of shortest path lengths and of accumulated jump values.
Examples
--------
>>> G=nx.path_graph(5)
>>> length=nx.single_source_dijkstra_path_length(G,0)
>>> length[4]
4
>>> print(length)
{0: 0, 1: 1, 2: 2, 3: 3, 4: 4}
Notes
-----
Edge weight attributes must be numerical.
Distances are calculated as sums of weighted edges traversed.
See Also
--------
single_source_dijkstra()
"""
dist = {} # dictionary of final distances
jumq={}
seen = {source:0}
fringe=[] # use heapq with (distance,label,jump) tuples
heapq.heappush(fringe,(0,source,0))
while fringe:
(d,v,j)=heapq.heappop(fringe)
if v in dist:
continue # already searched this node.
dist[v] = d
jumq[v] = j # accumulated jump value for node v
#for ignore,w,edgedata in G.edges_iter(v,data=True):
#is about 30% slower than the following
if G.is_multigraph():
edata=[]
for w,keydata in G[v].items():
minweight=min((dd.get(weight,1)
for k,dd in keydata.items()))
edata.append((w,{weight:minweight}))
else:
edata=iter(G[v].items())
for w,edgedata in edata:
vw_jumq = jumq[v] + edgedata.get(jump,1)
ddist=edgedata.get(weight,1)
vw_dist = dist[v] + ddist
if(vw_dist<9999.0):
if(int(vw_jumq)>1 or int(vw_jumq%1.0*1000.0+0.5)>1):
ddist=9999.0
vw_dist = dist[v] + ddist
if cutoff is not None:
if vw_dist>cutoff:
continue
if w in dist:
if vw_dist < dist[w]:
raise ValueError('Contradictory paths found:',
'negative weights?')
elif w not in seen or vw_dist < seen[w]:
seen[w] = vw_dist
heapq.heappush(fringe,(vw_dist,w,vw_jumq))
return dist,jumq
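# Note on the jump bookkeeping above (a reading of the code, not part of the original documentation):
# each edge's 'jump' attribute is accumulated into jumq, vertical wraps contribute values of about 1.0 and
# horizontal wraps about 0.001, and any path whose accumulated value decodes to more than one wrap in either
# direction is effectively blocked by assigning a 9999.0 edge distance. A hypothetical decoding of a returned
# jump value j, mirroring pbc_jnet_interior:
#   vertical_jumps = int(np.floor(j + 0.5))
#   horizontal_jumps = int(np.floor((j % 1.0) * 1000.0 + 0.5))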
def single_source_dijkstra(G,source,target=None,cutoff=None,weight='weight',jump='jump'):
"""Compute shortest paths and lengths in a weighted graph G.
Uses Dijkstra's algorithm for shortest paths.
Parameters
----------
G : NetworkX graph
source : node label
Starting node for path
target : node label, optional
Ending node for path
cutoff : integer or float, optional
Depth to stop the search. Only paths of length <= cutoff are returned.
Returns
-------
distance,path : dictionaries
Returns a tuple of two dictionaries keyed by node.
The first dictionary stores distance from the source.
The second stores the path from the source to that node.
Examples
--------
>>> G=nx.path_graph(5)
>>> length,path=nx.single_source_dijkstra(G,0)
>>> print(length[4])
4
>>> print(length)
{0: 0, 1: 1, 2: 2, 3: 3, 4: 4}
>>> path[4]
[0, 1, 2, 3, 4]
Notes
---------
Edge weight attributes must be numerical.
Distances are calculated as sums of weighted edges traversed.
Based on the Python cookbook recipe (119466) at
http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/119466
This algorithm is not guaranteed to work if edge weights
are negative or are floating point numbers
(overflows and roundoff errors can cause problems).
See Also
--------
single_source_dijkstra_path()
single_source_dijkstra_path_length()
"""
if source==target:
return ({source:0}, {source:[source]})
dist = {} # dictionary of final distances
paths = {source:[source]} # dictionary of paths
seen = {source:0}
fringe=[] # use heapq with (distance,label) tuples
heapq.heappush(fringe,(0,source))
while fringe:
(d,v)=heapq.heappop(fringe)
if v in dist:
continue # already searched this node.
dist[v] = d
if v == target:
break
#for ignore,w,edgedata in G.edges_iter(v,data=True):
#is about 30% slower than the following
if G.is_multigraph():
edata=[]
for w,keydata in G[v].items():
minweight=min((dd.get(weight,1)
for k,dd in keydata.items()))
edata.append((w,{weight:minweight}))
else:
edata=iter(G[v].items())
for w,edgedata in edata:
vw_dist = dist[v] + edgedata.get(weight,1)
if cutoff is not None:
if vw_dist>cutoff:
continue
if w in dist:
if vw_dist < dist[w]:
raise ValueError('Contradictory paths found:',
'negative weights?')
elif w not in seen or vw_dist < seen[w]:
seen[w] = vw_dist
heapq.heappush(fringe,(vw_dist,w))
paths[w] = paths[v]+[w]
return (dist,paths)
def dijkstra_predecessor_and_distance(G,source, cutoff=None, weight='weight'):
"""Compute shortest path length and predecessors on shortest paths
in weighted graphs.
Parameters
----------
G : NetworkX graph
source : node label
Starting node for path
weight: string, optional (default='weight')
Edge data key corresponding to the edge weight
cutoff : integer or float, optional
Depth to stop the search. Only paths of length <= cutoff are returned.
Returns
-------
pred,distance : dictionaries
Returns two dictionaries representing a list of predecessors
of a node and the distance to each node.
Notes
-----
Edge weight attributes must be numerical.
Distances are calculated as sums of weighted edges traversed.
The list of predecessors contains more than one element only when
there are more than one shortest paths to the key node.
"""
push=heapq.heappush
pop=heapq.heappop
dist = {} # dictionary of final distances
pred = {source:[]} # dictionary of predecessors
seen = {source:0}
fringe=[] # use heapq with (distance,label) tuples
push(fringe,(0,source))
while fringe:
(d,v)=pop(fringe)
if v in dist: continue # already searched this node.
dist[v] = d
if G.is_multigraph():
edata=[]
for w,keydata in G[v].items():
minweight=min((dd.get(weight,1)
for k,dd in keydata.items()))
edata.append((w,{weight:minweight}))
else:
edata=iter(G[v].items())
for w,edgedata in edata:
vw_dist = dist[v] + edgedata.get(weight,1)
if cutoff is not None:
if vw_dist>cutoff:
continue
if w in dist:
if vw_dist < dist[w]:
raise ValueError('Contradictory paths found:',
'negative weights?')
elif w not in seen or vw_dist < seen[w]:
seen[w] = vw_dist
push(fringe,(vw_dist,w))
pred[w] = [v]
elif vw_dist==seen[w]:
pred[w].append(v)
return (pred,dist)
def all_pairs_dijkstra_path_length(G, cutoff=None, weight='weight',jump= 'jump'):
""" Compute shortest path lengths between all nodes in a weighted graph.
Parameters
----------
G : NetworkX graph
weight: string, optional (default='weight')
Edge data key corresponding to the edge weight
cutoff : integer or float, optional
Depth to stop the search. Only paths of length <= cutoff are returned.
Returns
-------
distance : dictionary
Dictionary, keyed by source and target, of shortest path lengths.
Examples
--------
>>> G=nx.path_graph(5)
>>> length=nx.all_pairs_dijkstra_path_length(G)
>>> print(length[1][4])
3
>>> length[1]
{0: 1, 1: 0, 2: 1, 3: 2, 4: 3}
Notes
-----
Edge weight attributes must be numerical.
Distances are calculated as sums of weighted edges traversed.
The dictionary returned only has keys for reachable node pairs.
"""
paths={}
for n in G:
paths[n]=single_source_dijkstra_path_length(G,n, cutoff=cutoff,
weight=weight,jump=jump)
return paths
def all_pairs_dijkstra_path(G, cutoff=None, weight='weight',jump='jump'):
""" Compute shortest paths between all nodes in a weighted graph.
Parameters
----------
G : NetworkX graph
weight: string, optional (default='weight')
Edge data key corresponding to the edge weight
cutoff : integer or float, optional
Depth to stop the search. Only paths of length <= cutoff are returned.
Returns
-------
distance : dictionary
Dictionary, keyed by source and target, of shortest paths.
Examples
--------
>>> G=nx.path_graph(5)
>>> path=nx.all_pairs_dijkstra_path(G)
>>> print(path[0][4])
[0, 1, 2, 3, 4]
Notes
-----
Edge weight attributes must be numerical.
Distances are calculated as sums of weighted edges traversed.
See Also
--------
floyd_warshall()
"""
paths={}
for n in G:
paths[n]=single_source_dijkstra_path(G, n, cutoff=cutoff,
weight=weight,jump=jump)
return paths
def bellman_ford(G, source, weight = 'weight'):
"""Compute shortest path lengths and predecessors on shortest paths
in weighted graphs.
The algorithm has a running time of O(mn) where n is the number of
nodes and m is the number of edges. It is slower than Dijkstra but
can handle negative edge weights.
Parameters
----------
G : NetworkX graph
The algorithm works for all types of graphs, including directed
graphs and multigraphs.
source: node label
Starting node for path
weight: string, optional (default='weight')
Edge data key corresponding to the edge weight
Returns
-------
pred, dist : dictionaries
Returns two dictionaries keyed by node to predecessor in the
path and to the distance from the source respectively.
Raises
------
NetworkXUnbounded
If the (di)graph contains a negative cost (di)cycle, the
algorithm raises an exception to indicate the presence of the
negative cost (di)cycle. Note: any negative weight edge in an
undirected graph is a negative cost cycle.
Examples
--------
>>> import networkx as nx
>>> G = nx.path_graph(5, create_using = nx.DiGraph())
>>> pred, dist = nx.bellman_ford(G, 0)
>>> pred
{0: None, 1: 0, 2: 1, 3: 2, 4: 3}
>>> dist
{0: 0, 1: 1, 2: 2, 3: 3, 4: 4}
>>> from nose.tools import assert_raises
>>> G = nx.cycle_graph(5, create_using = nx.DiGraph())
>>> G[1][2]['weight'] = -7
>>> assert_raises(nx.NetworkXUnbounded, nx.bellman_ford, G, 0)
Notes
-----
Edge weight attributes must be numerical.
Distances are calculated as sums of weighted edges traversed.
The dictionaries returned only have keys for nodes reachable from
the source.
In the case where the (di)graph is not connected, if a component
not containing the source contains a negative cost (di)cycle, it
will not be detected.
"""
if source not in G:
raise KeyError("Node %s is not found in the graph"%source)
numb_nodes = len(G)
dist = {source: 0}
pred = {source: None}
if numb_nodes == 1:
return pred, dist
if G.is_multigraph():
def get_weight(edge_dict):
return min([eattr.get(weight,1) for eattr in edge_dict.values()])
else:
def get_weight(edge_dict):
return edge_dict.get(weight,1)
for i in range(numb_nodes):
no_changes=True
# Only need edges from nodes in dist b/c all others have dist==inf
for u, dist_u in list(dist.items()): # get all edges from nodes in dist
for v, edict in G[u].items(): # double loop handles undirected too
dist_v = dist_u + get_weight(edict)
if v not in dist or dist[v] > dist_v:
dist[v] = dist_v
pred[v] = u
no_changes = False
if no_changes:
break
else:
raise nx.NetworkXUnbounded("Negative cost cycle detected.")
return pred, dist
def negative_edge_cycle(G, weight = 'weight'):
"""Return True if there exists a negative edge cycle anywhere in G.
Parameters
----------
G : NetworkX graph
weight: string, optional (default='weight')
Edge data key corresponding to the edge weight
Returns
-------
negative_cycle : bool
True if a negative edge cycle exists, otherwise False.
Examples
--------
>>> import networkx as nx
>>> G = nx.cycle_graph(5, create_using = nx.DiGraph())
>>> print(nx.negative_edge_cycle(G))
False
>>> G[1][2]['weight'] = -7
>>> print(nx.negative_edge_cycle(G))
True
Notes
-----
Edge weight attributes must be numerical.
Distances are calculated as sums of weighted edges traversed.
This algorithm uses bellman_ford() but finds negative cycles
on any component by first adding a new node connected to
every node, and starting bellman_ford on that node. It then
removes that extra node.
"""
newnode = generate_unique_node()
G.add_edges_from([ (newnode,n) for n in G])
try:
bellman_ford(G, newnode, weight)
except nx.NetworkXUnbounded:
G.remove_node(newnode)
return True
G.remove_node(newnode)
return False
def bidirectional_dijkstra(G, source, target, weight = 'weight'):
"""Dijkstra's algorithm for shortest paths using bidirectional search.
Parameters
----------
G : NetworkX graph
source : node
Starting node.
target : node
Ending node.
weight: string, optional (default='weight')
Edge data key corresponding to the edge weight
Returns
-------
length : number
Shortest path length.
Returns a tuple of two dictionaries keyed by node.
The first dictionary stores distance from the source.
The second stores the path from the source to that node.
Raises
------
NetworkXNoPath
If no path exists between source and target.
Examples
--------
>>> G=nx.path_graph(5)
>>> length,path=nx.bidirectional_dijkstra(G,0,4)
>>> print(length)
4
>>> print(path)
[0, 1, 2, 3, 4]
Notes
-----
Edge weight attributes must be numerical.
Distances are calculated as sums of weighted edges traversed.
In practice bidirectional Dijkstra is much more than twice as fast as
ordinary Dijkstra.
Ordinary Dijkstra expands nodes in a sphere-like manner from the
source. The radius of this sphere will eventually be the length
of the shortest path. Bidirectional Dijkstra will expand nodes
from both the source and the target, making two spheres of half
this radius. Volume of the first sphere is pi*r*r while the
others are 2*pi*r/2*r/2, making up half the volume.
This algorithm is not guaranteed to work if edge weights
are negative or are floating point numbers
(overflows and roundoff errors can cause problems).
See Also
--------
shortest_path
shortest_path_length
"""
if source == target: return (0, [source])
#Init: Forward Backward
dists = [{}, {}]# dictionary of final distances
paths = [{source:[source]}, {target:[target]}] # dictionary of paths
fringe = [[], []] #heap of (distance, node) tuples for extracting next node to expand
seen = [{source:0}, {target:0} ]#dictionary of distances to nodes seen
#initialize fringe heap
heapq.heappush(fringe[0], (0, source))
heapq.heappush(fringe[1], (0, target))
#neighs for extracting correct neighbor information
if G.is_directed():
neighs = [G.successors_iter, G.predecessors_iter]
else:
neighs = [G.neighbors_iter, G.neighbors_iter]
#variables to hold shortest discovered path
#finaldist = 1e30000
finalpath = []
dir = 1
while fringe[0] and fringe[1]:
# choose direction
# dir == 0 is forward direction and dir == 1 is back
dir = 1-dir
# extract closest to expand
(dist, v )= heapq.heappop(fringe[dir])
if v in dists[dir]:
# Shortest path to v has already been found
continue
# update distance
dists[dir][v] = dist #equal to seen[dir][v]
if v in dists[1-dir]:
# if we have scanned v in both directions we are done
# we have now discovered the shortest path
return (finaldist,finalpath)
for w in neighs[dir](v):
if(dir==0): #forward
if G.is_multigraph():
minweight=min((dd.get(weight,1)
for k,dd in G[v][w].items()))
else:
minweight=G[v][w].get(weight,1)
vwLength = dists[dir][v] + minweight #G[v][w].get(weight,1)
else: #back, must remember to change v,w->w,v
if G.is_multigraph():
minweight=min((dd.get(weight,1)
for k,dd in G[w][v].items()))
else:
minweight=G[w][v].get(weight,1)
vwLength = dists[dir][v] + minweight #G[w][v].get(weight,1)
if w in dists[dir]:
if vwLength < dists[dir][w]:
raise ValueError("Contradictory paths found: negative weights?")
elif w not in seen[dir] or vwLength < seen[dir][w]:
# relaxing
seen[dir][w] = vwLength
heapq.heappush(fringe[dir], (vwLength,w))
paths[dir][w] = paths[dir][v]+[w]
if w in seen[0] and w in seen[1]:
#see if this path is better than than the already
#discovered shortest path
totaldist = seen[0][w] + seen[1][w]
if finalpath == [] or finaldist > totaldist:
finaldist = totaldist
revpath = paths[1][w][:]
revpath.reverse()
finalpath = paths[0][w] + revpath[1:]
raise nx.NetworkXNoPath("No path between %s and %s." % (source, target))
| gpl-3.0 |
berkeley-stat159/project-delta | code/scripts/diagnosis.py | 1 | 4712 | """
Purpose
-------
This script runs a diagnostic analysis in bulk on the raw BOLD data of all runs
contained in the ds005 dataset. It preforms the task of finding outlier volumes,
with respect to the standard deviation among voxels in individual volumes.
It should export a total of six files:
- `vol_std_values.txt`
- `vol_std_outliers.txt`
- `vol_std.png`
- `vol_rms_outliers.png`
- `extended_vol_rms_outliers.png`
- `extended_vol_rms_outliers.txt`
"""
from __future__ import absolute_import, division, print_function
import matplotlib.pyplot as plt
import nibabel as nib
import numpy as np
import numpy.linalg as npl
import os
import sys
sys.path.append("code/utils")
from diagnostics import *
from hypothesis import *
from make_class import *
from plot_tool import *
# Create a collection of all subject IDs and all run IDs
run_IDs = [str(i).zfill(3) for i in range(1, 4)]
subject_IDs = [str(i).zfill(3) for i in range(1, 17)]
IDs = list(zip([run_ID for _ in range(16) for run_ID in run_IDs],
[subject_ID for _ in range(3) for subject_ID in subject_IDs]))
IDs.sort()
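# Note: the two repeated lists above are aligned element-wise, and since 3 and 16 are coprime the zip
# produces each of the 48 (run, subject) combinations exactly once; sorting then orders them by run ID.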
# We perform the procedure outlined in this script for each run of each subject:
for ID in IDs:
run, subject = ID
# Create a directory to which the results will be saved
path_result = "results/run%s/diagnosis/sub%s/" % ID
try:
os.makedirs(path_result)
except OSError:
if not os.path.isdir(path_result):
raise
# Extract all relevant data stored within the ds005 files:
data = ds005(subject, run).filtered.data
# Compute the standard deviation over all voxels for each volume
vol_std_values = vol_std(data)
np.savetxt(path_result + "vol_std_values.txt", vol_std_values)
# Extract the indices of outlier volumes
outlier_idx, lo_hi_thresh = iqr_outliers(vol_std_values)
np.savetxt(path_result + "vol_std_outliers.txt", outlier_idx)
# We plot the volume standard deviation values, marking each outlier point
# with an 'o' and the two thresholds with horizontal dashed lines
plt.plot(vol_std_values, c="b")
outliers = plt.scatter(outlier_idx, vol_std_values[outlier_idx], c="r")
lo_thresh = plt.axhline(lo_hi_thresh[0], color="c", ls="--")
hi_thresh = plt.axhline(lo_hi_thresh[1], color="g", ls="--")
plt.title("Volume Standard Deviation")
plt.xlabel("Volume Index")
plt.ylabel("Standard Deviation")
plt.xlim(0, 240)
plt.ylim(np.floor(min(vol_std_values)), np.ceil(max(vol_std_values)))
plt.legend((outliers, lo_thresh, hi_thresh),
("Outliers", "Low IRQ Threshold", "High IRQ Threshold"), loc=0)
plt.savefig(path_result + "vol_std.png")
plt.close()
# We make a new plot of the root-mean-square values, once again marking
# each outlier with an 'o' and the thresholds with horizontal dashed lines
rmsd = vol_rms_diff(data)
rmsd_outlier_idx, rmsd_thresh = iqr_outliers(rmsd)
plt.plot(rmsd, c="b")
rmsd_outliers = plt.scatter(rmsd_outlier_idx, rmsd[rmsd_outlier_idx], c="r")
lo_rmsd_thresh = plt.axhline(rmsd_thresh[0], color="c", ls="--")
hi_rmsd_thresh = plt.axhline(rmsd_thresh[1], color="g", ls="--")
plt.title("RMS Differences")
plt.xlabel("Difference Index")
plt.ylabel("RMS Difference")
plt.xlim(0, 240)
plt.legend((rmsd_outliers, lo_rmsd_thresh, hi_rmsd_thresh),
("Outliers", "Low IRQ threshold", "High IRQ threshold"), loc=0)
plt.savefig(path_result + "vol_rms_outliers.png")
plt.close()
# We make one last plot of the extended difference outliers, once again with
# each outlier marked with an 'o' and horizontal dashed lines at the
# thresholds. Notice that we must append a 0 to the root-mean-square
# differences so that its length will be equal to the number of volumes.
edo_idx = extend_diff_outliers(rmsd_outlier_idx)
extd_rmsd = np.append(rmsd, 0)
plt.plot(extd_rmsd, c="b")
extd_rmsd_outlier = plt.scatter(edo_idx, extd_rmsd[edo_idx], c="r")
extd_lo_rmsd_thresh = plt.axhline(rmsd_thresh[0], color="c", ls="--")
extd_hi_rmsd_thresh = plt.axhline(rmsd_thresh[1], color="g", ls="--")
plt.title("Entended RMS Difference")
plt.xlabel("Difference Index")
plt.ylabel("RMS Difference")
plt.xlim(0, 240)
plt.legend((extd_rmsd_outlier, extd_lo_rmsd_thresh, extd_hi_rmsd_thresh),
("Extended Outliers", "Low IRQ Threshold", "High IRQ Threshold"),
loc=0)
plt.savefig(path_result + "extended_vol_rms_outliers.png")
plt.close()
# Lastly, in the spirit of good bookkeeping, we also save the extended
# outlier indices to a plaintext file.
np.savetxt(path_result + "extended_vol_rms_outliers.txt", edo_idx)
| bsd-3-clause |
CharLLCH/work-for-py | nlp-train/bm-rotus/adjust_word.py | 1 | 2888 | #coding=utf-8
import pickle
import os
import nltk
import csv
from read_conf import config
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn import linear_model
from sklearn.neighbors import KNeighborsClassifier
from sklearn import svm
from sklearn.decomposition import PCA
cat_dic = {'acq':0,'corn':1,'crude':2,'earn':3,'grain':4,'interest':5,'money-fx':6,'ship':7,'trade':8,'wheat':9}
t_path = config("../conf/dp.conf")
train_path = t_path["train_path"]
test_path = t_path["test_path"]
def handle_doc(word_set,rs_path):
doc_dir = os.listdir(rs_path)
doc_matrix = []
doc_cat = []
for docs in doc_dir:
files = os.listdir(rs_path+docs)
print "start to handle the --> "+docs
for file_d in files:
d_path = rs_path+docs+'/'+file_d
#get the single file path
with open(d_path,'rb') as text_file:
str_tmp = ''
file_lines = text_file.readlines()
for line in file_lines:
pattern = r'''[a-zA-Z]+'''
tokens = nltk.regexp_tokenize(line,pattern)
for t in tokens:
if t.lower() in word_set:
str_tmp += t.lower()
str_tmp += ' '
doc_matrix.append(str_tmp)
doc_cat.append(cat_dic[docs])
text_file.close()
str_tmp = ''
for sw in word_set:
str_tmp += sw
str_tmp += ' '
doc_matrix.append(str_tmp)
doc_cat.append('NAN')
vectorizer = CountVectorizer()
doc_num = vectorizer.fit_transform(doc_matrix)
tfidf = TfidfTransformer()
doc_tfidf = tfidf.fit_transform(doc_num)
return doc_tfidf[:-1,:],doc_cat[:-1]
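# A brief reading of handle_doc (added comment, not part of the original file): it concatenates the kept
# vocabulary words of every document into one string per document, appends one pseudo-document containing
# the whole vocabulary so that the CountVectorizer sees every term, and returns the TF-IDF matrix of the
# real documents together with their category labels. Hypothetical usage:
#   tr_matrix, tr_labels = handle_doc(word_set, train_path)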
def get_wset(path):
with open(path,'rb') as infile:
reader = csv.reader(infile)
tmp = []
for i in reader:
tmp = i
break
return set(tmp)
def save_matrix(path,matrix):
outfile = open(path,'wb')
pickle.dump(matrix,outfile,True)
if __name__ == "__main__":
word_set = get_wset(t_path["wordset_path"])
tr_m,tr_c = handle_doc(word_set,train_path)
te_m,te_c = handle_doc(word_set,test_path)
save_matrix(t_path['train_matrix'],tr_m)
save_matrix(t_path['train_cat'],tr_c)
save_matrix(t_path['test_matrix'],te_m)
save_matrix(t_path['test_cat'],te_c)
logreg = linear_model.LogisticRegression(C=8.5)
logreg.fit(tr_m,tr_c)
test_pre = logreg.predict(te_m)
'''
neigh = KNeighborsClassifier(n_neighbors=8,weights='distance')
neigh.fit(tr_m,tr_c)
test_pre = neigh.predict(te_m)
'''
succ_num = 0
for i in range(len(test_pre)):
if te_c[i] == test_pre[i]:
succ_num += 1
print "Acc : %lf"%(1.0*succ_num/len(te_c)*100)
| gpl-2.0 |
jorik041/scikit-learn | examples/datasets/plot_iris_dataset.py | 283 | 1928 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
The Iris Dataset
=========================================================
This data set consists of 3 different types of irises'
(Setosa, Versicolour, and Virginica) petal and sepal
length, stored in a 150x4 numpy.ndarray
The rows being the samples and the columns being:
Sepal Length, Sepal Width, Petal Length and Petal Width.
The below plot uses the first two features.
See `here <http://en.wikipedia.org/wiki/Iris_flower_data_set>`_ for more
information on this dataset.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from sklearn import datasets
from sklearn.decomposition import PCA
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features.
Y = iris.target
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
plt.figure(2, figsize=(8, 6))
plt.clf()
# Plot the training points
plt.scatter(X[:, 0], X[:, 1], c=Y, cmap=plt.cm.Paired)
plt.xlabel('Sepal length')
plt.ylabel('Sepal width')
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.xticks(())
plt.yticks(())
# To getter a better understanding of interaction of the dimensions
# plot the first three PCA dimensions
fig = plt.figure(1, figsize=(8, 6))
ax = Axes3D(fig, elev=-150, azim=110)
X_reduced = PCA(n_components=3).fit_transform(iris.data)
ax.scatter(X_reduced[:, 0], X_reduced[:, 1], X_reduced[:, 2], c=Y,
cmap=plt.cm.Paired)
ax.set_title("First three PCA directions")
ax.set_xlabel("1st eigenvector")
ax.w_xaxis.set_ticklabels([])
ax.set_ylabel("2nd eigenvector")
ax.w_yaxis.set_ticklabels([])
ax.set_zlabel("3rd eigenvector")
ax.w_zaxis.set_ticklabels([])
plt.show()
| bsd-3-clause |
aewhatley/scikit-learn | examples/applications/plot_prediction_latency.py | 234 | 11277 | """
==================
Prediction Latency
==================
This is an example showing the prediction latency of various scikit-learn
estimators.
The goal is to measure the latency one can expect when doing predictions
either in bulk or atomic (i.e. one by one) mode.
The plots represent the distribution of the prediction latency as a boxplot.
"""
# Authors: Eustache Diemert <[email protected]>
# License: BSD 3 clause
from __future__ import print_function
from collections import defaultdict
import time
import gc
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import scoreatpercentile
from sklearn.datasets.samples_generator import make_regression
from sklearn.ensemble.forest import RandomForestRegressor
from sklearn.linear_model.ridge import Ridge
from sklearn.linear_model.stochastic_gradient import SGDRegressor
from sklearn.svm.classes import SVR
def _not_in_sphinx():
# Hack to detect whether we are running by the sphinx builder
return '__file__' in globals()
def atomic_benchmark_estimator(estimator, X_test, verbose=False):
"""Measure runtime prediction of each instance."""
n_instances = X_test.shape[0]
runtimes = np.zeros(n_instances, dtype=np.float)
for i in range(n_instances):
instance = X_test[i, :]
start = time.time()
estimator.predict(instance)
runtimes[i] = time.time() - start
if verbose:
print("atomic_benchmark runtimes:", min(runtimes), scoreatpercentile(
runtimes, 50), max(runtimes))
return runtimes
def bulk_benchmark_estimator(estimator, X_test, n_bulk_repeats, verbose):
"""Measure runtime prediction of the whole input."""
n_instances = X_test.shape[0]
runtimes = np.zeros(n_bulk_repeats, dtype=np.float)
for i in range(n_bulk_repeats):
start = time.time()
estimator.predict(X_test)
runtimes[i] = time.time() - start
runtimes = np.array(list(map(lambda x: x / float(n_instances), runtimes)))
if verbose:
print("bulk_benchmark runtimes:", min(runtimes), scoreatpercentile(
runtimes, 50), max(runtimes))
return runtimes
def benchmark_estimator(estimator, X_test, n_bulk_repeats=30, verbose=False):
"""
Measure runtimes of prediction in both atomic and bulk mode.
Parameters
----------
estimator : already trained estimator supporting `predict()`
X_test : test input
n_bulk_repeats : how many times to repeat when evaluating bulk mode
Returns
-------
atomic_runtimes, bulk_runtimes : a pair of `np.array` which contain the
runtimes in seconds.
"""
atomic_runtimes = atomic_benchmark_estimator(estimator, X_test, verbose)
bulk_runtimes = bulk_benchmark_estimator(estimator, X_test, n_bulk_repeats,
verbose)
return atomic_runtimes, bulk_runtimes
def generate_dataset(n_train, n_test, n_features, noise=0.1, verbose=False):
"""Generate a regression dataset with the given parameters."""
if verbose:
print("generating dataset...")
X, y, coef = make_regression(n_samples=n_train + n_test,
n_features=n_features, noise=noise, coef=True)
X_train = X[:n_train]
y_train = y[:n_train]
X_test = X[n_train:]
y_test = y[n_train:]
idx = np.arange(n_train)
np.random.seed(13)
np.random.shuffle(idx)
X_train = X_train[idx]
y_train = y_train[idx]
std = X_train.std(axis=0)
mean = X_train.mean(axis=0)
X_train = (X_train - mean) / std
X_test = (X_test - mean) / std
std = y_train.std(axis=0)
mean = y_train.mean(axis=0)
y_train = (y_train - mean) / std
y_test = (y_test - mean) / std
gc.collect()
if verbose:
print("ok")
return X_train, y_train, X_test, y_test
def boxplot_runtimes(runtimes, pred_type, configuration):
"""
Plot a new `Figure` with boxplots of prediction runtimes.
Parameters
----------
runtimes : list of `np.array` of latencies in micro-seconds
pred_type : 'bulk' or 'atomic'
configuration : benchmark configuration dict describing the estimators
"""
fig, ax1 = plt.subplots(figsize=(10, 6))
bp = plt.boxplot(runtimes, )
cls_infos = ['%s\n(%d %s)' % (estimator_conf['name'],
estimator_conf['complexity_computer'](
estimator_conf['instance']),
estimator_conf['complexity_label']) for
estimator_conf in configuration['estimators']]
plt.setp(ax1, xticklabels=cls_infos)
plt.setp(bp['boxes'], color='black')
plt.setp(bp['whiskers'], color='black')
plt.setp(bp['fliers'], color='red', marker='+')
ax1.yaxis.grid(True, linestyle='-', which='major', color='lightgrey',
alpha=0.5)
ax1.set_axisbelow(True)
ax1.set_title('Prediction Time per Instance - %s, %d feats.' % (
pred_type.capitalize(),
configuration['n_features']))
ax1.set_ylabel('Prediction Time (us)')
plt.show()
def benchmark(configuration):
"""Run the whole benchmark."""
X_train, y_train, X_test, y_test = generate_dataset(
configuration['n_train'], configuration['n_test'],
configuration['n_features'])
stats = {}
for estimator_conf in configuration['estimators']:
print("Benchmarking", estimator_conf['instance'])
estimator_conf['instance'].fit(X_train, y_train)
gc.collect()
a, b = benchmark_estimator(estimator_conf['instance'], X_test)
stats[estimator_conf['name']] = {'atomic': a, 'bulk': b}
cls_names = [estimator_conf['name'] for estimator_conf in configuration[
'estimators']]
runtimes = [1e6 * stats[clf_name]['atomic'] for clf_name in cls_names]
boxplot_runtimes(runtimes, 'atomic', configuration)
runtimes = [1e6 * stats[clf_name]['bulk'] for clf_name in cls_names]
boxplot_runtimes(runtimes, 'bulk (%d)' % configuration['n_test'],
configuration)
def n_feature_influence(estimators, n_train, n_test, n_features, percentile):
"""
Estimate influence of the number of features on prediction time.
Parameters
----------
estimators : dict of (name (str), estimator) to benchmark
n_train : number of training instances (int)
n_test : number of testing instances (int)
n_features : list of feature-space dimensionality to test (int)
percentile : percentile at which to measure the speed (int [0-100])
Returns:
--------
percentiles : dict(estimator_name,
dict(n_features, percentile_perf_in_us))
"""
percentiles = defaultdict(defaultdict)
for n in n_features:
print("benchmarking with %d features" % n)
X_train, y_train, X_test, y_test = generate_dataset(n_train, n_test, n)
for cls_name, estimator in estimators.items():
estimator.fit(X_train, y_train)
gc.collect()
runtimes = bulk_benchmark_estimator(estimator, X_test, 30, False)
percentiles[cls_name][n] = 1e6 * scoreatpercentile(runtimes,
percentile)
return percentiles
def plot_n_features_influence(percentiles, percentile):
fig, ax1 = plt.subplots(figsize=(10, 6))
colors = ['r', 'g', 'b']
for i, cls_name in enumerate(percentiles.keys()):
x = np.array(sorted([n for n in percentiles[cls_name].keys()]))
y = np.array([percentiles[cls_name][n] for n in x])
plt.plot(x, y, color=colors[i], )
ax1.yaxis.grid(True, linestyle='-', which='major', color='lightgrey',
alpha=0.5)
ax1.set_axisbelow(True)
ax1.set_title('Evolution of Prediction Time with #Features')
ax1.set_xlabel('#Features')
ax1.set_ylabel('Prediction Time at %d%%-ile (us)' % percentile)
plt.show()
def benchmark_throughputs(configuration, duration_secs=0.1):
"""benchmark throughput for different estimators."""
X_train, y_train, X_test, y_test = generate_dataset(
configuration['n_train'], configuration['n_test'],
configuration['n_features'])
throughputs = dict()
for estimator_config in configuration['estimators']:
estimator_config['instance'].fit(X_train, y_train)
start_time = time.time()
n_predictions = 0
while (time.time() - start_time) < duration_secs:
estimator_config['instance'].predict(X_test[0])
n_predictions += 1
throughputs[estimator_config['name']] = n_predictions / duration_secs
return throughputs
def plot_benchmark_throughput(throughputs, configuration):
fig, ax = plt.subplots(figsize=(10, 6))
colors = ['r', 'g', 'b']
cls_infos = ['%s\n(%d %s)' % (estimator_conf['name'],
estimator_conf['complexity_computer'](
estimator_conf['instance']),
estimator_conf['complexity_label']) for
estimator_conf in configuration['estimators']]
cls_values = [throughputs[estimator_conf['name']] for estimator_conf in
configuration['estimators']]
plt.bar(range(len(throughputs)), cls_values, width=0.5, color=colors)
ax.set_xticks(np.linspace(0.25, len(throughputs) - 0.75, len(throughputs)))
ax.set_xticklabels(cls_infos, fontsize=10)
ymax = max(cls_values) * 1.2
ax.set_ylim((0, ymax))
ax.set_ylabel('Throughput (predictions/sec)')
ax.set_title('Prediction Throughput for different estimators (%d '
'features)' % configuration['n_features'])
plt.show()
###############################################################################
# main code
start_time = time.time()
# benchmark bulk/atomic prediction speed for various regressors
configuration = {
'n_train': int(1e3),
'n_test': int(1e2),
'n_features': int(1e2),
'estimators': [
{'name': 'Linear Model',
'instance': SGDRegressor(penalty='elasticnet', alpha=0.01,
l1_ratio=0.25, fit_intercept=True),
'complexity_label': 'non-zero coefficients',
'complexity_computer': lambda clf: np.count_nonzero(clf.coef_)},
{'name': 'RandomForest',
'instance': RandomForestRegressor(),
'complexity_label': 'estimators',
'complexity_computer': lambda clf: clf.n_estimators},
{'name': 'SVR',
'instance': SVR(kernel='rbf'),
'complexity_label': 'support vectors',
'complexity_computer': lambda clf: len(clf.support_vectors_)},
]
}
benchmark(configuration)
# benchmark n_features influence on prediction speed
percentile = 90
percentiles = n_feature_influence({'ridge': Ridge()},
configuration['n_train'],
configuration['n_test'],
[100, 250, 500], percentile)
plot_n_features_influence(percentiles, percentile)
# benchmark throughput
throughputs = benchmark_throughputs(configuration)
plot_benchmark_throughput(throughputs, configuration)
stop_time = time.time()
print("example run in %.2fs" % (stop_time - start_time))
| bsd-3-clause |
JPalmerio/GRB_population_code | grbpop/simple_example.py | 1 | 3663 | from ECLAIRs import init_ECLAIRs
from cosmology import init_cosmology, create_cosmology
from GRB_population import GRBPopulation, create_GRB_population_from
import plotting_functions as pf
import matplotlib.pyplot as plt
import io_grb_pop as io
import numpy as np
import logging
import sys
log = logging.getLogger(__name__)
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG,
format='%(asctime)s.%(msecs)03d [%(levelname)s] %(message)s',
datefmt='%H:%M:%S')
logging.getLogger('matplotlib').setLevel(logging.WARNING)
# Define the paths used by the code
paths_to_dir, paths_to_files = io.generate_paths(conf_fname='config_simple_example.yml',
param_fname='parameters_simple_example.yml',
init_dir=None)
# Read the input files
config, params, instruments, samples, obs_constraints = io.read_init_files(paths_to_files)
io.create_output_dir(paths_to_dir=paths_to_dir, dir_name=config['output_dir'], overwrite=True)
# If you want to make predictions for ECLAIRs you need the ECLAIRs properties
ECLAIRs_prop = None
# ECLAIRs_prop = init_ECLAIRs(ECLAIRs_dir=paths_to_dir['ECLAIRs'],
# Emin=instruments['ECLAIRs']['Emin'],
# Emax=instruments['ECLAIRs']['Emax'],
# n_sigma=instruments['ECLAIRs']['n_sigma'])
# samples['ECLAIRs']['pflx_min'] = ECLAIRs_prop['bkg_total']
# You must do this before calling io.create_config
# Code calculates which samples, instruments, and constraints to include
incl_samples, incl_instruments, incl_constraints = io.create_config(config,
samples,
instruments,
obs_constraints)
# Initialize the cosmology
cosmo = init_cosmology(paths_to_dir['cosmo'])
# If you want to create your own cosmology use:
# cosmo = create_cosmology(OmegaM=0.3, OmegaL=0.7, h=0.7)
# Generate the GRB population
np.random.seed(0)
gp = create_GRB_population_from(Nb_GRBs=config['Nb_GRBs'],
cosmo=cosmo,
params=params,
incl_samples=incl_samples,
incl_instruments=incl_instruments,
incl_constraints=incl_constraints,
ECLAIRs_prop=ECLAIRs_prop,
output_dir=paths_to_dir['output'],
run_mode='debug',
savefig=True)
# This function is equivalent to the following:
# gp = GRBPopulation(Nb_GRBs=config['Nb_GRBs'], output_dir=paths_to_dir['output'])
# gp.draw_GRB_properties(cosmo=cosmo, params=params, run_mode='debug', savefig=True)
# gp.calculate_quantities(instruments=incl_instruments, samples=incl_samples)
# gp.create_mock_constraints(constraints=incl_constraints)
# gp.compare_to_observational_constraints(constraints=incl_constraints)
# gp.normalize_to_Stern()
# print(gp.summary())
try:
plt.style.use('presentation')
except:
pass
fig, axes = plt.subplots(1,3, figsize=(15,4), tight_layout=False)
pf.plot_intensity_constraint(axes[0], pop=gp, label='Simple Example', color='C3')
pf.plot_spectral_constraint(axes[1], pop=gp, label='Simple Example', color='C3')
pf.plot_redshift_constraint(axes[2], pop=gp, label='Simple Example', color='C3')
fig.savefig(paths_to_dir['output']/'constraints_simple_example.pdf', bbox_inches='tight')
| gpl-3.0 |
MartinDelzant/scikit-learn | sklearn/utils/tests/test_class_weight.py | 90 | 12846 | import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.datasets import make_blobs
from sklearn.utils.class_weight import compute_class_weight
from sklearn.utils.class_weight import compute_sample_weight
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_warns
def test_compute_class_weight():
# Test (and demo) compute_class_weight.
y = np.asarray([2, 2, 2, 3, 3, 4])
classes = np.unique(y)
cw = assert_warns(DeprecationWarning,
compute_class_weight, "auto", classes, y)
assert_almost_equal(cw.sum(), classes.shape)
assert_true(cw[0] < cw[1] < cw[2])
cw = compute_class_weight("balanced", classes, y)
# total effect of samples is preserved
class_counts = np.bincount(y)[2:]
assert_almost_equal(np.dot(cw, class_counts), y.shape[0])
assert_true(cw[0] < cw[1] < cw[2])
def test_compute_class_weight_not_present():
# Raise error when y does not contain all class labels
classes = np.arange(4)
y = np.asarray([0, 0, 0, 1, 1, 2])
assert_raises(ValueError, compute_class_weight, "auto", classes, y)
assert_raises(ValueError, compute_class_weight, "balanced", classes, y)
def test_compute_class_weight_dict():
classes = np.arange(3)
class_weights = {0: 1.0, 1: 2.0, 2: 3.0}
y = np.asarray([0, 0, 1, 2])
cw = compute_class_weight(class_weights, classes, y)
# When the user specifies class weights, compute_class_weights should just
# return them.
assert_array_almost_equal(np.asarray([1.0, 2.0, 3.0]), cw)
# When a class weight is specified that isn't in classes, a ValueError
# should get raised
msg = 'Class label 4 not present.'
class_weights = {0: 1.0, 1: 2.0, 2: 3.0, 4: 1.5}
assert_raise_message(ValueError, msg, compute_class_weight, class_weights,
classes, y)
msg = 'Class label -1 not present.'
class_weights = {-1: 5.0, 0: 1.0, 1: 2.0, 2: 3.0}
assert_raise_message(ValueError, msg, compute_class_weight, class_weights,
classes, y)
def test_compute_class_weight_invariance():
# Test that results with class_weight="balanced" is invariant wrt
# class imbalance if the number of samples is identical.
# The test uses a balanced two class dataset with 100 datapoints.
# It creates three versions, one where class 1 is duplicated
# resulting in 150 points of class 1 and 50 of class 0,
# one where there are 50 points in class 1 and 150 in class 0,
# and one where there are 100 points of each class (this one is balanced
# again).
# With balancing class weights, all three should give the same model.
X, y = make_blobs(centers=2, random_state=0)
# create dataset where class 1 is duplicated twice
X_1 = np.vstack([X] + [X[y == 1]] * 2)
y_1 = np.hstack([y] + [y[y == 1]] * 2)
# create dataset where class 0 is duplicated twice
X_0 = np.vstack([X] + [X[y == 0]] * 2)
y_0 = np.hstack([y] + [y[y == 0]] * 2)
# duplicate everything
X_ = np.vstack([X] * 2)
y_ = np.hstack([y] * 2)
# results should be identical
logreg1 = LogisticRegression(class_weight="balanced").fit(X_1, y_1)
logreg0 = LogisticRegression(class_weight="balanced").fit(X_0, y_0)
logreg = LogisticRegression(class_weight="balanced").fit(X_, y_)
assert_array_almost_equal(logreg1.coef_, logreg0.coef_)
assert_array_almost_equal(logreg.coef_, logreg0.coef_)
def test_compute_class_weight_auto_negative():
# Test compute_class_weight when labels are negative
# Test with balanced class labels.
classes = np.array([-2, -1, 0])
y = np.asarray([-1, -1, 0, 0, -2, -2])
cw = assert_warns(DeprecationWarning, compute_class_weight, "auto",
classes, y)
assert_almost_equal(cw.sum(), classes.shape)
assert_equal(len(cw), len(classes))
assert_array_almost_equal(cw, np.array([1., 1., 1.]))
cw = compute_class_weight("balanced", classes, y)
assert_equal(len(cw), len(classes))
assert_array_almost_equal(cw, np.array([1., 1., 1.]))
# Test with unbalanced class labels.
y = np.asarray([-1, 0, 0, -2, -2, -2])
cw = assert_warns(DeprecationWarning, compute_class_weight, "auto",
classes, y)
assert_almost_equal(cw.sum(), classes.shape)
assert_equal(len(cw), len(classes))
assert_array_almost_equal(cw, np.array([0.545, 1.636, 0.818]), decimal=3)
cw = compute_class_weight("balanced", classes, y)
assert_equal(len(cw), len(classes))
class_counts = np.bincount(y + 2)
assert_almost_equal(np.dot(cw, class_counts), y.shape[0])
assert_array_almost_equal(cw, [2. / 3, 2., 1.])
def test_compute_class_weight_auto_unordered():
# Test compute_class_weight when classes are unordered
classes = np.array([1, 0, 3])
y = np.asarray([1, 0, 0, 3, 3, 3])
cw = assert_warns(DeprecationWarning, compute_class_weight, "auto",
classes, y)
assert_almost_equal(cw.sum(), classes.shape)
assert_equal(len(cw), len(classes))
assert_array_almost_equal(cw, np.array([1.636, 0.818, 0.545]), decimal=3)
cw = compute_class_weight("balanced", classes, y)
class_counts = np.bincount(y)[classes]
assert_almost_equal(np.dot(cw, class_counts), y.shape[0])
assert_array_almost_equal(cw, [2., 1., 2. / 3])
def test_compute_sample_weight():
# Test (and demo) compute_sample_weight.
# Test with balanced classes
y = np.asarray([1, 1, 1, 2, 2, 2])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
sample_weight = compute_sample_weight("balanced", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
# Test with user-defined weights
sample_weight = compute_sample_weight({1: 2, 2: 1}, y)
assert_array_almost_equal(sample_weight, [2., 2., 2., 1., 1., 1.])
# Test with column vector of balanced classes
y = np.asarray([[1], [1], [1], [2], [2], [2]])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
sample_weight = compute_sample_weight("balanced", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
# Test with unbalanced classes
y = np.asarray([1, 1, 1, 2, 2, 2, 3])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y)
expected_auto = np.asarray([.6, .6, .6, .6, .6, .6, 1.8])
assert_array_almost_equal(sample_weight, expected_auto)
sample_weight = compute_sample_weight("balanced", y)
expected_balanced = np.array([0.7777, 0.7777, 0.7777, 0.7777, 0.7777, 0.7777, 2.3333])
assert_array_almost_equal(sample_weight, expected_balanced, decimal=4)
# Test with `None` weights
sample_weight = compute_sample_weight(None, y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1., 1.])
# Test with multi-output of balanced classes
y = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1]])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
sample_weight = compute_sample_weight("balanced", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
# Test with multi-output with user-defined weights
y = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1]])
sample_weight = compute_sample_weight([{1: 2, 2: 1}, {0: 1, 1: 2}], y)
assert_array_almost_equal(sample_weight, [2., 2., 2., 2., 2., 2.])
# Test with multi-output of unbalanced classes
y = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1], [3, -1]])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y)
assert_array_almost_equal(sample_weight, expected_auto ** 2)
sample_weight = compute_sample_weight("balanced", y)
assert_array_almost_equal(sample_weight, expected_balanced ** 2, decimal=3)
def test_compute_sample_weight_with_subsample():
# Test compute_sample_weight with subsamples specified.
# Test with balanced classes and all samples present
y = np.asarray([1, 1, 1, 2, 2, 2])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
sample_weight = compute_sample_weight("balanced", y, range(6))
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
# Test with column vector of balanced classes and all samples present
y = np.asarray([[1], [1], [1], [2], [2], [2]])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
sample_weight = compute_sample_weight("balanced", y, range(6))
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
# Test with a subsample
y = np.asarray([1, 1, 1, 2, 2, 2])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y, range(4))
assert_array_almost_equal(sample_weight, [.5, .5, .5, 1.5, 1.5, 1.5])
sample_weight = compute_sample_weight("balanced", y, range(4))
assert_array_almost_equal(sample_weight, [2. / 3, 2. / 3,
2. / 3, 2., 2., 2.])
# Test with a bootstrap subsample
y = np.asarray([1, 1, 1, 2, 2, 2])
sample_weight = assert_warns(DeprecationWarning, compute_sample_weight,
"auto", y, [0, 1, 1, 2, 2, 3])
expected_auto = np.asarray([1 / 3., 1 / 3., 1 / 3., 5 / 3., 5 / 3., 5 / 3.])
assert_array_almost_equal(sample_weight, expected_auto)
sample_weight = compute_sample_weight("balanced", y, [0, 1, 1, 2, 2, 3])
expected_balanced = np.asarray([0.6, 0.6, 0.6, 3., 3., 3.])
assert_array_almost_equal(sample_weight, expected_balanced)
# Test with a bootstrap subsample for multi-output
y = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1]])
sample_weight = assert_warns(DeprecationWarning, compute_sample_weight,
"auto", y, [0, 1, 1, 2, 2, 3])
assert_array_almost_equal(sample_weight, expected_auto ** 2)
sample_weight = compute_sample_weight("balanced", y, [0, 1, 1, 2, 2, 3])
assert_array_almost_equal(sample_weight, expected_balanced ** 2)
# Test with a missing class
y = np.asarray([1, 1, 1, 2, 2, 2, 3])
sample_weight = assert_warns(DeprecationWarning, compute_sample_weight,
"auto", y, range(6))
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1., 0.])
sample_weight = compute_sample_weight("balanced", y, range(6))
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1., 0.])
# Test with a missing class for multi-output
y = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1], [2, 2]])
sample_weight = assert_warns(DeprecationWarning, compute_sample_weight,
"auto", y, range(6))
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1., 0.])
sample_weight = compute_sample_weight("balanced", y, range(6))
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1., 0.])
def test_compute_sample_weight_errors():
# Test compute_sample_weight raises errors expected.
# Invalid preset string
y = np.asarray([1, 1, 1, 2, 2, 2])
y_ = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1]])
assert_raises(ValueError, compute_sample_weight, "ni", y)
assert_raises(ValueError, compute_sample_weight, "ni", y, range(4))
assert_raises(ValueError, compute_sample_weight, "ni", y_)
assert_raises(ValueError, compute_sample_weight, "ni", y_, range(4))
# Not "auto" for subsample
assert_raises(ValueError,
compute_sample_weight, {1: 2, 2: 1}, y, range(4))
# Not a list or preset for multi-output
assert_raises(ValueError, compute_sample_weight, {1: 2, 2: 1}, y_)
# Incorrect length list for multi-output
assert_raises(ValueError, compute_sample_weight, [{1: 2, 2: 1}], y_)
| bsd-3-clause |
coolankit95/Kaggle-Competitions | Housing Price Competition/KaggleRegressionCompetition.py | 1 | 7719 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Jun 21 15:35:31 2017
@author: Ankit
"""
# Importing the libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# Importing the dataset
train = pd.read_csv('train.csv')
test=pd.read_csv('test.csv')
X = train.iloc[:, :-1].values
y = train.iloc[:, -1].values
#obtain all the numeric features in both datasets
#here numeric_features is also a DataFrame
numeric_features_train=train.select_dtypes(include=[np.number])
numeric_features_train.describe()
numeric_features_test=test.select_dtypes(include=[np.number])
numeric_features_test.describe()
#obtain categorical variables in both datasets
#Use one-hot encoding to encode these variables to numerical values
categoricals_train = train.select_dtypes(exclude=[np.number])
categoricals_train.describe()
categoricals_test = test.select_dtypes(exclude=[np.number])
categoricals_test.describe()
#obtain null values for each column in both datasets
nulls_train = pd.DataFrame(train.isnull().sum().sort_values(ascending=False)[:])
nulls_train.columns = ['Null Count']
nulls_train.index.name = 'Feature'
nulls_train[:16]
nulls_test = pd.DataFrame(test.isnull().sum().sort_values(ascending=False)[:])
nulls_test.columns = ['Null Count']
nulls_test.index.name = 'Feature'
nulls_test[:16]
#Delete the columns having the most null values in the train as well as the test dataset
for elem in nulls_train.index[:16]:
train=train.drop(elem,1)
test=test.drop(elem,1)
#examine the correlation between attributes in train dataset
corr=numeric_features_train.corr()
#corr['SalePrice'].sort_values(ascending='False').index
#Delete all the columns which have very little correlation with the target variable
#i.e. remove attributes whose correlation coefficient lies between -0.2 and 0.4
del_corr=[]
for elem in corr['SalePrice'].sort_values(ascending=False).index:
val=corr['SalePrice'][elem]
if(val<0.400000 and val>(-0.20000)):
del_corr.append(elem)
#check if label are present in dataset or not
for label in del_corr:
if(label in train.columns):
train=train.drop(label,axis=1)
test=test.drop(label,axis=1)
categoricals_train = train.select_dtypes(exclude=[np.number])
categoricals_train.describe()
categoricals_test = test.select_dtypes(exclude=[np.number])
categoricals_test.describe()
#Remove the categorical attributes which have <= 6 categories
#this is because these won't affect the dependent variable much
for column in categoricals_train.columns:
if(len(train[column].unique())<=6):
train=train.drop(column,axis=1)
test=test.drop(column,axis=1)
#Up till here we have:
#Removed null variables
#Removed less correlated variables
#Removed some categorical variables
#Split categorical variables into dummy variables with corresponding values of 0 or 1,
#depending on whether that category is present for that particular record
l=[0,1,2,3,7,8,9,16,20] #index of categorical variables
from sklearn.preprocessing import LabelEncoder ,OneHotEncoder
labelencoder_train= LabelEncoder()
labelencoder_test= LabelEncoder()
for i in l:
train.iloc[:,i]=labelencoder_train.fit_transform(train.iloc[:,i].factorize()[0])
test.iloc[:,i]=labelencoder_test.fit_transform(test.iloc[:,i].factorize()[0])
#Encode the dataset to get dummy categories
train=pd.get_dummies(train,columns=['Neighborhood','Condition1','Condition2','HouseStyle','RoofMatl','Exterior1st','Exterior2nd','FullBath','TotRmsAbvGrd','Functional','Fireplaces','GarageCars','SaleType'])
test=pd.get_dummies(test,columns=['Neighborhood','Condition1','Condition2','HouseStyle','RoofMatl','Exterior1st','Exterior2nd','FullBath','TotRmsAbvGrd','Functional','Fireplaces','GarageCars','SaleType'])
#Now Avoid Dummy variable trap
train=train.drop('Neighborhood_23',axis=1)
train=train.drop('Condition1_7',axis=1)
train=train.drop('Condition2_6',axis=1)
train=train.drop('HouseStyle_6',axis=1)
train=train.drop('RoofMatl_7',axis=1)
train=train.drop('Exterior1st_14',axis=1)
train=train.drop('Exterior2nd_15',axis=1)
train=train.drop('FullBath_3',axis=1)
train=train.drop('TotRmsAbvGrd_14',axis=1)
train=train.drop('Functional_6',axis=1)
train=train.drop('Fireplaces_3',axis=1)
train=train.drop('GarageCars_4',axis=1)
train=train.drop('SaleType_8',axis=1)
train=train.drop('Condition1_8',axis=1)
train=train.drop('Condition2_4',axis=1)
train=train.drop('Condition2_5',axis=1)
train=train.drop('Condition2_7',axis=1)
test=test.drop('Neighborhood_24',axis=1)
test=test.drop('Condition1_8',axis=1)
test=test.drop('Condition2_4',axis=1)
test=test.drop('HouseStyle_6',axis=1)
test=test.drop('RoofMatl_3',axis=1)
test=test.drop('Exterior1st_13',axis=1)
test=test.drop('Exterior2nd_15',axis=1)
test=test.drop('FullBath_4',axis=1)
test=test.drop('TotRmsAbvGrd_15',axis=1)
test=test.drop('Functional_7',axis=1)
test=test.drop('Fireplaces_4',axis=1)
test=test.drop('GarageCars_5.0',axis=1)
test=test.drop('SaleType_9',axis=1)
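# Note (added): the long list of manual drop() calls above removes one dummy column
# per one-hot-encoded feature to avoid the dummy-variable trap. A more compact
# alternative (hypothetical, not what the original script does) would be to let
# pandas drop the first level of every encoded column automatically:
#
#     train = pd.get_dummies(train, columns=cat_cols, drop_first=True)
#     test = pd.get_dummies(test, columns=cat_cols, drop_first=True)
#
# where cat_cols stands for the same list of categorical column names used above.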
print(list(train.isnull().any()))
#Remove Null values
train['MasVnrArea']=train['MasVnrArea'].factorize()[0]
train.isnull().any()
print(list(test.isnull().any()))
test['MasVnrArea']=test['MasVnrArea'].factorize()[0]
test['TotalBsmtSF']=test['TotalBsmtSF'].factorize()[0]
test['GarageArea']=test['GarageArea'].factorize()[0]
print(list(test.isnull().any()))
X=train.iloc[:,:]
X=X.drop('SalePrice',axis=1)
y=train.iloc[:,8]
#Now standardize the dataset
# Feature Scaling
from sklearn.preprocessing import StandardScaler
sc_train = StandardScaler()
sc_test = StandardScaler()
X = sc_train.fit_transform(X)
test = sc_test.fit_transform(test)
#X (independent variables) and y (dependent variable, SalePrice) are now ready
#Fit the regression models below
#Run Random forest regression
from sklearn.ensemble import RandomForestRegressor
regressor = RandomForestRegressor(n_estimators = 100, random_state = 0)
regressor.fit(X, y)
final_prediction=regressor.predict(test)
#Run decision tree regression
from sklearn.tree import DecisionTreeRegressor
regressor=DecisionTreeRegressor()
regressor.fit(X,y)
final_prediction=regressor.predict(test)
#Run support vector regression
from sklearn.svm import SVR
regressor = SVR(kernel='linear')
regressor.fit(X,y)
final_prediction=regressor.predict(test)
#run linear regression
from sklearn.linear_model import LinearRegression
regressor=LinearRegression()
regressor.fit(X,y)
final_prediction=regressor.predict(test)
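# Note (added): each regressor above overwrites `final_prediction`, so only the last
# model that was fitted (the LinearRegression directly above) determines the values
# written to the submission file at the bottom of this script.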
"""
#Run ANN
import keras
from keras.models import Sequential
from keras.layers import Dense
from keras.models import model_from_json
import os
regressor = Sequential()
regressor.add(Dense(units = 61, kernel_initializer = 'uniform', activation = 'sigmoid', input_dim = 121))
# Adding the second hidden layer
regressor.add(Dense(units= 61, kernel_initializer ='uniform', activation = 'sigmoid'))
#here we don't use any activation function on the output layer for a regression problem
regressor.add(Dense(units = 1, kernel_initializer = 'uniform'))
X=np.array(X)
y=np.array(y)
regressor.compile(optimizer = 'adam', loss = 'mean_squared_error', metrics = ['accuracy'])
regressor.fit(X, y, batch_size = 10, nb_epoch = 100)
# evaluate the model
scores = regressor.evaluate(X, y, verbose=0)
print("%s: %.2f%%" % (regressor.metrics_names[1], scores[1]*100))
final_prediction=regressor.predict(test)
"""
submission=open('/Users/arjita/ML/KaggleRegressionCompetition/RandomForest1Submission.csv','w')
submission.write('Id'+','+'SalePrice'+'\n')
for i in range(len(final_prediction)):
submission.write(str(i+1461)+','+str(format(final_prediction[i],'0.9f'))+'\n')
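# Note (added): the original script never closes the file handle; an explicit close
# (or a "with open(...) as submission:" block) makes sure the CSV is flushed to disk.
submission.close()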
| gpl-3.0 |
ghorn/rawesome | studies/test_mhe_mpc_export/mpc_mhe_utils.py | 2 | 11499 | # Copyright 2012-2013 Greg Horn
#
# This file is part of rawesome.
#
# rawesome is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# rawesome is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with rawesome. If not, see <http://www.gnu.org/licenses/>.
# -*- coding: utf-8 -*-
"""
Created on Wed Apr 24 15:19:57 2013
@author: mzanon
"""
import numpy as np
import scipy.linalg
import copy
import matplotlib.pyplot as plt
import casadi as C
import rawe
def dlqr(A, B, Q, R, N=None):
    if N is None:
        N = np.zeros((Q.shape[0], R.shape[0]))
P = scipy.linalg.solve_discrete_are(A, B, Q, R)
# nx = A.shape[0]
# nu = B.shape[1]
# PL = np.eye(nx)
# for k in range(10):
# M = np.bmat([[ Q, np.zeros((nx,nu)), np.zeros((nx,nx)) ],
# [ 0, R, np.zeros((nu,nx)) ],
# [ -np.dot(PL,A), -np.dot(PL,B), PL ]])
# np.linalg.inv(np.dot(M.T,M))*M.T
k1 = np.dot(np.dot(B.T, P), B) + R
k2 = np.dot(np.dot(B.T, P), A)
K = np.linalg.solve(k1,k2)
return K, P
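# Hedged usage sketch (added for illustration, not part of the original module):
# the discrete-time LQR gain for a double integrator discretized with dt = 0.1 s
# and identity weights. The numbers are purely illustrative.
#
#     >>> A = np.array([[1.0, 0.1], [0.0, 1.0]])
#     >>> B = np.array([[0.005], [0.1]])
#     >>> K, P = dlqr(A, B, np.eye(2), np.eye(1))
#     >>> # u = -K.dot(x) is the stabilizing state feedback, P the Riccati solution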
def ComputeTerminalCost(integrator, xlin, ulin, Q, R, N=None):
integrator.x = xlin
integrator.u = ulin
integrator.step()
A = integrator.dx1_dx0
B = integrator.dx1_du
# integrator.getOutputs()
    K, P = dlqr(A, B, Q, R, N=N)
return K, P, A, B
def UpdateArrivalCost(integrator, x, u, xL, yL, PL, VL, WL):
''' Arrival cost implementation.
Approximate the solution of:
min_{xL_,uL_,xL1_} || PL ( xL_-xL ) ||
|| VL ( yL-h(xL_,uL_) ) ||
|| WL wx ||
s.t. wx = xL1_ - f(xL_,uL_)
Linearization (at the last MHE estimate x,u which is different from xL,uL):
f(xL_,uL_) ~= f(x,u) + df(x,u)/dx (xL_-x) + df(x,u)/du (uL_-u)
~= f(x,u) + Xx (xL_-x) + Xu (uL_-u)
~= f(x,u) - Xx x - Xu u + Xx xL_ + Xu uL_
~= x_tilde + Xx xL_ + Xu uL_
h(xL_,uL_) ~= h(x,u) + dh(x,u)/dx (xL_-x) + dh(x,u)/du (uL_-u)
~= f(x,u) + Hx (xL_-x) + Hu (uL_-u)
~= h(x,u) - Hx x - Hu u + Hx xL_ + Hu uL_
~= h_tilde + Hx xL_ + Hu uL_
Linearized problem:
min_{xL_,uL_,xL1_} || PL ( xL_ - xL ) ||
|| VL ( yL - h_tilde - Hx xL_ - Hu uL_ ) ||
|| WL ( xL1_ - x_tilde - Xx xL_ - Xu uL_ ) ||
Rewrite as:
min_{xL_,uL_,xL1_} || M ( xL_, uL_, xL1_ ) + res ||
After QR factorization of M:
min_{xL_,uL_,xL1_} || R ( xL_, uL_, xL1_ ) + rho ||
'''
nx = x.shape[0]
nu = u.shape[0]
nV = VL.shape[0]
integrator.x = x
integrator.u = u
out = integrator.getOutputs()
h = np.squeeze(out['measurements'])
x1 = integrator.step()
Xx = integrator.dx1_dx0
Xu = integrator.dx1_du
Hx = np.diag([1,0])
Hu = np.array([[0,1]]).T
x_tilde = x1 - np.dot(Xx,x) - np.dot(Xu,u)
h_tilde = h - np.dot(Hx,x) - np.dot(Hu,u)
res = np.bmat([ -np.dot(PL, xL),
np.dot(VL, yL - h_tilde),
-np.dot(WL, x_tilde) ])
res = np.squeeze(np.array(res))
M = np.bmat([[ PL, np.zeros((nx,nu)), np.zeros((nx,nx)) ],
[ -np.dot(VL,Hx), -np.dot(VL,Hu), np.zeros((nV,nx)) ],
[ -np.dot(WL,Xx), -np.dot(WL,Xu), WL ]])
Q, R = np.linalg.qr(M)
# R1 = R[:nx+nu,:nx+nu]
# R12 = R[:nx+nu,nx+nu:]
R2 = R[nx+nu:,nx+nu:]
# rho = np.linalg.solve(Q,res)
rho = np.squeeze(np.array(np.dot(Q.T,res)))
rho2 = rho[nx+nu:]
PL1 = R2
xL1 = -np.linalg.solve(R2,rho2)
return np.array(PL1), np.array(xL1)
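# (Comment added) The pair returned above is meant to be fed back in at the next
# estimation step: PL1 is the updated square-root arrival-cost weight (the R2 block
# of the QR factorization) and xL1 the updated arrival-cost reference state, i.e.
# they play the roles of PL and xL in the next call to UpdateArrivalCost.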
def GenerateReference(dae,conf,refP):
z0 = refP['z0']
r0 = refP['r0']
ddelta0 = refP['ddelta0']
steadyState, _ = getSteadyState(dae,conf,ddelta0,r0,z0)
xref = {}
uref = {}
for name in dae.xNames():
xref[name] = steadyState[name]
for name in dae.uNames():
uref[name] = steadyState[name]
return xref, uref
def InitializeMPC(mpcrt,dae):
mpcrt.x = np.zeros(mpcrt.x.shape)
mpcrt.u = np.zeros(mpcrt.u.shape)
mpcrt.S[0,0] = 2.0#/(xRms)**2
mpcrt.S[1,1] = 1.0#/(vRms)**2
mpcrt.S[2,2] = 1.0#/(fRms)**2
mpcrt.SN[0,0] = 1.0#/(xRms)**2
mpcrt.SN[1,1] = 1.0#/(vRms)**2
P = np.array([[2.870184272463204e+01, 1.415980225850630e+01],
[1.415980225850630e+01, 2.011263075885158e+01]])# 28.7018-2, 14.1598], [14.1598, 20.1126-1]])
mpcrt.SN = P
# LinearizeSystem(mpcrt,dae)
# mpcLog = rawe.ocp.ocprt.Logger(mpcrt,dae)
#
# return mpcLog
def InitializeMHE(mhert,dae):
mhert.x = np.zeros(mhert.x.shape)
mhert.u = np.zeros(mhert.u.shape)
mhert.y = np.zeros(mhert.y.shape)
mhert.y[:,0] += 1
mhert.yN = np.zeros(mhert.yN.shape)+1
mhert.S[0,0] = 1.0#/(xRms)**2
mhert.S[1,1] = 1.0#/(vRms)**2
mhert.SN[0,0] = 1.0#/(xRms)**2
# mheLog = rawe.ocp.ocprt.Logger(mhert,dae)
# return mheLog
def SimulateAndShift(mpcRT,mheRT,sim,simLog):
mheRT.log()
mpcRT.log()
# Get the measurement BEFORE simulating
outs = sim.getOutputs(mpcRT.x[0,:],mpcRT.u[0,:],{})
new_y = np.squeeze(outs['measurements']) + np.random.randn(2)*0.01
# Simulate the system
new_x = sim.step(mpcRT.x[0,:],mpcRT.u[0,:],{})
# Get the last measurement AFTER simulating
outs = sim.getOutputs(new_x,mpcRT.u[0,:],{})
# outsR = Rint.getOutputs(x=np.squeeze(new_x),u=mpcRT.u[0,:])
new_yN = np.array([outs['measurementsN']]) + np.random.randn(1)*0.01
simLog.log(new_x=new_x,new_y=new_y,new_yN=new_yN,new_out=outs)
# shift
mpcRT.shift()
mheRT.shift(new_y=new_y,new_yN=new_yN)
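# (Comment added) One closed-loop step above: log the current MPC/MHE iterates, take
# a noisy measurement of the current state, simulate the plant one sample period
# forward, then shift both horizons and pass the new measurements to the MHE.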
class SimLog(object):
def __init__(self,dae,sim):
self.xNames = dae.xNames()
self.outputNames = dae.outputNames()
# self.uNames = dae.uNames()
self.Ts = sim._ts
l=[]
for n in self.outputNames: l.append([])
self._log = {'x':[],'y':[],'yN':[],'outputs':dict(zip(self.outputNames,l))}
# self.log()
def log(self,new_x=None,new_y=None,new_yN=None,new_out=None):
        if new_x is not None:
            self._log['x'].append(np.array(new_x))
        if new_y is not None:
            self._log['y'].append(np.array(new_y))
        if new_yN is not None:
            self._log['yN'].append(np.array(new_yN))
        if new_out is not None:
            for name in new_out.keys():
                self._log['outputs'][name].append(np.array(new_out[name]))
def _plot(self,names,title,style,when=0,showLegend=True):
if isinstance(names,str):
names = [names]
assert isinstance(names,list)
legend = []
for name in names:
assert isinstance(name,str)
legend.append(name)
# if it's a differential state
if name in self.xNames:
index = self.xNames.index(name)
ys = np.squeeze(self._log['x'])[:,index]
ts = np.arange(len(ys))*self.Ts
plt.plot(ts,ys,style)
if name in self.outputNames:
index = self.outputNames.index(name)
ys = np.squeeze(self._log['outputs'][name])
ts = np.arange(len(ys))*self.Ts
plt.plot(ts,ys,style)
if title is not None:
assert isinstance(title,str), "title must be a string"
plt.title(title)
plt.xlabel('time [s]')
if showLegend is True:
plt.legend(legend)
plt.grid()
def InitializeSim(dae,intOptions):
Ts = intOptions['ts']
if intOptions['type'] == 'Idas':
sim = rawe.sim.Sim(dae,Ts)
elif intOptions['type'] == 'Rintegrator':
from rawe.dae.rienIntegrator import RienIntegrator
nSteps = intOptions['numIntegratorSteps']
Type = intOptions['integratorType']
sim = RienIntegrator(dae,ts=Ts, numIntegratorSteps=nSteps, integratorType=Type)
else:
raise Exception('integrator not supported')
simLog = SimLog(dae,sim)
return sim, simLog
def Fig_plot(names,title=None,style='',when=0,showLegend=True,what=[],mpcLog=None,mheLog=None,simLog=None):
assert isinstance(what,list)
fig = plt.figure()
if title is None:
if isinstance(names,str):
title = names
else:
assert isinstance(names,list)
if len(names) == 1:
title = names[0]
else:
title = str(names)
fig.canvas.set_window_title(str(title))
plt.clf()
if 'mpc' in what:
if mpcLog == None: raise Exception('you must provide a mpc log to plot its variables')
mpcLog._plot(names,None,'k',when='all',showLegend=True)
if 'sim' in what:
if simLog == None: raise Exception('you must provide a sim log to plot its variables')
simLog._plot(names,None,'',when=0,showLegend=True)
if 'mhe' in what:
if mheLog == None: raise Exception('you must provide a mhe log to plot its variables')
N = mheLog._log['x'][0].shape[0]
if not isinstance(names,list):
names = [names]
if names[0] in mheLog.xNames:
mheLog._plot(names,None,'o',when=N-1,showLegend=True)
elif names[0] in mheLog.uNames:
mheLog._plot(names,None,'o',when=N-2,showLegend=True)
def Fig_subplot(names,title=None,style='',when=0,showLegend=True,what=[],mpcLog=None,mheLog=None,simLog=None):
assert isinstance(what,list)
assert isinstance(names,list)
fig = plt.figure()
if title is None:
if isinstance(names,str):
title = names
else:
assert isinstance(names,list)
if len(names) == 1:
title = names[0]
else:
title = str(names)
fig.canvas.set_window_title(str(title))
plt.clf()
n = len(names)
for k,name in enumerate(names):
plt.subplot(n,1,k+1)
if 'mpc' in what:
if mpcLog == None: raise Exception('you must provide a mpc log to plot its variables')
mpcLog._plot(name,None,'k',when='all',showLegend=True)
if 'sim' in what:
if simLog == None: raise Exception('you must provide a sim log to plot its variables')
simLog._plot(name,None,'',when=0,showLegend=True)
if 'mhe' in what:
if mheLog == None: raise Exception('you must provide a mhe log to plot its variables')
N = mheLog._log['x'][0].shape[0]
if not isinstance(name,list):
name = [name]
if name[0] in mheLog.xNames:
mheLog._plot(name,None,'o',when=N-1,showLegend=True)
elif name[0] in mheLog.uNames:
mheLog._plot(name,None,'o',when=N-2,showLegend=True)
| lgpl-3.0 |
HesselTjeerdsma/Cyber-Physical-Pacman-Game | Algor/flask/lib/python2.7/site-packages/scipy/interpolate/ndgriddata.py | 7 | 7483 | """
Convenience interface to N-D interpolation
.. versionadded:: 0.9
"""
from __future__ import division, print_function, absolute_import
import numpy as np
from .interpnd import LinearNDInterpolator, NDInterpolatorBase, \
CloughTocher2DInterpolator, _ndim_coords_from_arrays
from scipy.spatial import cKDTree
__all__ = ['griddata', 'NearestNDInterpolator', 'LinearNDInterpolator',
'CloughTocher2DInterpolator']
#------------------------------------------------------------------------------
# Nearest-neighbour interpolation
#------------------------------------------------------------------------------
class NearestNDInterpolator(NDInterpolatorBase):
"""
NearestNDInterpolator(points, values)
Nearest-neighbour interpolation in N dimensions.
.. versionadded:: 0.9
Methods
-------
__call__
Parameters
----------
x : (Npoints, Ndims) ndarray of floats
Data point coordinates.
y : (Npoints,) ndarray of float or complex
Data values.
rescale : boolean, optional
Rescale points to unit cube before performing interpolation.
This is useful if some of the input dimensions have
incommensurable units and differ by many orders of magnitude.
.. versionadded:: 0.14.0
tree_options : dict, optional
Options passed to the underlying ``cKDTree``.
.. versionadded:: 0.17.0
Notes
-----
Uses ``scipy.spatial.cKDTree``
"""
def __init__(self, x, y, rescale=False, tree_options=None):
NDInterpolatorBase.__init__(self, x, y, rescale=rescale,
need_contiguous=False,
need_values=False)
if tree_options is None:
tree_options = dict()
self.tree = cKDTree(self.points, **tree_options)
self.values = y
def __call__(self, *args):
"""
Evaluate interpolator at given points.
Parameters
----------
xi : ndarray of float, shape (..., ndim)
Points where to interpolate data at.
"""
xi = _ndim_coords_from_arrays(args, ndim=self.points.shape[1])
xi = self._check_call_shape(xi)
xi = self._scale_x(xi)
dist, i = self.tree.query(xi)
return self.values[i]
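# Hedged usage sketch (added; not part of the SciPy source): nearest-neighbour
# interpolation of scattered 2-D samples.
#
#     >>> points = np.random.rand(100, 2)
#     >>> values = np.sin(points[:, 0]) * np.cos(points[:, 1])
#     >>> interp = NearestNDInterpolator(points, values)
#     >>> interp(np.array([[0.5, 0.5]]))  # value at the closest of the 100 samples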
#------------------------------------------------------------------------------
# Convenience interface function
#------------------------------------------------------------------------------
def griddata(points, values, xi, method='linear', fill_value=np.nan,
rescale=False):
"""
Interpolate unstructured D-dimensional data.
Parameters
----------
points : ndarray of floats, shape (n, D)
Data point coordinates. Can either be an array of
shape (n, D), or a tuple of `ndim` arrays.
values : ndarray of float or complex, shape (n,)
Data values.
xi : 2-D ndarray of float or tuple of 1-D array, shape (M, D)
Points at which to interpolate data.
method : {'linear', 'nearest', 'cubic'}, optional
Method of interpolation. One of
``nearest``
return the value at the data point closest to
the point of interpolation. See `NearestNDInterpolator` for
more details.
``linear``
          tessellate the input point set to n-dimensional
simplices, and interpolate linearly on each simplex. See
`LinearNDInterpolator` for more details.
``cubic`` (1-D)
return the value determined from a cubic
spline.
``cubic`` (2-D)
return the value determined from a
piecewise cubic, continuously differentiable (C1), and
approximately curvature-minimizing polynomial surface. See
`CloughTocher2DInterpolator` for more details.
fill_value : float, optional
Value used to fill in for requested points outside of the
convex hull of the input points. If not provided, then the
default is ``nan``. This option has no effect for the
'nearest' method.
rescale : bool, optional
Rescale points to unit cube before performing interpolation.
This is useful if some of the input dimensions have
incommensurable units and differ by many orders of magnitude.
.. versionadded:: 0.14.0
Notes
-----
.. versionadded:: 0.9
Examples
--------
Suppose we want to interpolate the 2-D function
>>> def func(x, y):
... return x*(1-x)*np.cos(4*np.pi*x) * np.sin(4*np.pi*y**2)**2
on a grid in [0, 1]x[0, 1]
>>> grid_x, grid_y = np.mgrid[0:1:100j, 0:1:200j]
but we only know its values at 1000 data points:
>>> points = np.random.rand(1000, 2)
>>> values = func(points[:,0], points[:,1])
This can be done with `griddata` -- below we try out all of the
interpolation methods:
>>> from scipy.interpolate import griddata
>>> grid_z0 = griddata(points, values, (grid_x, grid_y), method='nearest')
>>> grid_z1 = griddata(points, values, (grid_x, grid_y), method='linear')
>>> grid_z2 = griddata(points, values, (grid_x, grid_y), method='cubic')
One can see that the exact result is reproduced by all of the
methods to some degree, but for this smooth function the piecewise
cubic interpolant gives the best results:
>>> import matplotlib.pyplot as plt
>>> plt.subplot(221)
>>> plt.imshow(func(grid_x, grid_y).T, extent=(0,1,0,1), origin='lower')
>>> plt.plot(points[:,0], points[:,1], 'k.', ms=1)
>>> plt.title('Original')
>>> plt.subplot(222)
>>> plt.imshow(grid_z0.T, extent=(0,1,0,1), origin='lower')
>>> plt.title('Nearest')
>>> plt.subplot(223)
>>> plt.imshow(grid_z1.T, extent=(0,1,0,1), origin='lower')
>>> plt.title('Linear')
>>> plt.subplot(224)
>>> plt.imshow(grid_z2.T, extent=(0,1,0,1), origin='lower')
>>> plt.title('Cubic')
>>> plt.gcf().set_size_inches(6, 6)
>>> plt.show()
"""
points = _ndim_coords_from_arrays(points)
if points.ndim < 2:
ndim = points.ndim
else:
ndim = points.shape[-1]
if ndim == 1 and method in ('nearest', 'linear', 'cubic'):
from .interpolate import interp1d
points = points.ravel()
if isinstance(xi, tuple):
if len(xi) != 1:
raise ValueError("invalid number of dimensions in xi")
xi, = xi
# Sort points/values together, necessary as input for interp1d
idx = np.argsort(points)
points = points[idx]
values = values[idx]
if method == 'nearest':
fill_value = 'extrapolate'
ip = interp1d(points, values, kind=method, axis=0, bounds_error=False,
fill_value=fill_value)
return ip(xi)
elif method == 'nearest':
ip = NearestNDInterpolator(points, values, rescale=rescale)
return ip(xi)
elif method == 'linear':
ip = LinearNDInterpolator(points, values, fill_value=fill_value,
rescale=rescale)
return ip(xi)
elif method == 'cubic' and ndim == 2:
ip = CloughTocher2DInterpolator(points, values, fill_value=fill_value,
rescale=rescale)
return ip(xi)
else:
raise ValueError("Unknown interpolation method %r for "
"%d dimensional data" % (method, ndim))
| apache-2.0 |
kcavagnolo/astroML | book_figures/chapter6/fig_GMM_nclusters.py | 3 | 4498 | """
Number of Clusters for Gaussian Mixtures
----------------------------------------
Figure 6.9
The BIC-optimized number of components in a Gaussian mixture model as a
function of the sample size. All three samples (with 100, 1000, and 10,000
points) are drawn from the same distribution: two narrow foreground Gaussians
and two wide background Gaussians. The top-right panel shows the BIC as a
function of the number of components in the mixture. The remaining panels show
the distribution of points in the sample and the 1, 2, and 3 standard deviation
contours of the best-fit mixture model.
"""
# Author: Jake VanderPlas
# License: BSD
# The figure produced by this code is published in the textbook
# "Statistics, Data Mining, and Machine Learning in Astronomy" (2013)
# For more information, see http://astroML.github.com
# To report a bug or issue, use the following forum:
# https://groups.google.com/forum/#!forum/astroml-general
from __future__ import print_function, division
import numpy as np
from matplotlib import pyplot as plt
from scipy.stats import norm
from sklearn.mixture import GMM
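# Note (added): sklearn.mixture.GMM was deprecated in scikit-learn 0.18 and later
# removed; on current scikit-learn the equivalent class is
# sklearn.mixture.GaussianMixture (which exposes covariances_ instead of covars_).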
from astroML.utils import convert_2D_cov
from astroML.plotting.tools import draw_ellipse
#----------------------------------------------------------------------
# This function adjusts matplotlib settings for a uniform feel in the textbook.
# Note that with usetex=True, fonts are rendered with LaTeX. This may
# result in an error if LaTeX is not installed on your system. In that case,
# you can set usetex to False.
from astroML.plotting import setup_text_plots
setup_text_plots(fontsize=8, usetex=True)
#------------------------------------------------------------
# Set up the dataset
# We'll use scikit-learn's Gaussian Mixture Model to sample
# data from a mixture of Gaussians. The usual way of using
# this involves fitting the mixture to data: we'll see that
# below. Here we'll set the internal means, covariances,
# and weights by-hand.
# we'll define clusters as (mu, sigma1, sigma2, alpha, frac)
clusters = [((50, 50), 20, 20, 0, 0.1),
((40, 40), 10, 10, np.pi / 6, 0.6),
((80, 80), 5, 5, np.pi / 3, 0.2),
((60, 60), 30, 30, 0, 0.1)]
gmm_input = GMM(len(clusters), covariance_type='full')
gmm_input.means_ = np.array([c[0] for c in clusters])
gmm_input.covars_ = np.array([convert_2D_cov(*c[1:4]) for c in clusters])
gmm_input.weights_ = np.array([c[4] for c in clusters])
gmm_input.weights_ /= gmm_input.weights_.sum()
#------------------------------------------------------------
# Compute and plot the results
fig = plt.figure(figsize=(5, 5))
fig.subplots_adjust(left=0.11, right=0.9, bottom=0.11, top=0.9,
hspace=0, wspace=0)
ax_list = [fig.add_subplot(s) for s in [221, 223, 224]]
ax_list.append(fig.add_axes([0.62, 0.62, 0.28, 0.28]))
linestyles = ['-', '--', ':']
grid = np.linspace(-5, 105, 70)
Xgrid = np.array(np.meshgrid(grid, grid))
Xgrid = Xgrid.reshape(2, -1).T
Nclusters = np.arange(1, 8)
for Npts, ax, ls in zip([100, 1000, 10000], ax_list, linestyles):
np.random.seed(1)
X = gmm_input.sample(Npts)
# find best number of clusters via BIC
clfs = [GMM(N, n_iter=500).fit(X)
for N in Nclusters]
BICs = np.array([clf.bic(X) for clf in clfs])
print("{0} points convergence:".format(Npts),
[clf.converged_ for clf in clfs])
# plot the BIC
ax_list[3].plot(Nclusters, BICs / Npts, ls, c='k',
label="N=%i" % Npts)
clf = clfs[np.argmin(BICs)]
log_dens = clf.score(Xgrid).reshape((70, 70))
# scatter the points
ax.plot(X[:, 0], X[:, 1], ',k', alpha=0.3, zorder=1)
# plot the components
for i in range(clf.n_components):
mean = clf.means_[i]
cov = clf.covars_[i]
if cov.ndim == 1:
cov = np.diag(cov)
draw_ellipse(mean, cov, ax=ax, fc='none', ec='k', zorder=2)
# label the plot
ax.text(0.05, 0.95, "N = %i points" % Npts,
ha='left', va='top', transform=ax.transAxes,
bbox=dict(fc='w', ec='k'))
ax.set_xlim(-5, 105)
ax.set_ylim(-5, 105)
ax_list[0].xaxis.set_major_formatter(plt.NullFormatter())
ax_list[2].yaxis.set_major_formatter(plt.NullFormatter())
for i in (0, 1):
ax_list[i].set_ylabel('$y$')
for j in (1, 2):
ax_list[j].set_xlabel('$x$')
ax_list[-1].legend(loc=1)
ax_list[-1].set_xlabel('n. clusters')
ax_list[-1].set_ylabel('$BIC / N$')
ax_list[-1].set_ylim(16, 18.5)
plt.show()
| bsd-2-clause |
igabr/Metis_Projects_Chicago_2017 | 04-Project-Fletcher/Phases/Phase_4/kmeans.py | 1 | 3343 | from tweepy_wrapper import *
from s3 import *
from helper_functions import *
from mongo import *
from df_functions import *
import string
import nltk
import spacy
nlp = spacy.load('en')
stopwords = nltk.corpus.stopwords.words('english')
from sklearn.feature_extraction.text import TfidfVectorizer
from collections import Counter
from sklearn.cluster import KMeans
handles_to_follow_list = unpickle_object("final_database_lda_verified.pkl")
handle_names = unpickle_object("verified_handles_lda.pkl")
temp_follow_df = make_df(handles_to_follow_list)
temp_follow_df = filtration(temp_follow_df, "content")
handles_to_follow_list = dataframe_to_dict(temp_follow_df)
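# (Comment added) Per-handle pipeline in go(): lemmatize and filter each tweet with
# spaCy, build a TF-IDF matrix over the cleaned text, cluster it with KMeans, collect
# the top terms of each cluster, and finally intersect those terms with the handle's
# precomputed LDA keywords.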
def go():
    for cnt, handle in enumerate(handle_names):
text = []
totalvocab_1 = []
totalvocab_2 = []
sentence = ""
print("processing {}".format(handle))
tweets = handles_to_follow_list[cnt][handle]['content']
for tweet in tweets:
to_process = nlp(tweet)
for token in to_process:
if token.is_space:
continue
elif token.is_punct:
continue
elif token.is_stop:
continue
elif token.is_digit:
continue
elif len(token) == 1:
continue
elif len(token) == 2:
continue
else:
sentence += str(token.lemma_) + " "
totalvocab_1.append(str(token.lemma_))
totalvocab_2.append(str(token.lemma_))
text.append(sentence)
handles_to_follow_list[cnt][handle]["temp_tfidf"] = text
temp_df = pd.DataFrame.from_dict(handles_to_follow_list[cnt], orient='index')
temp_df = filtration(temp_df, "temp_tfidf")
handles_to_follow_list[cnt] = dataframe_to_dict(temp_df)[0]
text = handles_to_follow_list[cnt][handle]["temp_tfidf"]
del handles_to_follow_list[cnt][handle]["temp_tfidf"]
vocab_frame = pd.DataFrame({'words': totalvocab_1}, index = totalvocab_2)
tfidf_vectorizer = TfidfVectorizer(max_features=200000, stop_words='english', ngram_range=(0,2))
tfidf_matrix = tfidf_vectorizer.fit_transform(text)
if tfidf_matrix.shape[0] < 10:
num_clusters = tfidf_matrix.shape[0]
else:
num_clusters = 10
terms = tfidf_vectorizer.get_feature_names()
km = KMeans(n_clusters=num_clusters, n_jobs=-1)
km.fit(tfidf_matrix)
clusters = km.labels_.tolist()
order_centroids = km.cluster_centers_.argsort()[:, ::-1]
cluster_dict = dict()
for i in range(num_clusters):
for ind in order_centroids[i, :20]:
word = str(vocab_frame.ix[terms[ind].split(' ')].values.tolist()[0][0])
if i not in cluster_dict:
cluster_dict[i] = [word]
else:
cluster_dict[i].append(word)
cluster_values = []
for k,v in cluster_dict.items():
cluster_values.extend(v)
counter_tfidf = Counter(cluster_values)
handles_to_follow_list[cnt][handle]['tfid_counter'] = counter_tfidf
counter_lda = handles_to_follow_list[cnt][handle]["LDA"]
tfidf_set = set()
lda_set = set()
for key, value in counter_tfidf.items():
tfidf_set.add(key)
for key, value in counter_lda.items():
lda_set.add(key)
intersection_set = lda_set.intersection(tfidf_set)
handles_to_follow_list[cnt][handle]["lda_tfid_intersection"] = intersection_set
print("loop complete for {}".format(handle))
pickle_object(handles_to_follow_list, "FINAL_2ND_DEGREE_DATABASE_LDA_TFIDF_VERIFIED.pkl")
print("Script Complete")
if __name__ == '__main__':
go()
| mit |
cl4rke/scikit-learn | sklearn/externals/joblib/parallel.py | 36 | 34375 | """
Helpers for embarrassingly parallel code.
"""
# Author: Gael Varoquaux < gael dot varoquaux at normalesup dot org >
# Copyright: 2010, Gael Varoquaux
# License: BSD 3 clause
from __future__ import division
import os
import sys
import gc
import warnings
from math import sqrt
import functools
import time
import threading
import itertools
from numbers import Integral
try:
import cPickle as pickle
except:
import pickle
from ._multiprocessing_helpers import mp
if mp is not None:
from .pool import MemmapingPool
from multiprocessing.pool import ThreadPool
from .format_stack import format_exc, format_outer_frames
from .logger import Logger, short_format_time
from .my_exceptions import TransportableException, _mk_exception
from .disk import memstr_to_kbytes
from ._compat import _basestring
VALID_BACKENDS = ['multiprocessing', 'threading']
# Environment variables to protect against bad situations when nesting
JOBLIB_SPAWNED_PROCESS = "__JOBLIB_SPAWNED_PARALLEL__"
# In seconds, should be big enough to hide multiprocessing dispatching
# overhead.
# This settings was found by running benchmarks/bench_auto_batching.py
# with various parameters on various platforms.
MIN_IDEAL_BATCH_DURATION = .2
# Should not be too high to avoid stragglers: long jobs running alone
# on a single worker while other workers have no work to process any more.
MAX_IDEAL_BATCH_DURATION = 2
class BatchedCalls(object):
"""Wrap a sequence of (func, args, kwargs) tuples as a single callable"""
def __init__(self, iterator_slice):
self.items = list(iterator_slice)
self._size = len(self.items)
def __call__(self):
return [func(*args, **kwargs) for func, args, kwargs in self.items]
def __len__(self):
return self._size
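# Illustration (added, not part of joblib): a BatchedCalls object simply replays its
# list of (func, args, kwargs) tuples when called, e.g.
#
#     >>> batch = BatchedCalls([(abs, (-1,), {}), (pow, (2, 3), {})])
#     >>> batch()
#     [1, 8]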
###############################################################################
# CPU count that works also when multiprocessing has been disabled via
# the JOBLIB_MULTIPROCESSING environment variable
def cpu_count():
""" Return the number of CPUs.
"""
if mp is None:
return 1
return mp.cpu_count()
###############################################################################
# For verbosity
def _verbosity_filter(index, verbose):
""" Returns False for indices increasingly apart, the distance
depending on the value of verbose.
We use a lag increasing as the square of index
"""
if not verbose:
return True
elif verbose > 10:
return False
if index == 0:
return False
verbose = .5 * (11 - verbose) ** 2
scale = sqrt(index / verbose)
next_scale = sqrt((index + 1) / verbose)
return (int(next_scale) == int(scale))
###############################################################################
class WorkerInterrupt(Exception):
""" An exception that is not KeyboardInterrupt to allow subprocesses
to be interrupted.
"""
pass
###############################################################################
class SafeFunction(object):
""" Wraps a function to make it exception with full traceback in
their representation.
Useful for parallel computing with multiprocessing, for which
exceptions cannot be captured.
"""
def __init__(self, func):
self.func = func
def __call__(self, *args, **kwargs):
try:
return self.func(*args, **kwargs)
except KeyboardInterrupt:
# We capture the KeyboardInterrupt and reraise it as
# something different, as multiprocessing does not
# interrupt processing for a KeyboardInterrupt
raise WorkerInterrupt()
except:
e_type, e_value, e_tb = sys.exc_info()
text = format_exc(e_type, e_value, e_tb, context=10,
tb_offset=1)
if issubclass(e_type, TransportableException):
raise
else:
raise TransportableException(text, e_type)
###############################################################################
def delayed(function, check_pickle=True):
"""Decorator used to capture the arguments of a function.
Pass `check_pickle=False` when:
- performing a possibly repeated check is too costly and has been done
already once outside of the call to delayed.
- when used in conjunction `Parallel(backend='threading')`.
"""
# Try to pickle the input function, to catch the problems early when
# using with multiprocessing:
if check_pickle:
pickle.dumps(function)
def delayed_function(*args, **kwargs):
return function, args, kwargs
try:
delayed_function = functools.wraps(function)(delayed_function)
except AttributeError:
" functools.wraps fails on some callable objects "
return delayed_function
###############################################################################
class ImmediateComputeBatch(object):
"""Sequential computation of a batch of tasks.
This replicates the async computation API but actually does not delay
the computations when joblib.Parallel runs in sequential mode.
"""
def __init__(self, batch):
# Don't delay the application, to avoid keeping the input
# arguments in memory
self.results = batch()
def get(self):
return self.results
###############################################################################
class BatchCompletionCallBack(object):
"""Callback used by joblib.Parallel's multiprocessing backend.
This callable is executed by the parent process whenever a worker process
has returned the results of a batch of tasks.
It is used for progress reporting, to update estimate of the batch
processing duration and to schedule the next batch of tasks to be
processed.
"""
def __init__(self, dispatch_timestamp, batch_size, parallel):
self.dispatch_timestamp = dispatch_timestamp
self.batch_size = batch_size
self.parallel = parallel
def __call__(self, out):
self.parallel.n_completed_tasks += self.batch_size
this_batch_duration = time.time() - self.dispatch_timestamp
if (self.parallel.batch_size == 'auto'
and self.batch_size == self.parallel._effective_batch_size):
# Update the smoothed streaming estimate of the duration of a batch
# from dispatch to completion
old_duration = self.parallel._smoothed_batch_duration
if old_duration == 0:
# First record of duration for this batch size after the last
# reset.
new_duration = this_batch_duration
else:
# Update the exponentially weighted average of the duration of
# batch for the current effective size.
new_duration = 0.8 * old_duration + 0.2 * this_batch_duration
self.parallel._smoothed_batch_duration = new_duration
self.parallel.print_progress()
if self.parallel._original_iterator is not None:
self.parallel.dispatch_next()
###############################################################################
class Parallel(Logger):
''' Helper class for readable parallel mapping.
Parameters
-----------
n_jobs: int, default: 1
The maximum number of concurrently running jobs, such as the number
of Python worker processes when backend="multiprocessing"
or the size of the thread-pool when backend="threading".
If -1 all CPUs are used. If 1 is given, no parallel computing code
is used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all
CPUs but one are used.
backend: str or None, default: 'multiprocessing'
Specify the parallelization backend implementation.
Supported backends are:
- "multiprocessing" used by default, can induce some
            communication and memory overhead when exchanging input and
            output data with the worker Python processes.
- "threading" is a very low-overhead backend but it suffers
from the Python Global Interpreter Lock if the called function
relies a lot on Python objects. "threading" is mostly useful
when the execution bottleneck is a compiled extension that
explicitly releases the GIL (for instance a Cython loop wrapped
in a "with nogil" block or an expensive call to a library such
as NumPy).
verbose: int, optional
The verbosity level: if non zero, progress messages are
printed. Above 50, the output is sent to stdout.
The frequency of the messages increases with the verbosity level.
        If it is more than 10, all iterations are reported.
pre_dispatch: {'all', integer, or expression, as in '3*n_jobs'}
The number of batches (of tasks) to be pre-dispatched.
        Default is '2*n_jobs'. When batch_size="auto" this is a reasonable
        default and the multiprocessing workers should never starve.
batch_size: int or 'auto', default: 'auto'
The number of atomic tasks to dispatch at once to each
worker. When individual evaluations are very fast, multiprocessing
can be slower than sequential computation because of the overhead.
Batching fast computations together can mitigate this.
The ``'auto'`` strategy keeps track of the time it takes for a batch
to complete, and dynamically adjusts the batch size to keep the time
on the order of half a second, using a heuristic. The initial batch
size is 1.
``batch_size="auto"`` with ``backend="threading"`` will dispatch
batches of a single task at a time as the threading backend has
very little overhead and using larger batch size has not proved to
bring any gain in that case.
temp_folder: str, optional
Folder to be used by the pool for memmaping large arrays
for sharing memory with worker processes. If None, this will try in
order:
- a folder pointed by the JOBLIB_TEMP_FOLDER environment variable,
- /dev/shm if the folder exists and is writable: this is a RAMdisk
filesystem available by default on modern Linux distributions,
- the default system temporary folder that can be overridden
with TMP, TMPDIR or TEMP environment variables, typically /tmp
under Unix operating systems.
Only active when backend="multiprocessing".
    max_nbytes : int, str, or None, optional, 1M by default
Threshold on the size of arrays passed to the workers that
triggers automated memory mapping in temp_folder. Can be an int
in Bytes, or a human-readable string, e.g., '1M' for 1 megabyte.
Use None to disable memmaping of large arrays.
Only active when backend="multiprocessing".
Notes
-----
This object uses the multiprocessing module to compute in
parallel the application of a function to many different
arguments. The main functionality it brings in addition to
using the raw multiprocessing API are (see examples for details):
* More readable code, in particular since it avoids
constructing list of arguments.
* Easier debugging:
- informative tracebacks even when the error happens on
the client side
- using 'n_jobs=1' enables to turn off parallel computing
for debugging without changing the codepath
- early capture of pickling errors
* An optional progress meter.
* Interruption of multiprocesses jobs with 'Ctrl-C'
* Flexible pickling control for the communication to and from
the worker processes.
* Ability to use shared memory efficiently with worker
processes for large numpy-based datastructures.
Examples
--------
A simple example:
>>> from math import sqrt
>>> from sklearn.externals.joblib import Parallel, delayed
>>> Parallel(n_jobs=1)(delayed(sqrt)(i**2) for i in range(10))
[0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0]
Reshaping the output when the function has several return
values:
>>> from math import modf
>>> from sklearn.externals.joblib import Parallel, delayed
>>> r = Parallel(n_jobs=1)(delayed(modf)(i/2.) for i in range(10))
>>> res, i = zip(*r)
>>> res
(0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5)
>>> i
(0.0, 0.0, 1.0, 1.0, 2.0, 2.0, 3.0, 3.0, 4.0, 4.0)
The progress meter: the higher the value of `verbose`, the more
messages::
>>> from time import sleep
>>> from sklearn.externals.joblib import Parallel, delayed
>>> r = Parallel(n_jobs=2, verbose=5)(delayed(sleep)(.1) for _ in range(10)) #doctest: +SKIP
[Parallel(n_jobs=2)]: Done 1 out of 10 | elapsed: 0.1s remaining: 0.9s
[Parallel(n_jobs=2)]: Done 3 out of 10 | elapsed: 0.2s remaining: 0.5s
[Parallel(n_jobs=2)]: Done 6 out of 10 | elapsed: 0.3s remaining: 0.2s
[Parallel(n_jobs=2)]: Done 9 out of 10 | elapsed: 0.5s remaining: 0.1s
[Parallel(n_jobs=2)]: Done 10 out of 10 | elapsed: 0.5s finished
Traceback example, note how the line of the error is indicated
as well as the values of the parameter passed to the function that
triggered the exception, even though the traceback happens in the
child process::
>>> from heapq import nlargest
>>> from sklearn.externals.joblib import Parallel, delayed
>>> Parallel(n_jobs=2)(delayed(nlargest)(2, n) for n in (range(4), 'abcde', 3)) #doctest: +SKIP
#...
---------------------------------------------------------------------------
Sub-process traceback:
---------------------------------------------------------------------------
TypeError Mon Nov 12 11:37:46 2012
PID: 12934 Python 2.7.3: /usr/bin/python
...........................................................................
/usr/lib/python2.7/heapq.pyc in nlargest(n=2, iterable=3, key=None)
419 if n >= size:
420 return sorted(iterable, key=key, reverse=True)[:n]
421
422 # When key is none, use simpler decoration
423 if key is None:
--> 424 it = izip(iterable, count(0,-1)) # decorate
425 result = _nlargest(n, it)
426 return map(itemgetter(0), result) # undecorate
427
428 # General case, slowest method
TypeError: izip argument #1 must support iteration
___________________________________________________________________________
Using pre_dispatch in a producer/consumer situation, where the
data is generated on the fly. Note how the producer is first
called a 3 times before the parallel loop is initiated, and then
called to generate new data on the fly. In this case the total
number of iterations cannot be reported in the progress messages::
>>> from math import sqrt
>>> from sklearn.externals.joblib import Parallel, delayed
>>> def producer():
... for i in range(6):
... print('Produced %s' % i)
... yield i
>>> out = Parallel(n_jobs=2, verbose=100, pre_dispatch='1.5*n_jobs')(
... delayed(sqrt)(i) for i in producer()) #doctest: +SKIP
Produced 0
Produced 1
Produced 2
[Parallel(n_jobs=2)]: Done 1 jobs | elapsed: 0.0s
Produced 3
[Parallel(n_jobs=2)]: Done 2 jobs | elapsed: 0.0s
Produced 4
[Parallel(n_jobs=2)]: Done 3 jobs | elapsed: 0.0s
Produced 5
[Parallel(n_jobs=2)]: Done 4 jobs | elapsed: 0.0s
[Parallel(n_jobs=2)]: Done 5 out of 6 | elapsed: 0.0s remaining: 0.0s
[Parallel(n_jobs=2)]: Done 6 out of 6 | elapsed: 0.0s finished
'''
def __init__(self, n_jobs=1, backend='multiprocessing', verbose=0,
pre_dispatch='2 * n_jobs', batch_size='auto', temp_folder=None,
max_nbytes='1M', mmap_mode='r'):
self.verbose = verbose
self._mp_context = None
if backend is None:
# `backend=None` was supported in 0.8.2 with this effect
backend = "multiprocessing"
elif hasattr(backend, 'Pool') and hasattr(backend, 'Lock'):
# Make it possible to pass a custom multiprocessing context as
# backend to change the start method to forkserver or spawn or
# preload modules on the forkserver helper process.
self._mp_context = backend
backend = "multiprocessing"
if backend not in VALID_BACKENDS:
raise ValueError("Invalid backend: %s, expected one of %r"
% (backend, VALID_BACKENDS))
self.backend = backend
self.n_jobs = n_jobs
if (batch_size == 'auto'
or isinstance(batch_size, Integral) and batch_size > 0):
self.batch_size = batch_size
else:
raise ValueError(
"batch_size must be 'auto' or a positive integer, got: %r"
% batch_size)
self.pre_dispatch = pre_dispatch
self._pool = None
self._temp_folder = temp_folder
if isinstance(max_nbytes, _basestring):
self._max_nbytes = 1024 * memstr_to_kbytes(max_nbytes)
else:
self._max_nbytes = max_nbytes
self._mmap_mode = mmap_mode
# Not starting the pool in the __init__ is a design decision, to be
# able to close it ASAP, and not burden the user with closing it.
self._output = None
self._jobs = list()
# A flag used to abort the dispatching of jobs in case an
# exception is found
self._aborting = False
def _dispatch(self, batch):
"""Queue the batch for computing, with or without multiprocessing
WARNING: this method is not thread-safe: it should be only called
indirectly via dispatch_one_batch.
"""
if self._pool is None:
job = ImmediateComputeBatch(batch)
self._jobs.append(job)
self.n_dispatched_batches += 1
self.n_dispatched_tasks += len(batch)
self.n_completed_tasks += len(batch)
if not _verbosity_filter(self.n_dispatched_batches, self.verbose):
self._print('Done %3i tasks | elapsed: %s',
(self.n_completed_tasks,
short_format_time(time.time() - self._start_time)
))
else:
# If job.get() catches an exception, it closes the queue:
if self._aborting:
return
dispatch_timestamp = time.time()
cb = BatchCompletionCallBack(dispatch_timestamp, len(batch), self)
job = self._pool.apply_async(SafeFunction(batch), callback=cb)
self._jobs.append(job)
self.n_dispatched_tasks += len(batch)
self.n_dispatched_batches += 1
def dispatch_next(self):
"""Dispatch more data for parallel processing
This method is meant to be called concurrently by the multiprocessing
callback. We rely on the thread-safety of dispatch_one_batch to protect
against concurrent consumption of the unprotected iterator.
"""
if not self.dispatch_one_batch(self._original_iterator):
self._iterating = False
self._original_iterator = None
def dispatch_one_batch(self, iterator):
"""Prefetch the tasks for the next batch and dispatch them.
The effective size of the batch is computed here.
If there are no more jobs to dispatch, return False, else return True.
The iterator consumption and dispatching is protected by the same
lock so calling this function should be thread safe.
"""
if self.batch_size == 'auto' and self.backend == 'threading':
# Batching is never beneficial with the threading backend
batch_size = 1
elif self.batch_size == 'auto':
old_batch_size = self._effective_batch_size
batch_duration = self._smoothed_batch_duration
if (batch_duration > 0 and
batch_duration < MIN_IDEAL_BATCH_DURATION):
# The current batch size is too small: the duration of the
# processing of a batch of task is not large enough to hide
# the scheduling overhead.
ideal_batch_size = int(
old_batch_size * MIN_IDEAL_BATCH_DURATION / batch_duration)
                # Multiply by two to limit oscillations between min and max.
batch_size = max(2 * ideal_batch_size, 1)
self._effective_batch_size = batch_size
if self.verbose >= 10:
self._print("Batch computation too fast (%.4fs.) "
"Setting batch_size=%d.", (
batch_duration, batch_size))
elif (batch_duration > MAX_IDEAL_BATCH_DURATION and
old_batch_size >= 2):
# The current batch size is too big. If we schedule overly long
# running batches some CPUs might wait with nothing left to do
# while a couple of CPUs a left processing a few long running
# batches. Better reduce the batch size a bit to limit the
# likelihood of scheduling such stragglers.
self._effective_batch_size = batch_size = old_batch_size // 2
if self.verbose >= 10:
self._print("Batch computation too slow (%.2fs.) "
"Setting batch_size=%d.", (
batch_duration, batch_size))
else:
# No batch size adjustment
batch_size = old_batch_size
if batch_size != old_batch_size:
# Reset estimation of the smoothed mean batch duration: this
# estimate is updated in the multiprocessing apply_async
# CallBack as long as the batch_size is constant. Therefore
# we need to reset the estimate whenever we re-tune the batch
# size.
self._smoothed_batch_duration = 0
else:
# Fixed batch size strategy
batch_size = self.batch_size
with self._lock:
tasks = BatchedCalls(itertools.islice(iterator, batch_size))
if not tasks:
# No more tasks available in the iterator: tell caller to stop.
return False
else:
self._dispatch(tasks)
return True
def _print(self, msg, msg_args):
"""Display the message on stout or stderr depending on verbosity"""
# XXX: Not using the logger framework: need to
# learn to use logger better.
if not self.verbose:
return
if self.verbose < 50:
writer = sys.stderr.write
else:
writer = sys.stdout.write
msg = msg % msg_args
writer('[%s]: %s\n' % (self, msg))
def print_progress(self):
"""Display the process of the parallel execution only a fraction
of time, controlled by self.verbose.
"""
if not self.verbose:
return
elapsed_time = time.time() - self._start_time
# This is heuristic code to print only 'verbose' times a messages
# The challenge is that we may not know the queue length
if self._original_iterator:
if _verbosity_filter(self.n_dispatched_batches, self.verbose):
return
self._print('Done %3i tasks | elapsed: %s',
(self.n_completed_tasks,
short_format_time(elapsed_time),
))
else:
index = self.n_dispatched_batches
# We are finished dispatching
total_tasks = self.n_dispatched_tasks
# We always display the first loop
if not index == 0:
# Display depending on the number of remaining items
# A message as soon as we finish dispatching, cursor is 0
cursor = (total_tasks - index + 1
- self._pre_dispatch_amount)
frequency = (total_tasks // self.verbose) + 1
is_last_item = (index + 1 == total_tasks)
if (is_last_item or cursor % frequency):
return
remaining_time = (elapsed_time / (index + 1) *
(self.n_dispatched_tasks - index - 1.))
self._print('Done %3i out of %3i | elapsed: %s remaining: %s',
(index + 1,
total_tasks,
short_format_time(elapsed_time),
short_format_time(remaining_time),
))
def retrieve(self):
self._output = list()
while self._iterating or len(self._jobs) > 0:
if len(self._jobs) == 0:
# Wait for an async callback to dispatch new jobs
time.sleep(0.01)
continue
# We need to be careful: the job queue can be filling up as
# we empty it
if hasattr(self, '_lock'):
self._lock.acquire()
job = self._jobs.pop(0)
if hasattr(self, '_lock'):
self._lock.release()
try:
self._output.extend(job.get())
except tuple(self.exceptions) as exception:
try:
self._aborting = True
self._lock.acquire()
if isinstance(exception,
(KeyboardInterrupt, WorkerInterrupt)):
# We have captured a user interruption, clean up
# everything
if hasattr(self, '_pool'):
self._pool.close()
self._pool.terminate()
# We can now allow subprocesses again
os.environ.pop('__JOBLIB_SPAWNED_PARALLEL__', 0)
raise exception
elif isinstance(exception, TransportableException):
# Capture exception to add information on the local
# stack in addition to the distant stack
this_report = format_outer_frames(context=10,
stack_start=1)
report = """Multiprocessing exception:
%s
---------------------------------------------------------------------------
Sub-process traceback:
---------------------------------------------------------------------------
%s""" % (
this_report,
exception.message,
)
# Convert this to a JoblibException
exception_type = _mk_exception(exception.etype)[0]
raise exception_type(report)
raise exception
finally:
self._lock.release()
def __call__(self, iterable):
if self._jobs:
raise ValueError('This Parallel instance is already running')
n_jobs = self.n_jobs
if n_jobs == 0:
raise ValueError('n_jobs == 0 in Parallel has no meaning')
if n_jobs < 0 and mp is not None:
n_jobs = max(mp.cpu_count() + 1 + n_jobs, 1)
# The list of exceptions that we will capture
self.exceptions = [TransportableException]
self._lock = threading.Lock()
# Whether or not to set an environment flag to track
# multiple process spawning
set_environ_flag = False
if (n_jobs is None or mp is None or n_jobs == 1):
n_jobs = 1
self._pool = None
elif self.backend == 'threading':
self._pool = ThreadPool(n_jobs)
elif self.backend == 'multiprocessing':
if mp.current_process().daemon:
# Daemonic processes cannot have children
n_jobs = 1
self._pool = None
warnings.warn(
'Multiprocessing-backed parallel loops cannot be nested,'
' setting n_jobs=1',
stacklevel=2)
elif threading.current_thread().name != 'MainThread':
                # Prevent posix fork inside non-main posix threads
n_jobs = 1
self._pool = None
warnings.warn(
'Multiprocessing backed parallel loops cannot be nested'
' below threads, setting n_jobs=1',
stacklevel=2)
else:
already_forked = int(os.environ.get('__JOBLIB_SPAWNED_PARALLEL__', 0))
if already_forked:
raise ImportError('[joblib] Attempting to do parallel computing '
'without protecting your import on a system that does '
'not support forking. To use parallel-computing in a '
'script, you must protect your main loop using "if '
"__name__ == '__main__'"
'". Please see the joblib documentation on Parallel '
'for more information'
)
# Make sure to free as much memory as possible before forking
gc.collect()
# Set an environment variable to avoid infinite loops
set_environ_flag = True
poolargs = dict(
max_nbytes=self._max_nbytes,
mmap_mode=self._mmap_mode,
temp_folder=self._temp_folder,
verbose=max(0, self.verbose - 50),
context_id=0, # the pool is used only for one call
)
if self._mp_context is not None:
# Use Python 3.4+ multiprocessing context isolation
poolargs['context'] = self._mp_context
self._pool = MemmapingPool(n_jobs, **poolargs)
            # We are using multiprocessing; we also want to capture
            # KeyboardInterrupts
self.exceptions.extend([KeyboardInterrupt, WorkerInterrupt])
else:
raise ValueError("Unsupported backend: %s" % self.backend)
if self.batch_size == 'auto':
self._effective_batch_size = 1
iterator = iter(iterable)
pre_dispatch = self.pre_dispatch
if pre_dispatch == 'all' or n_jobs == 1:
# prevent further dispatch via multiprocessing callback thread
self._original_iterator = None
self._pre_dispatch_amount = 0
else:
self._original_iterator = iterator
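            # pre_dispatch may be given as an expression string such as
            # '2*n_jobs'; such strings are detected through their 'endswith'
            # attribute and evaluated below with n_jobs in scope to obtain the
            # integer number of tasks to pre-dispatch.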
if hasattr(pre_dispatch, 'endswith'):
pre_dispatch = eval(pre_dispatch)
self._pre_dispatch_amount = pre_dispatch = int(pre_dispatch)
# The main thread will consume the first pre_dispatch items and
# the remaining items will later be lazily dispatched by async
# callbacks upon task completions.
iterator = itertools.islice(iterator, pre_dispatch)
self._start_time = time.time()
self.n_dispatched_batches = 0
self.n_dispatched_tasks = 0
self.n_completed_tasks = 0
self._smoothed_batch_duration = 0.0
try:
if set_environ_flag:
# Set an environment variable to avoid infinite loops
os.environ[JOBLIB_SPAWNED_PROCESS] = '1'
self._iterating = True
while self.dispatch_one_batch(iterator):
pass
if pre_dispatch == "all" or n_jobs == 1:
                # The iterable was consumed all at once by the while loop above.
# No need to wait for async callbacks to trigger to
# consumption.
self._iterating = False
self.retrieve()
# Make sure that we get a last message telling us we are done
elapsed_time = time.time() - self._start_time
self._print('Done %3i out of %3i | elapsed: %s finished',
(len(self._output),
len(self._output),
short_format_time(elapsed_time)
))
finally:
if n_jobs > 1:
self._pool.close()
self._pool.terminate() # terminate does a join()
if self.backend == 'multiprocessing':
os.environ.pop(JOBLIB_SPAWNED_PROCESS, 0)
self._jobs = list()
output = self._output
self._output = None
return output
def __repr__(self):
return '%s(n_jobs=%s)' % (self.__class__.__name__, self.n_jobs)
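# Illustrative usage sketch: assuming the class above is joblib's Parallel and
# that the companion ``delayed`` helper is importable from the installed joblib
# package, calling a Parallel instance on a generator of delayed calls drives
# the dispatch/retrieve machinery implemented above. The helper below is only
# an illustration, not part of the original module.
def _example_parallel_usage():
    """Hypothetical helper: compute sqrt(0)..sqrt(9) with two worker processes."""
    from math import sqrt
    from joblib import Parallel, delayed
    return Parallel(n_jobs=2)(delayed(sqrt)(i) for i in range(10))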
| bsd-3-clause |
sureshthalamati/spark | python/pyspark/sql/session.py | 2 | 34711 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
import sys
import warnings
from functools import reduce
from threading import RLock
if sys.version >= '3':
basestring = unicode = str
xrange = range
else:
from itertools import izip as zip, imap as map
from pyspark import since
from pyspark.rdd import RDD, ignore_unicode_prefix
from pyspark.sql.conf import RuntimeConfig
from pyspark.sql.dataframe import DataFrame
from pyspark.sql.readwriter import DataFrameReader
from pyspark.sql.streaming import DataStreamReader
from pyspark.sql.types import Row, DataType, StringType, StructType, TimestampType, \
_make_type_verifier, _infer_schema, _has_nulltype, _merge_type, _create_converter, \
_parse_datatype_string
from pyspark.sql.utils import install_exception_handler
__all__ = ["SparkSession"]
def _monkey_patch_RDD(sparkSession):
def toDF(self, schema=None, sampleRatio=None):
"""
        Converts the current :class:`RDD` into a :class:`DataFrame`.
        This is a shorthand for ``spark.createDataFrame(rdd, schema, sampleRatio)``.
        :param schema: a :class:`pyspark.sql.types.StructType` or a list of column names
        :param sampleRatio: the sample ratio of rows used for inferring the schema
:return: a DataFrame
>>> rdd.toDF().collect()
[Row(name=u'Alice', age=1)]
"""
return sparkSession.createDataFrame(self, schema, sampleRatio)
RDD.toDF = toDF
class SparkSession(object):
"""The entry point to programming Spark with the Dataset and DataFrame API.
    A SparkSession can be used to create :class:`DataFrame`, register :class:`DataFrame` as
tables, execute SQL over tables, cache tables, and read parquet files.
To create a SparkSession, use the following builder pattern:
>>> spark = SparkSession.builder \\
... .master("local") \\
... .appName("Word Count") \\
... .config("spark.some.config.option", "some-value") \\
... .getOrCreate()
.. autoattribute:: builder
:annotation:
"""
class Builder(object):
"""Builder for :class:`SparkSession`.
"""
_lock = RLock()
_options = {}
@since(2.0)
def config(self, key=None, value=None, conf=None):
"""Sets a config option. Options set using this method are automatically propagated to
both :class:`SparkConf` and :class:`SparkSession`'s own configuration.
            For an existing SparkConf, use the `conf` parameter.
>>> from pyspark.conf import SparkConf
>>> SparkSession.builder.config(conf=SparkConf())
<pyspark.sql.session...
For a (key, value) pair, you can omit parameter names.
>>> SparkSession.builder.config("spark.some.config.option", "some-value")
<pyspark.sql.session...
:param key: a key name string for configuration property
:param value: a value for configuration property
:param conf: an instance of :class:`SparkConf`
"""
with self._lock:
if conf is None:
self._options[key] = str(value)
else:
for (k, v) in conf.getAll():
self._options[k] = v
return self
@since(2.0)
def master(self, master):
"""Sets the Spark master URL to connect to, such as "local" to run locally, "local[4]"
to run locally with 4 cores, or "spark://master:7077" to run on a Spark standalone
cluster.
:param master: a url for spark master
"""
return self.config("spark.master", master)
@since(2.0)
def appName(self, name):
"""Sets a name for the application, which will be shown in the Spark web UI.
If no application name is set, a randomly generated name will be used.
:param name: an application name
"""
return self.config("spark.app.name", name)
@since(2.0)
def enableHiveSupport(self):
"""Enables Hive support, including connectivity to a persistent Hive metastore, support
for Hive serdes, and Hive user-defined functions.
"""
return self.config("spark.sql.catalogImplementation", "hive")
@since(2.0)
def getOrCreate(self):
"""Gets an existing :class:`SparkSession` or, if there is no existing one, creates a
new one based on the options set in this builder.
This method first checks whether there is a valid global default SparkSession, and if
            yes, returns that one. If no valid global default SparkSession exists, the method
creates a new SparkSession and assigns the newly created SparkSession as the global
default.
>>> s1 = SparkSession.builder.config("k1", "v1").getOrCreate()
>>> s1.conf.get("k1") == s1.sparkContext.getConf().get("k1") == "v1"
True
In case an existing SparkSession is returned, the config options specified
in this builder will be applied to the existing SparkSession.
>>> s2 = SparkSession.builder.config("k2", "v2").getOrCreate()
>>> s1.conf.get("k1") == s2.conf.get("k1")
True
>>> s1.conf.get("k2") == s2.conf.get("k2")
True
"""
with self._lock:
from pyspark.context import SparkContext
from pyspark.conf import SparkConf
session = SparkSession._instantiatedSession
if session is None or session._sc._jsc is None:
sparkConf = SparkConf()
for key, value in self._options.items():
sparkConf.set(key, value)
sc = SparkContext.getOrCreate(sparkConf)
# This SparkContext may be an existing one.
for key, value in self._options.items():
# we need to propagate the confs
# before we create the SparkSession. Otherwise, confs like
# warehouse path and metastore url will not be set correctly (
# these confs cannot be changed once the SparkSession is created).
sc._conf.set(key, value)
session = SparkSession(sc)
for key, value in self._options.items():
session._jsparkSession.sessionState().conf().setConfString(key, value)
for key, value in self._options.items():
session.sparkContext._conf.set(key, value)
return session
builder = Builder()
"""A class attribute having a :class:`Builder` to construct :class:`SparkSession` instances"""
_instantiatedSession = None
@ignore_unicode_prefix
def __init__(self, sparkContext, jsparkSession=None):
"""Creates a new SparkSession.
>>> from datetime import datetime
>>> spark = SparkSession(sc)
>>> allTypes = sc.parallelize([Row(i=1, s="string", d=1.0, l=1,
... b=True, list=[1, 2, 3], dict={"s": 0}, row=Row(a=1),
... time=datetime(2014, 8, 1, 14, 1, 5))])
>>> df = allTypes.toDF()
>>> df.createOrReplaceTempView("allTypes")
>>> spark.sql('select i+1, d+1, not b, list[1], dict["s"], time, row.a '
... 'from allTypes where b and i > 0').collect()
[Row((i + CAST(1 AS BIGINT))=2, (d + CAST(1 AS DOUBLE))=2.0, (NOT b)=False, list[1]=2, \
dict[s]=0, time=datetime.datetime(2014, 8, 1, 14, 1, 5), a=1)]
>>> df.rdd.map(lambda x: (x.i, x.s, x.d, x.l, x.b, x.time, x.row.a, x.list)).collect()
[(1, u'string', 1.0, 1, True, datetime.datetime(2014, 8, 1, 14, 1, 5), 1, [1, 2, 3])]
"""
from pyspark.sql.context import SQLContext
self._sc = sparkContext
self._jsc = self._sc._jsc
self._jvm = self._sc._jvm
if jsparkSession is None:
if self._jvm.SparkSession.getDefaultSession().isDefined() \
and not self._jvm.SparkSession.getDefaultSession().get() \
.sparkContext().isStopped():
jsparkSession = self._jvm.SparkSession.getDefaultSession().get()
else:
jsparkSession = self._jvm.SparkSession(self._jsc.sc())
self._jsparkSession = jsparkSession
self._jwrapped = self._jsparkSession.sqlContext()
self._wrapped = SQLContext(self._sc, self, self._jwrapped)
_monkey_patch_RDD(self)
install_exception_handler()
# If we had an instantiated SparkSession attached with a SparkContext
# which is stopped now, we need to renew the instantiated SparkSession.
# Otherwise, we will use invalid SparkSession when we call Builder.getOrCreate.
if SparkSession._instantiatedSession is None \
or SparkSession._instantiatedSession._sc._jsc is None:
SparkSession._instantiatedSession = self
self._jvm.SparkSession.setDefaultSession(self._jsparkSession)
def _repr_html_(self):
return """
<div>
<p><b>SparkSession - {catalogImplementation}</b></p>
{sc_HTML}
</div>
""".format(
catalogImplementation=self.conf.get("spark.sql.catalogImplementation"),
sc_HTML=self.sparkContext._repr_html_()
)
@since(2.0)
def newSession(self):
"""
Returns a new SparkSession as new session, that has separate SQLConf,
registered temporary views and UDFs, but shared SparkContext and
table cache.
"""
return self.__class__(self._sc, self._jsparkSession.newSession())
@property
@since(2.0)
def sparkContext(self):
"""Returns the underlying :class:`SparkContext`."""
return self._sc
@property
@since(2.0)
def version(self):
"""The version of Spark on which this application is running."""
return self._jsparkSession.version()
@property
@since(2.0)
def conf(self):
"""Runtime configuration interface for Spark.
This is the interface through which the user can get and set all Spark and Hadoop
configurations that are relevant to Spark SQL. When getting the value of a config,
this defaults to the value set in the underlying :class:`SparkContext`, if any.
"""
if not hasattr(self, "_conf"):
self._conf = RuntimeConfig(self._jsparkSession.conf())
return self._conf
@property
@since(2.0)
def catalog(self):
"""Interface through which the user may create, drop, alter or query underlying
databases, tables, functions etc.
:return: :class:`Catalog`
"""
from pyspark.sql.catalog import Catalog
if not hasattr(self, "_catalog"):
self._catalog = Catalog(self)
return self._catalog
@property
@since(2.0)
def udf(self):
"""Returns a :class:`UDFRegistration` for UDF registration.
:return: :class:`UDFRegistration`
"""
from pyspark.sql.udf import UDFRegistration
return UDFRegistration(self)
@since(2.0)
def range(self, start, end=None, step=1, numPartitions=None):
"""
Create a :class:`DataFrame` with single :class:`pyspark.sql.types.LongType` column named
``id``, containing elements in a range from ``start`` to ``end`` (exclusive) with
step value ``step``.
:param start: the start value
:param end: the end value (exclusive)
:param step: the incremental step (default: 1)
:param numPartitions: the number of partitions of the DataFrame
:return: :class:`DataFrame`
>>> spark.range(1, 7, 2).collect()
[Row(id=1), Row(id=3), Row(id=5)]
If only one argument is specified, it will be used as the end value.
>>> spark.range(3).collect()
[Row(id=0), Row(id=1), Row(id=2)]
"""
if numPartitions is None:
numPartitions = self._sc.defaultParallelism
if end is None:
jdf = self._jsparkSession.range(0, int(start), int(step), int(numPartitions))
else:
jdf = self._jsparkSession.range(int(start), int(end), int(step), int(numPartitions))
return DataFrame(jdf, self._wrapped)
def _inferSchemaFromList(self, data, names=None):
"""
Infer schema from list of Row or tuple.
:param data: list of Row or tuple
:param names: list of column names
:return: :class:`pyspark.sql.types.StructType`
"""
if not data:
raise ValueError("can not infer schema from empty dataset")
first = data[0]
if type(first) is dict:
warnings.warn("inferring schema from dict is deprecated,"
"please use pyspark.sql.Row instead")
schema = reduce(_merge_type, (_infer_schema(row, names) for row in data))
if _has_nulltype(schema):
raise ValueError("Some of types cannot be determined after inferring")
return schema
def _inferSchema(self, rdd, samplingRatio=None, names=None):
"""
Infer schema from an RDD of Row or tuple.
:param rdd: an RDD of Row or tuple
:param samplingRatio: sampling ratio, or no sampling (default)
:return: :class:`pyspark.sql.types.StructType`
"""
first = rdd.first()
if not first:
raise ValueError("The first row in RDD is empty, "
"can not infer schema")
if type(first) is dict:
warnings.warn("Using RDD of dict to inferSchema is deprecated. "
"Use pyspark.sql.Row instead")
if samplingRatio is None:
schema = _infer_schema(first, names=names)
if _has_nulltype(schema):
for row in rdd.take(100)[1:]:
schema = _merge_type(schema, _infer_schema(row, names=names))
if not _has_nulltype(schema):
break
else:
raise ValueError("Some of types cannot be determined by the "
"first 100 rows, please try again with sampling")
else:
if samplingRatio < 0.99:
rdd = rdd.sample(False, float(samplingRatio))
schema = rdd.map(lambda row: _infer_schema(row, names)).reduce(_merge_type)
return schema
def _createFromRDD(self, rdd, schema, samplingRatio):
"""
Create an RDD for DataFrame from an existing RDD, returns the RDD and schema.
"""
if schema is None or isinstance(schema, (list, tuple)):
struct = self._inferSchema(rdd, samplingRatio, names=schema)
converter = _create_converter(struct)
rdd = rdd.map(converter)
if isinstance(schema, (list, tuple)):
for i, name in enumerate(schema):
struct.fields[i].name = name
struct.names[i] = name
schema = struct
elif not isinstance(schema, StructType):
raise TypeError("schema should be StructType or list or None, but got: %s" % schema)
# convert python objects to sql data
rdd = rdd.map(schema.toInternal)
return rdd, schema
def _createFromLocal(self, data, schema):
"""
Create an RDD for DataFrame from a list or pandas.DataFrame, returns
the RDD and schema.
"""
        # make sure data can be consumed multiple times
if not isinstance(data, list):
data = list(data)
if schema is None or isinstance(schema, (list, tuple)):
struct = self._inferSchemaFromList(data, names=schema)
converter = _create_converter(struct)
data = map(converter, data)
if isinstance(schema, (list, tuple)):
for i, name in enumerate(schema):
struct.fields[i].name = name
struct.names[i] = name
schema = struct
elif not isinstance(schema, StructType):
raise TypeError("schema should be StructType or list or None, but got: %s" % schema)
# convert python objects to sql data
data = [schema.toInternal(row) for row in data]
return self._sc.parallelize(data), schema
def _get_numpy_record_dtype(self, rec):
"""
Used when converting a pandas.DataFrame to Spark using to_records(), this will correct
the dtypes of fields in a record so they can be properly loaded into Spark.
:param rec: a numpy record to check field dtypes
        :return: corrected dtype for a numpy.record, or None if no correction is needed
"""
import numpy as np
cur_dtypes = rec.dtype
col_names = cur_dtypes.names
record_type_list = []
has_rec_fix = False
for i in xrange(len(cur_dtypes)):
curr_type = cur_dtypes[i]
# If type is a datetime64 timestamp, convert to microseconds
# NOTE: if dtype is datetime[ns] then np.record.tolist() will output values as longs,
# conversion from [us] or lower will lead to py datetime objects, see SPARK-22417
if curr_type == np.dtype('datetime64[ns]'):
curr_type = 'datetime64[us]'
has_rec_fix = True
record_type_list.append((str(col_names[i]), curr_type))
return np.dtype(record_type_list) if has_rec_fix else None
def _convert_from_pandas(self, pdf, schema, timezone):
"""
Convert a pandas.DataFrame to list of records that can be used to make a DataFrame
        :return: list of records
"""
if timezone is not None:
from pyspark.sql.types import _check_series_convert_timestamps_tz_local
copied = False
if isinstance(schema, StructType):
for field in schema:
# TODO: handle nested timestamps, such as ArrayType(TimestampType())?
if isinstance(field.dataType, TimestampType):
s = _check_series_convert_timestamps_tz_local(pdf[field.name], timezone)
if s is not pdf[field.name]:
if not copied:
# Copy once if the series is modified to prevent the original
# Pandas DataFrame from being updated
pdf = pdf.copy()
copied = True
pdf[field.name] = s
else:
for column, series in pdf.iteritems():
s = _check_series_convert_timestamps_tz_local(series, timezone)
if s is not series:
if not copied:
# Copy once if the series is modified to prevent the original
# Pandas DataFrame from being updated
pdf = pdf.copy()
copied = True
pdf[column] = s
# Convert pandas.DataFrame to list of numpy records
np_records = pdf.to_records(index=False)
# Check if any columns need to be fixed for Spark to infer properly
if len(np_records) > 0:
record_dtype = self._get_numpy_record_dtype(np_records[0])
if record_dtype is not None:
return [r.astype(record_dtype).tolist() for r in np_records]
# Convert list of numpy records to python lists
return [r.tolist() for r in np_records]
def _create_from_pandas_with_arrow(self, pdf, schema, timezone):
"""
Create a DataFrame from a given pandas.DataFrame by slicing it into partitions, converting
to Arrow data, then sending to the JVM to parallelize. If a schema is passed in, the
data types will be used to coerce the data in Pandas to Arrow conversion.
"""
from pyspark.serializers import ArrowSerializer, _create_batch
from pyspark.sql.types import from_arrow_schema, to_arrow_type, TimestampType
from pyspark.sql.utils import require_minimum_pandas_version, \
require_minimum_pyarrow_version
require_minimum_pandas_version()
require_minimum_pyarrow_version()
from pandas.api.types import is_datetime64_dtype, is_datetime64tz_dtype
# Determine arrow types to coerce data when creating batches
if isinstance(schema, StructType):
arrow_types = [to_arrow_type(f.dataType) for f in schema.fields]
elif isinstance(schema, DataType):
raise ValueError("Single data type %s is not supported with Arrow" % str(schema))
else:
# Any timestamps must be coerced to be compatible with Spark
arrow_types = [to_arrow_type(TimestampType())
if is_datetime64_dtype(t) or is_datetime64tz_dtype(t) else None
for t in pdf.dtypes]
# Slice the DataFrame to be batched
step = -(-len(pdf) // self.sparkContext.defaultParallelism) # round int up
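        # -(-len(pdf) // parallelism) is ceiling division, so the rows are split
        # into at most defaultParallelism slices of `step` rows each.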
pdf_slices = (pdf[start:start + step] for start in xrange(0, len(pdf), step))
# Create Arrow record batches
batches = [_create_batch([(c, t) for (_, c), t in zip(pdf_slice.iteritems(), arrow_types)],
timezone)
for pdf_slice in pdf_slices]
# Create the Spark schema from the first Arrow batch (always at least 1 batch after slicing)
if isinstance(schema, (list, tuple)):
struct = from_arrow_schema(batches[0].schema)
for i, name in enumerate(schema):
struct.fields[i].name = name
struct.names[i] = name
schema = struct
# Create the Spark DataFrame directly from the Arrow data and schema
jrdd = self._sc._serialize_to_jvm(batches, len(batches), ArrowSerializer())
jdf = self._jvm.PythonSQLUtils.arrowPayloadToDataFrame(
jrdd, schema.json(), self._wrapped._jsqlContext)
df = DataFrame(jdf, self._wrapped)
df._schema = schema
return df
@since(2.0)
@ignore_unicode_prefix
def createDataFrame(self, data, schema=None, samplingRatio=None, verifySchema=True):
"""
Creates a :class:`DataFrame` from an :class:`RDD`, a list or a :class:`pandas.DataFrame`.
When ``schema`` is a list of column names, the type of each column
will be inferred from ``data``.
When ``schema`` is ``None``, it will try to infer the schema (column names and types)
from ``data``, which should be an RDD of :class:`Row`,
or :class:`namedtuple`, or :class:`dict`.
When ``schema`` is :class:`pyspark.sql.types.DataType` or a datatype string, it must match
the real data, or an exception will be thrown at runtime. If the given schema is not
:class:`pyspark.sql.types.StructType`, it will be wrapped into a
        :class:`pyspark.sql.types.StructType` as its only field, and the field name will be "value".
        Each record will also be wrapped into a tuple, which can be converted to a row later.
        If schema inference is needed, ``samplingRatio`` is used to determine the ratio of
        rows used for schema inference. The first row will be used if ``samplingRatio`` is ``None``.
        :param data: an RDD of any kind of SQL data representation (e.g. row, tuple, int, boolean,
etc.), or :class:`list`, or :class:`pandas.DataFrame`.
:param schema: a :class:`pyspark.sql.types.DataType` or a datatype string or a list of
column names, default is ``None``. The data type string format equals to
:class:`pyspark.sql.types.DataType.simpleString`, except that top level struct type can
omit the ``struct<>`` and atomic types use ``typeName()`` as their format, e.g. use
``byte`` instead of ``tinyint`` for :class:`pyspark.sql.types.ByteType`. We can also use
``int`` as a short name for ``IntegerType``.
:param samplingRatio: the sample ratio of rows used for inferring
:param verifySchema: verify data types of every row against schema.
:return: :class:`DataFrame`
.. versionchanged:: 2.1
Added verifySchema.
>>> l = [('Alice', 1)]
>>> spark.createDataFrame(l).collect()
[Row(_1=u'Alice', _2=1)]
>>> spark.createDataFrame(l, ['name', 'age']).collect()
[Row(name=u'Alice', age=1)]
>>> d = [{'name': 'Alice', 'age': 1}]
>>> spark.createDataFrame(d).collect()
[Row(age=1, name=u'Alice')]
>>> rdd = sc.parallelize(l)
>>> spark.createDataFrame(rdd).collect()
[Row(_1=u'Alice', _2=1)]
>>> df = spark.createDataFrame(rdd, ['name', 'age'])
>>> df.collect()
[Row(name=u'Alice', age=1)]
>>> from pyspark.sql import Row
>>> Person = Row('name', 'age')
>>> person = rdd.map(lambda r: Person(*r))
>>> df2 = spark.createDataFrame(person)
>>> df2.collect()
[Row(name=u'Alice', age=1)]
>>> from pyspark.sql.types import *
>>> schema = StructType([
... StructField("name", StringType(), True),
... StructField("age", IntegerType(), True)])
>>> df3 = spark.createDataFrame(rdd, schema)
>>> df3.collect()
[Row(name=u'Alice', age=1)]
>>> spark.createDataFrame(df.toPandas()).collect() # doctest: +SKIP
[Row(name=u'Alice', age=1)]
>>> spark.createDataFrame(pandas.DataFrame([[1, 2]])).collect() # doctest: +SKIP
[Row(0=1, 1=2)]
>>> spark.createDataFrame(rdd, "a: string, b: int").collect()
[Row(a=u'Alice', b=1)]
>>> rdd = rdd.map(lambda row: row[1])
>>> spark.createDataFrame(rdd, "int").collect()
[Row(value=1)]
>>> spark.createDataFrame(rdd, "boolean").collect() # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
Py4JJavaError: ...
"""
if isinstance(data, DataFrame):
raise TypeError("data is already a DataFrame")
if isinstance(schema, basestring):
schema = _parse_datatype_string(schema)
elif isinstance(schema, (list, tuple)):
# Must re-encode any unicode strings to be consistent with StructField names
schema = [x.encode('utf-8') if not isinstance(x, str) else x for x in schema]
try:
import pandas
has_pandas = True
except Exception:
has_pandas = False
if has_pandas and isinstance(data, pandas.DataFrame):
from pyspark.sql.utils import require_minimum_pandas_version
require_minimum_pandas_version()
if self.conf.get("spark.sql.execution.pandas.respectSessionTimeZone").lower() \
== "true":
timezone = self.conf.get("spark.sql.session.timeZone")
else:
timezone = None
            # If no schema is supplied by the user, get the names of columns only
if schema is None:
schema = [str(x) if not isinstance(x, basestring) else
(x.encode('utf-8') if not isinstance(x, str) else x)
for x in data.columns]
if self.conf.get("spark.sql.execution.arrow.enabled", "false").lower() == "true" \
and len(data) > 0:
try:
return self._create_from_pandas_with_arrow(data, schema, timezone)
except Exception as e:
from pyspark.util import _exception_message
if self.conf.get("spark.sql.execution.arrow.fallback.enabled", "true") \
.lower() == "true":
msg = (
"createDataFrame attempted Arrow optimization because "
"'spark.sql.execution.arrow.enabled' is set to true; however, "
"failed by the reason below:\n %s\n"
"Attempts non-optimization as "
"'spark.sql.execution.arrow.fallback.enabled' is set to "
"true." % _exception_message(e))
warnings.warn(msg)
else:
msg = (
"createDataFrame attempted Arrow optimization because "
"'spark.sql.execution.arrow.enabled' is set to true; however, "
"failed by the reason below:\n %s\n"
"For fallback to non-optimization automatically, please set true to "
"'spark.sql.execution.arrow.fallback.enabled'." % _exception_message(e))
raise RuntimeError(msg)
data = self._convert_from_pandas(data, schema, timezone)
if isinstance(schema, StructType):
verify_func = _make_type_verifier(schema) if verifySchema else lambda _: True
def prepare(obj):
verify_func(obj)
return obj
elif isinstance(schema, DataType):
dataType = schema
schema = StructType().add("value", schema)
verify_func = _make_type_verifier(
dataType, name="field value") if verifySchema else lambda _: True
def prepare(obj):
verify_func(obj)
return obj,
else:
prepare = lambda obj: obj
if isinstance(data, RDD):
rdd, schema = self._createFromRDD(data.map(prepare), schema, samplingRatio)
else:
rdd, schema = self._createFromLocal(map(prepare, data), schema)
jrdd = self._jvm.SerDeUtil.toJavaArray(rdd._to_java_object_rdd())
jdf = self._jsparkSession.applySchemaToPythonRDD(jrdd.rdd(), schema.json())
df = DataFrame(jdf, self._wrapped)
df._schema = schema
return df
@ignore_unicode_prefix
@since(2.0)
def sql(self, sqlQuery):
"""Returns a :class:`DataFrame` representing the result of the given query.
:return: :class:`DataFrame`
>>> df.createOrReplaceTempView("table1")
>>> df2 = spark.sql("SELECT field1 AS f1, field2 as f2 from table1")
>>> df2.collect()
[Row(f1=1, f2=u'row1'), Row(f1=2, f2=u'row2'), Row(f1=3, f2=u'row3')]
"""
return DataFrame(self._jsparkSession.sql(sqlQuery), self._wrapped)
@since(2.0)
def table(self, tableName):
"""Returns the specified table as a :class:`DataFrame`.
:return: :class:`DataFrame`
>>> df.createOrReplaceTempView("table1")
>>> df2 = spark.table("table1")
>>> sorted(df.collect()) == sorted(df2.collect())
True
"""
return DataFrame(self._jsparkSession.table(tableName), self._wrapped)
@property
@since(2.0)
def read(self):
"""
Returns a :class:`DataFrameReader` that can be used to read data
in as a :class:`DataFrame`.
:return: :class:`DataFrameReader`
"""
return DataFrameReader(self._wrapped)
@property
@since(2.0)
def readStream(self):
"""
Returns a :class:`DataStreamReader` that can be used to read data streams
as a streaming :class:`DataFrame`.
.. note:: Evolving.
:return: :class:`DataStreamReader`
"""
return DataStreamReader(self._wrapped)
@property
@since(2.0)
def streams(self):
"""Returns a :class:`StreamingQueryManager` that allows managing all the
:class:`StreamingQuery` StreamingQueries active on `this` context.
.. note:: Evolving.
:return: :class:`StreamingQueryManager`
"""
from pyspark.sql.streaming import StreamingQueryManager
return StreamingQueryManager(self._jsparkSession.streams())
@since(2.0)
def stop(self):
"""Stop the underlying :class:`SparkContext`.
"""
self._sc.stop()
# We should clean the default session up. See SPARK-23228.
self._jvm.SparkSession.clearDefaultSession()
SparkSession._instantiatedSession = None
@since(2.0)
def __enter__(self):
"""
Enable 'with SparkSession.builder.(...).getOrCreate() as session: app' syntax.
"""
return self
@since(2.0)
def __exit__(self, exc_type, exc_val, exc_tb):
"""
Enable 'with SparkSession.builder.(...).getOrCreate() as session: app' syntax.
Specifically stop the SparkSession on exit of the with block.
"""
self.stop()
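# Illustrative sketch of the context-manager pattern enabled by __enter__ and
# __exit__ above: the session is stopped automatically when the with-block
# exits. The helper below is hypothetical and added only for illustration.
def _example_session_as_context_manager():
    """Build a local session, run a tiny query, and let stop() be called on exit."""
    with SparkSession.builder.master("local").appName("ContextExample").getOrCreate() as session:
        return session.range(3).collect()  # [Row(id=0), Row(id=1), Row(id=2)]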
def _test():
import os
import doctest
from pyspark.context import SparkContext
from pyspark.sql import Row
import pyspark.sql.session
os.chdir(os.environ["SPARK_HOME"])
globs = pyspark.sql.session.__dict__.copy()
sc = SparkContext('local[4]', 'PythonTest')
globs['sc'] = sc
globs['spark'] = SparkSession(sc)
globs['rdd'] = rdd = sc.parallelize(
[Row(field1=1, field2="row1"),
Row(field1=2, field2="row2"),
Row(field1=3, field2="row3")])
globs['df'] = rdd.toDF()
(failure_count, test_count) = doctest.testmod(
pyspark.sql.session, globs=globs,
optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE)
globs['sc'].stop()
if failure_count:
sys.exit(-1)
if __name__ == "__main__":
_test()
| apache-2.0 |
planetarymike/IDL-Colorbars | IDL_py_test/019_Hue_Sat_Lightness_1.py | 1 | 8638 | from matplotlib.colors import LinearSegmentedColormap
from numpy import nan, inf
cm_data = [[0.984314, 0., 0.0117647],
[0.984314, 0., 0.0117647],
[0.984314, 0., 0.0352941],
[0.984314, 0.00392157, 0.0588235],
[0.988235, 0.00392157, 0.0862745],
[0.988235, 0.00784314, 0.109804],
[0.992157, 0.00784314, 0.133333],
[0.988235, 0.0156863, 0.160784],
[0.988235, 0.0196078, 0.188235],
[0.984314, 0.027451, 0.215686],
[0.984314, 0.0313725, 0.243137],
[0.980392, 0.027451, 0.258824],
[0.980392, 0.0352941, 0.286275],
[0.976471, 0.0509804, 0.317647],
[0.976471, 0.0313725, 0.329412],
[0.976471, 0.0588235, 0.368627],
[0.972549, 0.0666667, 0.392157],
[0.972549, 0.0705882, 0.415686],
[0.968627, 0.0784314, 0.439216],
[0.968627, 0.0823529, 0.462745],
[0.964706, 0.0901961, 0.486275],
[0.964706, 0.0941176, 0.509804],
[0.964706, 0.101961, 0.529412],
[0.960784, 0.105882, 0.552941],
[0.960784, 0.109804, 0.576471],
[0.956863, 0.117647, 0.596078],
[0.956863, 0.121569, 0.615686],
[0.952941, 0.129412, 0.635294],
[0.952941, 0.133333, 0.658824],
[0.952941, 0.137255, 0.678431],
[0.94902, 0.145098, 0.698039],
[0.94902, 0.14902, 0.717647],
[0.945098, 0.156863, 0.733333],
[0.945098, 0.160784, 0.752941],
[0.945098, 0.164706, 0.772549],
[0.941176, 0.172549, 0.788235],
[0.941176, 0.176471, 0.807843],
[0.941176, 0.180392, 0.823529],
[0.937255, 0.188235, 0.843137],
[0.937255, 0.192157, 0.858824],
[0.937255, 0.196078, 0.87451],
[0.933333, 0.203922, 0.890196],
[0.933333, 0.207843, 0.905882],
[0.929412, 0.215686, 0.921569],
[0.921569, 0.219608, 0.929412],
[0.901961, 0.223529, 0.929412],
[0.886275, 0.231373, 0.92549],
[0.870588, 0.235294, 0.92549],
[0.85098, 0.239216, 0.92549],
[0.835294, 0.247059, 0.921569],
[0.819608, 0.25098, 0.921569],
[0.803922, 0.254902, 0.921569],
[0.788235, 0.258824, 0.921569],
[0.772549, 0.266667, 0.917647],
[0.760784, 0.270588, 0.917647],
[0.745098, 0.27451, 0.917647],
[0.729412, 0.282353, 0.913725],
[0.717647, 0.286275, 0.913725],
[0.701961, 0.290196, 0.913725],
[0.690196, 0.298039, 0.909804],
[0.678431, 0.301961, 0.909804],
[0.662745, 0.305882, 0.909804],
[0.65098, 0.309804, 0.909804],
[0.639216, 0.317647, 0.905882],
[0.627451, 0.321569, 0.905882],
[0.615686, 0.32549, 0.905882],
[0.603922, 0.329412, 0.901961],
[0.592157, 0.333333, 0.901961],
[0.580392, 0.337255, 0.901961],
[0.568627, 0.341176, 0.901961],
[0.560784, 0.345098, 0.901961],
[0.54902, 0.352941, 0.898039],
[0.537255, 0.356863, 0.898039],
[0.529412, 0.360784, 0.898039],
[0.521569, 0.364706, 0.898039],
[0.509804, 0.372549, 0.894118],
[0.501961, 0.376471, 0.894118],
[0.494118, 0.380392, 0.894118],
[0.486275, 0.384314, 0.894118],
[0.478431, 0.388235, 0.894118],
[0.470588, 0.396078, 0.890196],
[0.462745, 0.4, 0.890196],
[0.454902, 0.403922, 0.890196],
[0.447059, 0.407843, 0.890196],
[0.439216, 0.411765, 0.890196],
[0.435294, 0.419608, 0.886275],
[0.427451, 0.423529, 0.886275],
[0.427451, 0.431373, 0.886275],
[0.431373, 0.447059, 0.886275],
[0.435294, 0.462745, 0.886275],
[0.443137, 0.478431, 0.882353],
[0.447059, 0.490196, 0.882353],
[0.45098, 0.505882, 0.882353],
[0.454902, 0.517647, 0.882353],
[0.458824, 0.533333, 0.882353],
[0.462745, 0.545098, 0.882353],
[0.466667, 0.560784, 0.882353],
[0.47451, 0.572549, 0.878431],
[0.478431, 0.584314, 0.878431],
[0.482353, 0.596078, 0.878431],
[0.490196, 0.611765, 0.878431],
[0.490196, 0.619608, 0.878431],
[0.494118, 0.631373, 0.878431],
[0.498039, 0.643137, 0.878431],
[0.501961, 0.654902, 0.878431],
[0.505882, 0.666667, 0.878431],
[0.513725, 0.678431, 0.87451],
[0.517647, 0.686275, 0.87451],
[0.521569, 0.698039, 0.87451],
[0.529412, 0.709804, 0.87451],
[0.533333, 0.721569, 0.87451],
[0.537255, 0.729412, 0.87451],
[0.541176, 0.737255, 0.87451],
[0.545098, 0.74902, 0.87451],
[0.54902, 0.756863, 0.87451],
[0.54902, 0.764706, 0.87451],
[0.552941, 0.772549, 0.87451],
[0.556863, 0.780392, 0.87451],
[0.564706, 0.788235, 0.870588],
[0.568627, 0.796078, 0.870588],
[0.572549, 0.803922, 0.870588],
[0.576471, 0.811765, 0.870588],
[0.580392, 0.819608, 0.870588],
[0.584314, 0.827451, 0.870588],
[0.588235, 0.835294, 0.870588],
[0.592157, 0.843137, 0.870588],
[0.596078, 0.847059, 0.870588],
[0.6, 0.854902, 0.870588],
[0.603922, 0.862745, 0.870588],
[0.607843, 0.866667, 0.870588],
[0.611765, 0.870588, 0.866667],
[0.615686, 0.870588, 0.862745],
[0.619608, 0.870588, 0.854902],
[0.623529, 0.870588, 0.85098],
[0.627451, 0.870588, 0.847059],
[0.631373, 0.870588, 0.839216],
[0.635294, 0.870588, 0.835294],
[0.639216, 0.870588, 0.831373],
[0.643137, 0.870588, 0.827451],
[0.647059, 0.870588, 0.823529],
[0.65098, 0.870588, 0.815686],
[0.654902, 0.870588, 0.811765],
[0.658824, 0.870588, 0.807843],
[0.662745, 0.870588, 0.807843],
[0.666667, 0.870588, 0.803922],
[0.670588, 0.870588, 0.8],
[0.670588, 0.87451, 0.796078],
[0.67451, 0.87451, 0.792157],
[0.682353, 0.87451, 0.792157],
[0.682353, 0.87451, 0.788235],
[0.686275, 0.87451, 0.784314],
[0.690196, 0.87451, 0.784314],
[0.694118, 0.87451, 0.780392],
[0.698039, 0.87451, 0.776471],
[0.701961, 0.87451, 0.776471],
[0.705882, 0.87451, 0.772549],
[0.709804, 0.87451, 0.772549],
[0.713725, 0.87451, 0.772549],
[0.717647, 0.87451, 0.768627],
[0.721569, 0.878431, 0.772549],
[0.72549, 0.878431, 0.768627],
[0.72549, 0.878431, 0.768627],
[0.729412, 0.878431, 0.764706],
[0.733333, 0.878431, 0.764706],
[0.737255, 0.878431, 0.764706],
[0.741176, 0.878431, 0.764706],
[0.745098, 0.878431, 0.764706],
[0.745098, 0.882353, 0.764706],
[0.74902, 0.882353, 0.764706],
[0.752941, 0.882353, 0.764706],
[0.756863, 0.882353, 0.764706],
[0.760784, 0.882353, 0.764706],
[0.764706, 0.882353, 0.764706],
[0.768627, 0.882353, 0.768627],
[0.772549, 0.886275, 0.768627],
[0.780392, 0.886275, 0.772549],
[0.784314, 0.886275, 0.776471],
[0.792157, 0.886275, 0.780392],
[0.796078, 0.886275, 0.784314],
[0.8, 0.886275, 0.788235],
[0.807843, 0.890196, 0.788235],
[0.811765, 0.890196, 0.792157],
[0.815686, 0.890196, 0.796078],
[0.823529, 0.890196, 0.8],
[0.827451, 0.890196, 0.803922],
[0.831373, 0.894118, 0.803922],
[0.835294, 0.894118, 0.807843],
[0.839216, 0.894118, 0.811765],
[0.843137, 0.894118, 0.815686],
[0.847059, 0.894118, 0.819608],
[0.85098, 0.898039, 0.819608],
[0.854902, 0.898039, 0.823529],
[0.858824, 0.898039, 0.827451],
[0.862745, 0.898039, 0.831373],
[0.862745, 0.898039, 0.831373],
[0.866667, 0.898039, 0.835294],
[0.870588, 0.901961, 0.835294],
[0.87451, 0.901961, 0.839216],
[0.878431, 0.901961, 0.843137],
[0.882353, 0.901961, 0.847059],
[0.886275, 0.905882, 0.847059],
[0.886275, 0.905882, 0.85098],
[0.890196, 0.909804, 0.854902],
[0.894118, 0.909804, 0.854902],
[0.894118, 0.909804, 0.858824],
[0.898039, 0.909804, 0.862745],
[0.901961, 0.909804, 0.866667],
[0.901961, 0.913725, 0.866667],
[0.905882, 0.913725, 0.870588],
[0.909804, 0.913725, 0.87451],
[0.909804, 0.917647, 0.87451],
[0.913725, 0.917647, 0.878431],
[0.913725, 0.917647, 0.882353],
[0.917647, 0.917647, 0.886275],
[0.921569, 0.921569, 0.886275],
[0.921569, 0.921569, 0.890196],
[0.921569, 0.921569, 0.894118],
[0.92549, 0.92549, 0.894118],
[0.92549, 0.92549, 0.898039],
[0.92549, 0.92549, 0.901961],
[0.929412, 0.92549, 0.901961],
[0.929412, 0.92549, 0.905882],
[0.929412, 0.929412, 0.909804],
[0.933333, 0.929412, 0.909804],
[0.933333, 0.929412, 0.913725],
[0.937255, 0.929412, 0.913725],
[0.937255, 0.933333, 0.917647],
[0.937255, 0.933333, 0.921569],
[0.941176, 0.933333, 0.921569],
[0.941176, 0.937255, 0.92549],
[0.941176, 0.937255, 0.929412],
[0.945098, 0.937255, 0.929412],
[0.94902, 0.941176, 0.933333],
[0.94902, 0.941176, 0.933333],
[0.94902, 0.945098, 0.937255],
[0.94902, 0.945098, 0.941176],
[0.952941, 0.94902, 0.941176],
[0.952941, 0.94902, 0.945098],
[0.956863, 0.94902, 0.945098],
[0.956863, 0.952941, 0.94902],
[0.956863, 0.952941, 0.952941],
[0.960784, 0.956863, 0.952941],
[0.960784, 0.956863, 0.956863],
[0.964706, 0.960784, 0.956863],
[0.964706, 0.960784, 0.960784],
[0.964706, 0.964706, 0.964706],
[0.968627, 0.964706, 0.964706],
[0.968627, 0.968627, 0.968627],
[0.972549, 0.968627, 0.968627],
[0.972549, 0.972549, 0.972549],
[0.976471, 0.972549, 0.972549],
[0.976471, 0.976471, 0.976471],
[0.980392, 0.976471, 0.976471],
[0.980392, 0.980392, 0.980392],
[0.984314, 0.980392, 0.980392],
[0.984314, 0.980392, 0.980392]]
test_cm = LinearSegmentedColormap.from_list(__file__, cm_data)
if __name__ == "__main__":
import matplotlib.pyplot as plt
import numpy as np
try:
from pycam02ucs.cm.viscm import viscm
viscm(test_cm)
except ImportError:
print("pycam02ucs not found, falling back on simple display")
plt.imshow(np.linspace(0, 100, 256)[None, :], aspect='auto',
cmap=test_cm)
plt.show()
| gpl-2.0 |
intel-analytics/analytics-zoo | pyzoo/zoo/chronos/model/tcmf/DeepGLO.py | 1 | 32279 | # Copyright 2018 Analytics Zoo Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This file is adapted from the DeepGlo Project. https://github.com/rajatsen91/deepglo
#
# Note: This license has also been called the "New BSD License" or "Modified BSD License". See also
# the 2-clause BSD License.
#
# Copyright (c) 2019 The DeepGLO Project.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification, are permitted
# provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this list of conditions
# and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice, this list of
# conditions and the following disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors may be used to
# endorse or promote products derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
# AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
# OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from __future__ import print_function
import torch
import numpy as np
import pandas as pd
import torch.nn as nn
import torch.optim as optim
from torch.autograd import Variable
from zoo.chronos.model.tcmf.data_loader import TCMFDataLoader
from zoo.chronos.model.tcmf.local_model import TemporalConvNet, LocalModel
from zoo.chronos.model.tcmf.time import TimeCovariates
import copy
import pickle
import logging
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
console = logging.StreamHandler()
logger.addHandler(console)
def get_model(A, y, lamb=0):
"""
Regularized least-squares
"""
n_col = A.shape[1]
return np.linalg.lstsq(
A.T.dot(A) + lamb * np.identity(n_col), A.T.dot(y), rcond=None
)
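# A minimal, hypothetical sanity check of get_model, added for illustration:
# with lamb=0 the regularized normal equations reduce to ordinary least
# squares, so the two solutions coincide for a full-column-rank A.
def _example_get_model():
    A = np.array([[1.0, 0.0], [0.0, 1.0], [1.0, 1.0]])
    y = np.array([1.0, 2.0, 3.0])
    x_ridge = get_model(A, y, lamb=0)[0]
    x_ols = np.linalg.lstsq(A, y, rcond=None)[0]
    assert np.allclose(x_ridge, x_ols)
    return x_ridge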
class DeepGLO(object):
def __init__(
self,
vbsize=150,
hbsize=256,
num_channels_X=[32, 32, 32, 32, 1],
num_channels_Y=[32, 32, 32, 32, 1],
kernel_size=7,
dropout=0.2,
rank=64,
kernel_size_Y=7,
lr=0.0005,
normalize=False,
use_time=True,
svd=False,
forward_cov=False,
):
self.use_time = use_time
self.dropout = dropout
self.forward_cov = forward_cov
self.Xseq = TemporalConvNet(
num_inputs=1,
num_channels=num_channels_X,
kernel_size=kernel_size,
dropout=dropout,
init=True,
)
self.vbsize = vbsize
self.hbsize = hbsize
self.num_channels_X = num_channels_X
self.num_channels_Y = num_channels_Y
self.kernel_size_Y = kernel_size_Y
self.rank = rank
self.kernel_size = kernel_size
self.lr = lr
self.normalize = normalize
self.svd = svd
def tensor2d_to_temporal(self, T):
T = T.view(1, T.size(0), T.size(1))
T = T.transpose(0, 1)
return T
def temporal_to_tensor2d(self, T):
T = T.view(T.size(0), T.size(2))
return T
def calculate_newX_loss_vanilla(self, Xn, Fn, Yn, Xf, alpha):
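        # l1 measures how well Fn @ Xn reproduces the observed block Yn, while
        # l2 keeps Xn close to the TCN forecast Xf; alpha trades off the two terms.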
Yout = torch.mm(Fn, Xn)
cr1 = nn.L1Loss()
cr2 = nn.MSELoss()
l1 = cr2(Yout, Yn) / torch.mean(Yn ** 2)
l2 = cr2(Xn, Xf) / torch.mean(Xf ** 2)
return (1 - alpha) * l1 + alpha * l2
def recover_future_X(
self,
last_step,
future,
num_epochs=50,
alpha=0.5,
vanilla=True,
tol=1e-7,
):
rg = max(
1 + 2 * (self.kernel_size - 1) * 2 ** (len(self.num_channels_X) - 1),
1 + 2 * (self.kernel_size_Y - 1) * 2 ** (len(self.num_channels_Y) - 1),
)
X = self.X[:, last_step - rg: last_step]
X = self.tensor2d_to_temporal(X)
outX = self.predict_future(model=self.Xseq, inp=X, future=future)
outX = self.temporal_to_tensor2d(outX)
Xf = outX[:, -future::]
Yn = self.Ymat[:, last_step: last_step + future]
Yn = torch.from_numpy(Yn).float()
Fn = self.F
Xt = torch.zeros(self.rank, future).float()
Xn = torch.normal(Xt, 0.1)
lprev = 0
for i in range(num_epochs):
Xn = Variable(Xn, requires_grad=True)
optim_Xn = optim.Adam(params=[Xn], lr=self.lr)
optim_Xn.zero_grad()
loss = self.calculate_newX_loss_vanilla(
Xn, Fn.detach(), Yn.detach(), Xf.detach(), alpha
)
loss.backward()
optim_Xn.step()
# Xn = torch.clamp(Xn.detach(), min=0)
if np.abs(lprev - loss.item()) <= tol:
break
if i % 1000 == 0:
print(f"Recovery Loss of epoch {i} is: " + str(loss.item()))
lprev = loss.item()
return Xn.detach()
def step_factX_loss(self, inp, out, last_vindex, last_hindex, reg=0.0):
Xout = self.X[:, last_hindex + 1: last_hindex + 1 + out.size(2)]
Fout = self.F[self.D.I[last_vindex: last_vindex + out.size(0)], :]
Xout = Variable(Xout, requires_grad=True)
out = self.temporal_to_tensor2d(out)
optim_X = optim.Adam(params=[Xout], lr=self.lr)
Hout = torch.matmul(Fout, Xout)
optim_X.zero_grad()
loss = torch.mean(torch.pow(Hout - out.detach(), 2))
l2 = torch.mean(torch.pow(Xout, 2))
r = loss.detach() / l2.detach()
loss = loss + r * reg * l2
loss.backward()
optim_X.step()
# Xout = torch.clamp(Xout, min=0)
self.X[:, last_hindex + 1: last_hindex + 1 + inp.size(2)] = Xout.detach()
return loss
def step_factF_loss(self, inp, out, last_vindex, last_hindex, reg=0.0):
Xout = self.X[:, last_hindex + 1: last_hindex + 1 + out.size(2)]
Fout = self.F[self.D.I[last_vindex: last_vindex + out.size(0)], :]
Fout = Variable(Fout, requires_grad=True)
optim_F = optim.Adam(params=[Fout], lr=self.lr)
out = self.temporal_to_tensor2d(out)
Hout = torch.matmul(Fout, Xout)
optim_F.zero_grad()
loss = torch.mean(torch.pow(Hout - out.detach(), 2))
l2 = torch.mean(torch.pow(Fout, 2))
r = loss.detach() / l2.detach()
loss = loss + r * reg * l2
loss.backward()
optim_F.step()
self.F[
self.D.I[last_vindex: last_vindex + inp.size(0)], :
] = Fout.detach()
return loss
def step_temporal_loss_X(self, inp, last_vindex, last_hindex):
Xin = self.X[:, last_hindex: last_hindex + inp.size(2)]
Xout = self.X[:, last_hindex + 1: last_hindex + 1 + inp.size(2)]
for p in self.Xseq.parameters():
p.requires_grad = False
Xin = Variable(Xin, requires_grad=True)
Xout = Variable(Xout, requires_grad=True)
optim_out = optim.Adam(params=[Xout], lr=self.lr)
Xin = self.tensor2d_to_temporal(Xin)
Xout = self.tensor2d_to_temporal(Xout)
hatX = self.Xseq(Xin)
optim_out.zero_grad()
loss = torch.mean(torch.pow(Xout - hatX.detach(), 2))
loss.backward()
optim_out.step()
# Xout = torch.clamp(Xout, min=0)
temp = self.temporal_to_tensor2d(Xout.detach())
self.X[:, last_hindex + 1: last_hindex + 1 + inp.size(2)] = temp
return loss
def predict_future_batch(self, model, inp, future=10):
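        # Autoregressive rollout: the last predicted step is appended to the
        # input and the network is re-applied, one step at a time, until
        # `future` new steps have been generated.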
out = model(inp)
output = out[:, :, out.size(2) - 1].view(out.size(0), out.size(1), 1)
out = torch.cat((inp, output), dim=2)
for i in range(future - 1):
inp = out
out = model(inp)
output = out[:, :, out.size(2) - 1].view(out.size(0), out.size(1), 1)
out = torch.cat((inp, output), dim=2)
out = self.temporal_to_tensor2d(out)
out = np.array(out.detach())
return out
def predict_future(self, model, inp, future=10, bsize=90):
n = inp.size(0)
ids = np.arange(0, n, bsize)
ids = list(ids) + [n]
out = self.predict_future_batch(model, inp[ids[0]: ids[1], :, :], future)
for i in range(1, len(ids) - 1):
temp = self.predict_future_batch(
model, inp[ids[i]: ids[i + 1], :, :], future
)
out = np.vstack([out, temp])
out = torch.from_numpy(out).float()
return self.tensor2d_to_temporal(out)
def predict_global(
self, ind, last_step=100, future=10, normalize=False, bsize=90
):
if ind is None:
ind = np.arange(self.Ymat.shape[0])
self.Xseq = self.Xseq.eval()
rg = max(
1 + 2 * (self.kernel_size - 1) * 2 ** (len(self.num_channels_X) - 1),
1 + 2 * (self.kernel_size_Y - 1) * 2 ** (len(self.num_channels_Y) - 1),
)
X = self.X[:, last_step - rg: last_step]
n = X.size(0)
T = X.size(1)
X = self.tensor2d_to_temporal(X)
outX = self.predict_future(
model=self.Xseq, inp=X, future=future, bsize=bsize
)
outX = self.temporal_to_tensor2d(outX)
F = self.F
Y = torch.matmul(F, outX)
Y = np.array(Y[ind, :].detach())
del F
for p in self.Xseq.parameters():
p.requires_grad = True
if normalize:
Y = Y - self.mini
Y = Y * self.s[ind, None] + self.m[ind, None]
return Y
else:
return Y
def train_Xseq(self, Ymat, num_epochs=20, val_len=24, early_stop=False, tenacity=3):
seq = self.Xseq
num_channels = self.num_channels_X
kernel_size = self.kernel_size
vbsize = min(self.vbsize, Ymat.shape[0] / 2)
for p in seq.parameters():
p.requires_grad = True
TC = LocalModel(
Ymat=Ymat,
num_inputs=1,
num_channels=num_channels,
kernel_size=kernel_size,
vbsize=vbsize,
hbsize=self.hbsize,
normalize=False,
end_index=self.end_index - val_len,
val_len=val_len,
lr=self.lr,
)
TC.train_model(num_epochs=num_epochs, early_stop=early_stop, tenacity=tenacity)
self.Xseq = TC.seq
def train_factors(
self,
reg_X=0.0,
reg_F=0.0,
mod=5,
val_len=24,
early_stop=False,
tenacity=3,
ind=None,
seed=False,
):
self.D.epoch = 0
self.D.vindex = 0
self.D.hindex = 0
for p in self.Xseq.parameters():
p.requires_grad = True
l_F = [0.0]
l_X = [0.0]
l_X_temporal = [0.0]
iter_count = 0
vae = float("inf")
scount = 0
Xbest = self.X.clone()
Fbest = self.F.clone()
while self.D.epoch < self.num_epochs:
last_epoch = self.D.epoch
last_vindex = self.D.vindex
last_hindex = self.D.hindex
inp, out, vindex, hindex = self.D.next_batch()
step_l_F = self.step_factF_loss(inp, out, last_vindex, last_hindex, reg=reg_F)
l_F = l_F + [step_l_F.item()]
step_l_X = self.step_factX_loss(inp, out, last_vindex, last_hindex, reg=reg_X)
l_X = l_X + [step_l_X.item()]
if seed is False and iter_count % mod == 1:
l2 = self.step_temporal_loss_X(inp, last_vindex, last_hindex)
l_X_temporal = l_X_temporal + [l2.item()]
iter_count = iter_count + 1
if self.D.epoch > last_epoch:
print("Entering Epoch#{}".format(self.D.epoch))
print("Factorization Loss F:{}".format(np.mean(l_F)))
print("Factorization Loss X:{}".format(np.mean(l_X)))
print("Temporal Loss X:{}".format(np.mean(l_X_temporal)))
if ind is None:
ind = np.arange(self.Ymat.shape[0])
else:
ind = ind
inp = self.predict_global(
ind,
last_step=self.end_index - val_len,
future=val_len,
)
R = self.Ymat[ind, self.end_index - val_len: self.end_index]
S = inp[:, -val_len::]
ve = np.abs(R - S).mean() / np.abs(R).mean()
# print("Validation Loss (Global): ", ve)
print("Validation Loss (Global):{}".format(ve))
if ve <= vae:
vae = ve
scount = 0
Xbest = self.X.clone()
Fbest = self.F.clone()
# Xseqbest = TemporalConvNet(
# num_inputs=1,
# num_channels=self.num_channels_X,
# kernel_size=self.kernel_size,
# dropout=self.dropout,
# )
# Xseqbest.load_state_dict(self.Xseq.state_dict())
Xseqbest = pickle.loads(pickle.dumps(self.Xseq))
else:
scount += 1
if scount > tenacity and early_stop:
# print("Early Stopped")
print("Early Stopped")
self.X = Xbest
self.F = Fbest
self.Xseq = Xseqbest
break
def create_Ycov(self):
t0 = self.end_index + 1
self.D.epoch = 0
self.D.vindex = 0
self.D.hindex = 0
Ycov = copy.deepcopy(self.Ymat[:, 0:t0])
Ymat_now = self.Ymat[:, 0:t0]
self.Xseq = self.Xseq.eval()
while self.D.epoch < 1:
last_epoch = self.D.epoch
last_vindex = self.D.vindex
last_hindex = self.D.hindex
inp, out, vindex, hindex = self.D.next_batch()
Xin = self.tensor2d_to_temporal(self.X[:, last_hindex: last_hindex + inp.size(2)])
Xout = self.temporal_to_tensor2d(self.Xseq(Xin))
Fout = self.F[self.D.I[last_vindex: last_vindex + out.size(0)], :]
output = np.array(torch.matmul(Fout, Xout).detach())
Ycov[
last_vindex: last_vindex + output.shape[0],
last_hindex + 1: last_hindex + 1 + output.shape[1],
] = output
for p in self.Xseq.parameters():
p.requires_grad = True
if self.period is None:
Ycov_wc = np.zeros(shape=[Ycov.shape[0], 1, Ycov.shape[1]])
if self.forward_cov:
Ycov_wc[:, 0, 0:-1] = Ycov[:, 1::]
else:
Ycov_wc[:, 0, :] = Ycov
else:
Ycov_wc = np.zeros(shape=[Ycov.shape[0], 2, Ycov.shape[1]])
if self.forward_cov:
Ycov_wc[:, 0, 0:-1] = Ycov[:, 1::]
else:
Ycov_wc[:, 0, :] = Ycov
Ycov_wc[:, 1, self.period - 1::] = Ymat_now[:, 0: -(self.period - 1)]
return Ycov_wc
def train_Yseq(self, num_epochs=20,
covariates=None,
dti=None,
val_len=24,
num_workers=1,
):
Ycov = self.create_Ycov()
self.Yseq = LocalModel(
self.Ymat,
num_inputs=1,
num_channels=self.num_channels_Y,
kernel_size=self.kernel_size_Y,
dropout=self.dropout,
vbsize=self.vbsize,
hbsize=self.hbsize,
lr=self.lr,
val_len=val_len,
test=True,
end_index=self.end_index - val_len,
normalize=False,
start_date=self.start_date,
freq=self.freq,
covariates=covariates,
use_time=self.use_time,
dti=dti,
Ycov=Ycov,
)
val_loss = self.Yseq.train_model(num_epochs=num_epochs,
num_workers=num_workers,
early_stop=False)
return val_loss
def train_all_models(
self,
Ymat,
val_len=24,
start_date="2016-1-1",
freq="H",
covariates=None,
dti=None,
period=None,
init_epochs=100,
alt_iters=10,
y_iters=200,
tenacity=7,
mod=5,
max_FX_epoch=300,
max_TCN_epoch=300,
num_workers=1,
):
self.end_index = Ymat.shape[1]
self.start_date = start_date
self.freq = freq
self.period = period
self.covariates = covariates
self.dti = dti
if self.normalize:
self.s = np.std(Ymat[:, 0:self.end_index], axis=1)
# self.s[self.s == 0] = 1.0
self.s += 1.0
self.m = np.mean(Ymat[:, 0:self.end_index], axis=1)
self.Ymat = (Ymat - self.m[:, None]) / self.s[:, None]
self.mini = np.abs(np.min(self.Ymat))
self.Ymat = self.Ymat + self.mini
else:
self.Ymat = Ymat
n, T = self.Ymat.shape
t0 = self.end_index + 1
if t0 > T:
self.Ymat = np.hstack([self.Ymat, self.Ymat[:, -1].reshape(-1, 1)])
if self.svd:
indices = np.random.choice(self.Ymat.shape[0], self.rank, replace=False)
X = self.Ymat[indices, 0:t0]
mX = np.std(X, axis=1)
mX[mX == 0] = 1.0
X = X / mX[:, None]
Ft = get_model(X.transpose(), self.Ymat[:, 0:t0].transpose(), lamb=0.1)
F = Ft[0].transpose()
self.X = torch.from_numpy(X).float()
self.F = torch.from_numpy(F).float()
else:
R = torch.zeros(self.rank, t0).float()
X = torch.normal(R, 0.1)
C = torch.zeros(n, self.rank).float()
F = torch.normal(C, 0.1)
self.X = X.float()
self.F = F.float()
self.D = TCMFDataLoader(
Ymat=self.Ymat,
vbsize=self.vbsize,
hbsize=self.hbsize,
end_index=self.end_index,
val_len=val_len,
shuffle=False,
)
# print("-"*50+"Initializing Factors.....")
logger.info("Initializing Factors")
self.num_epochs = init_epochs
self.train_factors(val_len=val_len)
if alt_iters % 2 == 1:
alt_iters += 1
# print("Starting Alternate Training.....")
logger.info("Starting Alternate Training.....")
for i in range(1, alt_iters):
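            # Alternate between refitting the factors F and X against the data
            # (even iterations) and retraining the temporal network Xseq on the
            # current X (odd iterations).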
if i % 2 == 0:
logger.info("Training Factors. Iter#:{}".format(i))
self.num_epochs = max_FX_epoch
self.train_factors(
seed=False, val_len=val_len,
early_stop=True, tenacity=tenacity, mod=mod
)
else:
# logger.info(
# "--------------------------------------------Training Xseq Model. Iter#:{}"
# .format(i)
# + "-------------------------------------------------------"
# )
logger.info("Training Xseq Model. Iter#:{}".format(i))
self.num_epochs = max_TCN_epoch
T = np.array(self.X.detach())
self.train_Xseq(
Ymat=T,
num_epochs=self.num_epochs,
val_len=val_len,
early_stop=True,
tenacity=tenacity,
)
logger.info("Start training Yseq.....")
val_loss = self.train_Yseq(num_epochs=y_iters,
covariates=covariates,
dti=dti,
val_len=val_len,
num_workers=num_workers,
)
return val_loss
def append_new_y(self, Ymat_new, covariates_new=None, dti_new=None):
# update Yseq
# normalize the incremented Ymat if needed
if self.normalize:
Ymat_new = (Ymat_new - self.m[:, None]) / self.s[:, None]
Ymat_new = Ymat_new + self.mini
        # append the new Ymat onto the original; note that self.end_index equals
        # the number of time steps of the original.
n, T_added = Ymat_new.shape
self.Ymat = np.concatenate((self.Ymat[:, : self.end_index], Ymat_new), axis=1)
self.end_index = self.end_index + T_added
n, T = self.Ymat.shape
t0 = self.end_index + 1
if t0 > T:
self.Ymat = np.hstack([self.Ymat, self.Ymat[:, -1].reshape(-1, 1)])
# update Yseq.covariates
last_step = self.end_index - T_added
new_covariates = self.get_future_time_covs(T_added, last_step,
future_covariates=covariates_new,
future_dti=dti_new)
self.Yseq.covariates = np.hstack([self.Yseq.covariates[:, :last_step], new_covariates])
def inject_new(self,
Ymat_new,
covariates_new=None,
dti_new=None):
if self.Ymat.shape[0] != Ymat_new.shape[0]:
raise ValueError("Expected incremental input with {} time series, got {} instead."
.format(self.Ymat.shape[0], Ymat_new.shape[0]))
self.append_new_y(Ymat_new, covariates_new=covariates_new, dti_new=dti_new)
n, T = self.Ymat.shape
rank, XT = self.X.shape
future = T - XT
Xn = self.recover_future_X(
last_step=XT,
future=future,
num_epochs=100000,
alpha=0.3,
vanilla=True,
)
self.X = torch.cat([self.X, Xn], dim=1)
def get_time_covs(self, future_start_date, num_ts, future_covariates, future_dti):
if self.use_time:
future_time = TimeCovariates(
start_date=future_start_date,
freq=self.freq,
normalized=True,
num_ts=num_ts
)
if future_dti is not None:
future_time.dti = future_dti
time_covariates = future_time.get_covariates()
if future_covariates is None:
covariates = time_covariates
else:
covariates = np.vstack([time_covariates, future_covariates])
else:
covariates = future_covariates
return covariates
def get_future_time_covs(self, horizon, last_step, future_covariates, future_dti):
if self.freq[0].isalpha():
freq = "1" + self.freq
else:
freq = self.freq
future_start_date = pd.Timestamp(self.start_date) + pd.Timedelta(freq) * last_step
covs_future = self.get_time_covs(future_start_date=future_start_date,
num_ts=horizon,
future_covariates=future_covariates,
future_dti=future_dti)
return covs_future
def get_prediction_time_covs(self, rg, horizon, last_step, future_covariates, future_dti):
covs_past = self.Yseq.covariates[:, last_step - rg: last_step]
covs_future = self.get_future_time_covs(horizon, last_step, future_covariates, future_dti)
covs = np.concatenate([covs_past, covs_future], axis=1)
return covs
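    # Added note (not in the original): predict_horizon() below stitches
    # together `rg` steps of historical covariates with `future` steps of
    # freshly generated time covariates, where `rg` is the receptive field
    # implied by the TCN kernel sizes and channel depths.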
def predict_horizon(
self,
ind=None,
future=10,
future_covariates=None,
future_dti=None,
bsize=90,
num_workers=1,
):
last_step = self.end_index
if ind is None:
ind = np.arange(self.Ymat.shape[0])
self.Yseq.seq = self.Yseq.seq.eval()
self.Xseq = self.Xseq.eval()
rg = max(
1 + 2 * (self.kernel_size - 1) * 2 ** (len(self.num_channels_X) - 1),
1 + 2 * (self.kernel_size_Y - 1) * 2 ** (len(self.num_channels_Y) - 1),
)
covs = self.get_prediction_time_covs(rg, future, last_step, future_covariates, future_dti)
yc = self.predict_global(
ind=ind,
last_step=last_step,
future=future,
normalize=False,
bsize=bsize,
)
if self.period is None:
ycovs = np.zeros(shape=[yc.shape[0], 1, yc.shape[1]])
if self.forward_cov:
ycovs[:, 0, 0:-1] = yc[:, 1::]
else:
ycovs[:, 0, :] = yc
else:
ycovs = np.zeros(shape=[yc.shape[0], 2, yc.shape[1]])
if self.forward_cov:
ycovs[:, 0, 0:-1] = yc[:, 1::]
else:
ycovs[:, 0, :] = yc
period = self.period
while last_step + future - (period - 1) > last_step + 1:
period += self.period
# The last coordinate is not used.
ycovs[:, 1, period - 1::] = self.Ymat[
:, last_step - rg: last_step + future - (period - 1)]
Y = self.Yseq.predict_future(
data_in=self.Ymat[ind, last_step - rg: last_step],
covariates=covs,
ycovs=ycovs,
future=future,
bsize=bsize,
normalize=False,
num_workers=num_workers,
)
if self.normalize:
Y = Y - self.mini
Y = Y * self.s[ind, None] + self.m[ind, None]
return Y
else:
return Y
def predict(
self, ind=None, last_step=100, future=10, normalize=False, bsize=90
):
if ind is None:
ind = np.arange(self.Ymat.shape[0])
        self.Yseq.seq = self.Yseq.seq.eval()
        self.Xseq = self.Xseq.eval()
rg = max(
1 + 2 * (self.kernel_size - 1) * 2 ** (len(self.num_channels_X) - 1),
1 + 2 * (self.kernel_size_Y - 1) * 2 ** (len(self.num_channels_Y) - 1),
)
covs = self.Yseq.covariates[:, last_step - rg: last_step + future]
# print(covs.shape)
yc = self.predict_global(
ind=ind,
last_step=last_step,
future=future,
normalize=False,
bsize=bsize,
)
if self.period is None:
ycovs = np.zeros(shape=[yc.shape[0], 1, yc.shape[1]])
if self.forward_cov:
ycovs[:, 0, 0:-1] = yc[:, 1::]
else:
ycovs[:, 0, :] = yc
else:
ycovs = np.zeros(shape=[yc.shape[0], 2, yc.shape[1]])
if self.forward_cov:
ycovs[:, 0, 0:-1] = yc[:, 1::]
else:
ycovs[:, 0, :] = yc
period = self.period
while last_step + future - (period - 1) > last_step + 1:
period += self.period
# this seems like we are looking ahead, but it will not use the last coordinate,
# which is the only new point added
ycovs[:, 1, period - 1::] = self.Ymat[
:, last_step - rg: last_step + future - (period - 1)]
Y = self.Yseq.predict_future(
data_in=self.Ymat[ind, last_step - rg: last_step],
covariates=covs,
ycovs=ycovs,
future=future,
bsize=bsize,
normalize=False,
)
if normalize:
Y = Y - self.mini
Y = Y * self.s[ind, None] + self.m[ind, None]
return Y
else:
return Y
def rolling_validation(self, Ymat, tau=24, n=7, bsize=90, alpha=0.3):
prevX = self.X.clone()
prev_index = self.end_index
out = self.predict(
last_step=self.end_index,
future=tau,
bsize=bsize,
normalize=self.normalize,
)
out_global = self.predict_global(
np.arange(self.Ymat.shape[0]),
last_step=self.end_index,
future=tau,
normalize=self.normalize,
bsize=bsize,
)
predicted_values = []
actual_values = []
predicted_values_global = []
S = out[:, -tau::]
S_g = out_global[:, -tau::]
predicted_values += [S]
predicted_values_global += [S_g]
R = Ymat[:, self.end_index: self.end_index + tau]
actual_values += [R]
print("Current window wape:{}".format(wape(S, R)))
self.Xseq = self.Xseq.eval()
self.Yseq.seq = self.Yseq.seq.eval()
for i in range(n - 1):
Xn = self.recover_future_X(
last_step=self.end_index + 1,
future=tau,
num_epochs=100000,
alpha=alpha,
vanilla=True
)
self.X = torch.cat([self.X, Xn], dim=1)
self.end_index += tau
out = self.predict(
last_step=self.end_index,
future=tau,
bsize=bsize,
normalize=self.normalize,
)
out_global = self.predict_global(
np.arange(self.Ymat.shape[0]),
last_step=self.end_index,
future=tau,
normalize=self.normalize,
bsize=bsize,
)
S = out[:, -tau::]
S_g = out_global[:, -tau::]
predicted_values += [S]
predicted_values_global += [S_g]
R = Ymat[:, self.end_index: self.end_index + tau]
actual_values += [R]
print("Current window wape:{}".format(wape(S, R)))
predicted = np.hstack(predicted_values)
predicted_global = np.hstack(predicted_values_global)
actual = np.hstack(actual_values)
dic = {}
dic["wape"] = wape(predicted, actual)
dic["mape"] = mape(predicted, actual)
dic["smape"] = smape(predicted, actual)
dic["mae"] = np.abs(predicted - actual).mean()
dic["rmse"] = np.sqrt(((predicted - actual) ** 2).mean())
dic["nrmse"] = dic["rmse"] / np.sqrt(((actual) ** 2).mean())
dic["wape_global"] = wape(predicted_global, actual)
dic["mape_global"] = mape(predicted_global, actual)
dic["smape_global"] = smape(predicted_global, actual)
dic["mae_global"] = np.abs(predicted_global - actual).mean()
dic["rmse_global"] = np.sqrt(((predicted_global - actual) ** 2).mean())
        dic["nrmse_global"] = dic["rmse_global"] / np.sqrt(((actual) ** 2).mean())
baseline = Ymat[:, Ymat.shape[1] - n * tau - tau: Ymat.shape[1] - tau]
dic["baseline_wape"] = wape(baseline, actual)
dic["baseline_mape"] = mape(baseline, actual)
dic["baseline_smape"] = smape(baseline, actual)
self.X = prevX
self.end_index = prev_index
return dic
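# --- Reference sketch (added; not part of the original file) ----------------
# rolling_validation() above relies on wape/mape/smape helpers that are
# imported elsewhere in the original package. The functions below are only a
# minimal sketch of the usual formulas, renamed with a leading underscore so
# they cannot shadow the real implementations.
def _wape_sketch(pred, actual):
    # Weighted absolute percentage error: sum of |error| over sum of |actual|.
    return np.abs(pred - actual).sum() / np.abs(actual).sum()
def _smape_sketch(pred, actual):
    # Symmetric MAPE: mean of 2*|error| / (|pred| + |actual|).
    denom = np.abs(pred) + np.abs(actual)
    denom[denom == 0] = 1.0
    return np.mean(2.0 * np.abs(pred - actual) / denom)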
| apache-2.0 |
ProkopHapala/SimpleSimulationEngine | projects/SpaceCombat/py/timeToDistance.py | 1 | 10101 | #!/usr/bin/python
# compute distance by integration of the Tsiolkovsky equation
# http://physics.stackexchange.com/questions/161358/how-can-i-caculate-the-time-to-traverse-a-distance-with-the-tsiolkovsky-equation
# v(t) = v0 + vexh log( mtot /( mtot - dm*t ) )
# s(t) = ( t - mtot/dm ) * vexh log( mtot /( mtot - dm*t ) ) + t*(v0 + ve )
# s(t) = tburn * ( t/tburn - mtot/mprop ) * vexh log( mtot /( mtot - dm*t ) ) + t*(v0 + ve )
import numpy as np
import matplotlib.pylab as plt
# ========== Functions
def timeToDistance( times, specPower, vexh, mPayload, mEngine, mPropelant, nodes = np.linspace( 0.0,1.0,10 ) ):
mTot = mPayload + mEngine + mPropelant
mDry = mPayload + mEngine
power = specPower * mEngine
thrust = power / vexh
massFlow = thrust / vexh
burnTime = mPropelant / massFlow
ts = nodes * burnTime
vt = vexh * np.log( mTot /( mTot - massFlow*ts ) )
st = vexh * ( ts - mTot/massFlow ) * np.log( mTot /( mTot - massFlow*ts ) ) + ts*vexh
if times[-1] > ts[-1]:
st = np.append( st, st[-1] + vt[-1]*(times[-1] - ts[-1]) )
vt = np.append( vt, vt[-1] )
ts = np.append( ts, times[-1] )
st_ = np.interp( times, ts, st )
vt_ = np.interp( times, ts, vt )
mask = times < ts[1]
st_[mask] = 0.5 * (thrust/mTot) * times[mask]**2
return st_, vt_
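# Added note (sketch, not part of the original script): with the same mass
# convention the ideal Tsiolkovsky delta-v is vexh*ln(mTot/mDry); e.g. for
# payload:engine:propellant = 0.1:0.1:0.8 this gives vexh*ln(1/0.2) ~ 1.61*vexh,
# i.e. about 7.1 km/s for the SSME entry (vexh = 4.4 km/s) in the ship table below.
def deltaV( vexh, mPayload, mEngine, mPropelant ):
    mTot = mPayload + mEngine + mPropelant
    mDry = mPayload + mEngine
    return vexh * np.log( mTot / mDry )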
def combineSpecPower( thruster, powerPlant ):
combined= 1.0/( 1.0/thruster + 1.0/powerPlant )
#print( "thruster, powerPlant, combined", thruster, powerPlant, combined )
return combined
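# Added note: the combination above is the reciprocal ("parallel resistor") sum,
# so the weaker component dominates; e.g. combineSpecPower(10e+3, 150e+3)
# = 1.0/(1.0/10e+3 + 1.0/150e+3) = 9.375e+3 W/kg.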
# ========== Evaluation of ships
'''
=============== Power plants =====================
Jet engine 10.0e+3 W/kg https://en.wikipedia.org/wiki/Power-to-weight_ratio
Brayton turbine max            150.0e+3 W/kg           https://en.wikipedia.org/wiki/Power-to-weight_ratio
Brushless generator 10.0e+3 W/kg https://en.wikipedia.org/wiki/Power-to-weight_ratio
High power density generator 10.0e+3 W/kg DOI: 10.1109/IECEC.1989.74606
Power[W] Weight W/kg
NERVA
MITEE 0.3400E+6
TOPAZ 5.0e+3 320.0 15.625 https://en.wikipedia.org/wiki/TOPAZ_nuclear_reactor
Bimodal 25.0e+3 2224.0 11.24
phovolto 40.0 8.0e-3 5000.0
=> MITEE + Baryton turbine + High power density generator
conservative 3e+3 W/kg
optimistic 30e+3 W/kg
=============== Thrusters plants =====================
Power[W] Mass[W] Exhaust
VASIMR_high 6.0e+6 10000kg 294e+3 http://www.projectrho.com/public_html/rocket/enginelist.php#id--Electrothermal--Resistojet
VASIMR_med 6.0e+6 10000kg 147e+3
VASIMR_low 6.0e+6 10000kg 29e+3
Mass driver 0.3e+12 150e+3 30e+3
Colloid Thruster     172e+6     20e+3    43e+3
Ion thruster Cs 1050e+6 400e+3 210e+3
'''
nucConserv = combineSpecPower( 10e+3 , combineSpecPower( 330e+3/5 , 150e+3 ) )
nucOptimist = combineSpecPower( 150e+3 , combineSpecPower( 330e+3/5 , 150e+3 ) )
BimodalVASMIR = combineSpecPower( 6.0e+6/10.0e+3 , 25.0e+3/2224.0 )
photovoltVASIMR = combineSpecPower( 6.0e+6/10.0e+3 , 40.0/8.0e-3 )
photoMassDriver = combineSpecPower( 0.3e+12/150e+3 , 40.0/8.0e-3 )
photoColoid = combineSpecPower( 172e+6/20e+3 , 40.0/8.0e-3 )
photoCesium = combineSpecPower( 1050e+6/400e+3 , 40.0/8.0e-3 )
nucConservMassDriver = combineSpecPower( 0.3e+12/150e+3 , nucConserv )
nucConservColoid = combineSpecPower( 172e+6/20e+3 , nucConserv )
nucConservCesium = combineSpecPower( 1050e+6/400e+3 , nucConserv )
'''
ships=[
# spec.Power [W/kg] vexh [m/s] payload engine propelant description color line
[ 1.2711e+6, 4400.0, 0.25, 0.25, 0.50, 'SSME H2+LOX 4.4km/s 1:1: 2', '#FF0000', '-' ],
[ 0.3400e+6, 9800.0, 0.25, 0.25, 0.50, 'MITEE H2 9.8km/s 1:1: 2', '#FF8000', '-' ],
[ photoMassDriver, 30e+3, 0.25, 0.25, 0.50, 'MassDriverPhovoltaic 30 km/s 1:1: 2', '#8080FF', '-' ],
[ photoColoid, 43e+3, 0.25, 0.25, 0.50, 'ColoidElecPhovoltaic 43 km/s 1:1: 2', '#008000', '-' ],
[ photoCesium, 210e+3, 0.25, 0.25, 0.50, 'Cs+IonElecPhovoltaic 200 km/s 1:1: 2', '#0080FF', '-' ],
[ 1.2711E+006, 4400.0, 0.05, 0.05, 0.90, 'SSME H2+LOX 4.4km/s 1:1:18', '#FF0000', '--' ],
[ 0.3400E+006, 9800.0, 0.05, 0.05, 0.90, 'MITEE H2 9.8km/s 1:1:18', '#FF8000', '--' ],
[ photoMassDriver, 30e+3, 0.05, 0.05, 0.90, 'MassDriverPhovoltaic 30 km/s 1:1:18', '#8080FF', '--' ],
[ photoColoid, 43e+3, 0.05, 0.05, 0.90, 'ColoidElecPhovoltaic 43 km/s 1:1:18', '#008000', '--' ],
[ photoCesium, 210e+3, 0.05, 0.05, 0.90, 'Cs+IonElecPhovoltaic 200 km/s 1:1:18', '#0080FF', '--' ],
]
'''
'''
ships=[
# spec.Power [W/kg] vexh [m/s] payload engine propelant description color line
[ 1.2711e+6, 4400.0, 0.25, 0.25, 0.50, 'SSME H2+LOX 4.4km/s 1:1: 2', '#FF0000', '-' ],
[ 0.3400e+6, 9800.0, 0.25, 0.25, 0.50, 'MITEE H2 9.8km/s 1:1: 2', '#FF8000', '-' ],
[ nucConservMassDriver, 30e+3, 0.25, 0.25, 0.50, 'MassDriverNucConserv 30 km/s 1:1: 2', '#8080FF', '-' ],
[ nucConservColoid, 43e+3, 0.25, 0.25, 0.50, 'ColoidElecNucConserv 43 km/s 1:1: 2', '#008000', '-' ],
[ nucConservCesium, 210e+3, 0.25, 0.25, 0.50, 'Cs+IonElecNucConserv 200 km/s 1:1: 2', '#0080FF', '-' ],
[ 1.2711E+006, 4400.0, 0.05, 0.05, 0.90, 'SSME H2+LOX 4.4km/s 1:1:18', '#FF0000', '--' ],
[ 0.3400E+006, 9800.0, 0.05, 0.05, 0.90, 'MITEE H2 9.8km/s 1:1:18', '#FF8000', '--' ],
[ nucConservMassDriver, 30e+3, 0.05, 0.05, 0.90, 'MassDriverNucConserv 30 km/s 1:1:18', '#8080FF', '--' ],
[ nucConservColoid, 43e+3, 0.05, 0.05, 0.90, 'ColoidElecNucConserv 43 km/s 1:1:18', '#008000', '--' ],
[ nucConservCesium, 210e+3, 0.05, 0.05, 0.90, 'Cs+IonElecNucConserv 200 km/s 1:1:18', '#0080FF', '--' ],
]
'''
ships=[
# spec.Power [W/kg] vexh [m/s] payload engine propelant description color line
[ 1.2711e+6, 4400.0, 0.1, 0.1, 0.80, 'SSME H2+LOX 4.4km/s 1:1:8', '#FF0000', '-' ],
[ 0.3400e+6, 9800.0, 0.1, 0.1, 0.80, 'MITEE H2 9.8km/s 1:1:8', '#FF8000', '-' ],
[ nucConservMassDriver, 30e+3, 0.1, 0.1, 0.80, 'MassDriverNucConserv 30 km/s 1:1:8', '#8080FF', '-' ],
[ nucConservColoid, 43e+3, 0.1, 0.1, 0.80, 'ColoidElecNucConserv 43 km/s 1:1:8', '#008000', '-' ],
[ nucConservCesium, 210e+3, 0.1, 0.1, 0.80, 'Cs+IonElecNucConserv 200 km/s 1:1:8', '#0080FF', '-' ],
]
# =========== Plot formating
timeLines = np.array([1.0, 60.0, 3600.0, 86400.0, 604800, 2592000, 31556926, 315569260, 3155692600, 31556926000 ])
timeTexts = ['sec','min','hour', 'day', 'week', 'month', 'year', '10years', '100years', '1000years']
#distLines = np.array([1e+1, 1e+2, 1e+3, 1e+4, 1e+5, 1e+6, 6371e+3, 42164e+3, 384400e+3, 1e+9, 1e+10, 5.790918E+010, 1.082089E+011, 1.495979E+011, 2.279366E+011, 7.784120E+011, 1.426725E+012, 2.870972E+012, 4.498253E+012, 1.40621998e+13, 2.99195741e+14, 7.47989354e+15, 4.13425091e+16])
#distTexts = ['10m','100m', '1km','10km', '100km', '1000km', 'LEO','GEO', 'Moon', r'10$^6$km',r'10$^7$km', 'Mercury', 'Venus', 'Earth','Mars', 'Jupiter', 'Satrun', 'Uranus', 'Neptune', 'Heliopause', 'Oorth', 'Outer Oorth', 'Alpha Centauri']
distLines = np.array([1e+1, 1e+2, 1e+3, 1e+4, 1e+5, 1e+6, 1e+7, 1e+8, 1e+9, 1e+10, 1.49597e+11, 1.49597e+12, 1.495979E+013, 0.94605284e+14, 0.94605284e+15, 0.94605284e+16, 0.94605284e+17 ])
distTexts = ['10m','100m', '1km','10km', '100km', '1000km', r'10$^4$km',r'10$^5$km', r'10$^6$km', r'10$^7$km', '1AU', '10AU', '100AU', '0.01ly', '0.1ly', '1ly', '10ly' ]
def main():
# --- setup
times = 10**np.linspace( 0, 11.0, 100 )
nodes = np.linspace( 0.0, 1.0, 100 ); # nodes = nodes**2; nodes[0] = NaN
# big range
tmin=times[0]; tmax=times[-1]
dmin=1 ; dmax=1e+17
# --- main
plt.figure(figsize=(15,10))
for ship in ships:
print( ship[5], ship[1]*1e-3," km/s",ship[0]*1e-3, "kW/kg" )
        st,vt = timeToDistance( times, ship[0], ship[1], ship[2], ship[3], ship[4], nodes=nodes )
plt.plot( st, times, label=ship[5], color=ship[6], ls=ship[7] );
#text ( st[-1], times[-1], ship[5], color='k', horizontalalignment='left', verticalalignment='center')
for time,txt in zip( timeLines, timeTexts ):
if ( time < tmax) and (time > tmin):
plt.axhline( time, color='k', alpha=0.5 )
plt.text ( dmax, time, txt, color='k', horizontalalignment='right', verticalalignment='baseline' )
for dist,txt in zip( distLines, distTexts ):
if ( dist < dmax) and (dist > dmin):
plt.axvline( dist, color='k', alpha=0.5 )
plt.text ( dist, tmin, txt, rotation=90, color='k', horizontalalignment='left', rotation_mode='anchor', verticalalignment='baseline')
plt.legend( loc=2, prop={'family': 'monospace'})
plt.xscale('log')
plt.yscale('log')
plt.xlim(dmin,dmax)
plt.ylim(tmin,tmax)
plt.ylabel(r"Flight Time [ s ]")
plt.xlabel(r"Distance [ m ]")
plt.grid()
#plt.savefig( "timeToDistance.png", bbox_inches='tight' )
plt.show()
if __name__ == "__main__":
main()
| mit |
joernhees/scikit-learn | sklearn/metrics/cluster/__init__.py | 91 | 1468 | """
The :mod:`sklearn.metrics.cluster` submodule contains evaluation metrics for
cluster analysis results. There are two forms of evaluation:
- supervised, which uses a ground truth class values for each sample.
- unsupervised, which does not and measures the 'quality' of the model itself.
"""
from .supervised import adjusted_mutual_info_score
from .supervised import normalized_mutual_info_score
from .supervised import adjusted_rand_score
from .supervised import completeness_score
from .supervised import contingency_matrix
from .supervised import expected_mutual_information
from .supervised import homogeneity_completeness_v_measure
from .supervised import homogeneity_score
from .supervised import mutual_info_score
from .supervised import v_measure_score
from .supervised import fowlkes_mallows_score
from .supervised import entropy
from .unsupervised import silhouette_samples
from .unsupervised import silhouette_score
from .unsupervised import calinski_harabaz_score
from .bicluster import consensus_score
__all__ = ["adjusted_mutual_info_score", "normalized_mutual_info_score",
"adjusted_rand_score", "completeness_score", "contingency_matrix",
"expected_mutual_information", "homogeneity_completeness_v_measure",
"homogeneity_score", "mutual_info_score", "v_measure_score",
"fowlkes_mallows_score", "entropy", "silhouette_samples",
"silhouette_score", "calinski_harabaz_score", "consensus_score"]
| bsd-3-clause |
Nablaquabla/ezfit | easyfit.py | 1 | 15570 | # -*- coding: utf-8 -*-
'''======
EZ-Fit
======
Provides an easy to use wrapper to fit common functions to a data set using the
Levenberg–Marquardt algorithm provided by mpfit. A full description of the
supported functions and how to use the wrapper is given in easyfit.fit
-------------------------------------------------------------------------------
Copyright (C) 2015 - Bjorn J. Scholz
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details: http://www.gnu.org/licenses.
-------------------------------------------------------------------------------
'''
import numpy as np
from mpfit import mpfit
import warnings
def __version__():
print 'Easy-Fit 0.2'
return
def const(x,p):
'''Parameter: constant\n
Return
------
>>> p[0]
'''
return [p[0]]*len(x)
def line(x,p):
'''Parameter: Slope, Intercept\n
Return
------
>>> p[0]*x + p[1]
'''
return p[0]*x + p[1]
def line0(x,p):
'''Parameter: Slope\n
Return
------
>>> p[0]*x
'''
return p[0]*x
def sine(x,p):
'''Parameter: Scale, Wavelength, Phase, Offset\n
Return
------
>>> p[0]*np.sin(2*np.pi*x/p[1]+p[2])+p[3]
'''
return p[0]*np.sin(2*np.pi*x/p[1]+p[2])+p[3]
def fermi(x,p):
'''Parameter: Scale, Edge Position, Width, Offset\n
Return
------
>>> p[0]/(np.exp((x-p[1])/p[2])+1)+p[3]
'''
return p[0]/(np.exp((x-p[1])/p[2])+1)+p[3]
def gauss(x,p):
'''Parameter: Scale, Mean, Std, Offset\n
Return
------
>>> p[0]*np.exp(-0.5*(x-p[1])**2/p[2]**2)+p[3]
'''
return p[0]*np.exp(-0.5*(x-p[1])**2/p[2]**2)+p[3]
def exp(x,p):
'''Parameter: Scale, Decay Time, Offset\n
Return
------
>>> p[0]*np.exp(-x*p[1])+p[2]
'''
return p[0]*np.exp(-x*p[1])+p[2]
def poly(x,p,n):
'''Parameter: Scale of each power from [0..n]\n
Return
------
>>> Sum[n=0,n=N] p[n]*x**n
'''
y = 0
for i in range(n+1):
y+=np.power(x,i)*p[i]
return y
def ipoly(x,p,n):
'''Parameter: Scale of each power from [0..n]\n
Return
------
>>> Sum[n=0,n=N] p[n]*x**-n
'''
y = 0
for i in range(n+1):
y+=np.power(x,-i)*p[i]
return y
def plaw(x,p):
'''Parameter: Scale, Exponent
Return
------
>>> p[0]*x**p[1]
'''
return p[0]*x**p[1]
def lognorm(x,p):
'''Parameter: Scale, 'Mean', 'Std', Offset\n
Return
------
>>> p[0]/x*np.exp(-0.5*(np.log(x)-p[1])**2/p[2]**2)+p[3]
'''
return p[0]/x*np.exp(-0.5*(np.log(x)-p[1])**2/p[2]**2)+p[3]
def fit(typ='line',x='None', y='None', yerr='None',p0='None'):
'''
Takes the data and performs a least square fit of the specified type.
Parameters
----------
typ : string
Predefined function that will be fitted to the data. You can find a
list of all supported functions below.
x : array_like or None
X data. If None is given a fit will be performed, yet it is based on
an internally created x data set that runs from [0,N] where N is the
number of y data points provided. Thus all parameters that are not
independent of your choice of x, e.g. slope, are not to be trusted!
If you are only interested in parameters that are independent of x such
        as the height of a Gaussian you'll probably get away without providing
an adequate set of x data.
y : array_like
y data. You have to provide a y array. Otherwise there is nothing to
fit.
yerr : array_like or None
Error in y direction. If None is given the fit will assume a uniform
weight of 1.
p0 : array_like or None
Initial guess of fit parameters. If p0 is None all parameters are
        initialized to one or zero depending on the meaning of the individual
parameter.
Returns
-------
x2 : float
        Reduced chi-square.
pars : array_like
Fit parameters returned by mpfit. The meaning of the subarrays are:\n
pars[0]\tBest fit parameters\n
pars[1]\tFit errors\n
pars[2]\tProperly scaled errors\n
Note that it is assumed that the chi-squared returned is sufficiently
        good to justify the scaling of the fit errors. It is pars[2] = pars[1]*
sqrt(x2)
xfit,yfit : array_like
x and y data that can directly be used within matplotlib or another
comparable plotting library to display the fit.
Available functions/fits
------------------------
const
Constant
>>> p[0]
line
Straight line, parameters: Slope and intercept\n
>>> p[0]*x + p[1]
line0
Straight line with designated zero crossing, parameters: Slope\n
>>> p[0]*x
sine
Sine, parameters: Scaling, Wavelength, Phase, Offset\n
>>> p[0]*sin(2*Pi*x/p[1]+p[2])+p[3]
fermi
Fermifunction, parameters: Scaling, Edge Position, Width, Offset\n
>>> p[0]/(exp((x-p[1])/p[2])+1)+p[3]
gauss
Gaussian, parameters: Scaling, Mean, Std, Offset\n
>>> p[0]*exp(-0.5*(x-p[1])**2/p[2]**2)+p[3]
exp
Exponential, parameters: Scaling, Inverse Decaytime, Offset\n
>>> p[0]*exp(-x*p[1])+p[2]
plaw
Power law, parameters: Scaling, Power\n
>>> p[0]*x**p[1]
polyN
Polynomial of order N. Usage: poly3, poly5, poly10, etc. Parameters:
Scalings\n
>>> Sum[n=0,n=N] p[n]*x**n
ipolyN
Inverse polynomial of order N. Usage: ipoly3, poly5, poly10 etc.
Parameters: Scalings\n
>>> Sum[n=0,n=N] p[n]*x**-n
lognorm
Lognormal distribution, Parameters: Scale, 'Mean', 'Std', Offset
        The mean is E(X) = exp(μ + 1/2 σ^2), the median is med(X) = exp(μ), and
        the variance is Var(X) = exp(2*μ + σ^2)*(exp(σ^2) - 1); hence the
        coefficient of variation is sqrt(exp(σ^2) - 1).
>>> p[0]/x*np.exp(-0.5*(np.log(x)-p[1])**2/p[2]**2)+p[3]
Example
-------
The following code snippet explains the use of the easyfit wrapper
>>> import matplotlib.pylab as plt
>>> import numpy as np
>>> import easyfit as ef
>>>
>>> x = np.linspace(0,100,30)
>>> y = 0.05*x + 2*(np.random.rand(30)-0.5)
>>>
>>> p0 = [1]
>>> x2, pars, xfit, yfit = ef.fit('line0',x,y,None,p0)
>>>
>>> plt.scatter(x,y)
>>> plt.plot(xfit,yfit)
>>> plt.show()
'''
#=========================================================================#
# Filter Future Warning From Numpy
#=========================================================================#
# warnings.filterwarnings("ignore",category=FutureWarning)
#=========================================================================#
# Set default arrays
#=========================================================================#
n=0
if 'ipoly' in typ:
n = int(typ[5:])
typ = 'ipoly'
elif 'poly' in typ:
n = int(typ[4:])
typ = 'poly'
if x is 'None': x = np.arange(len(y))
if yerr is 'None': yerr = np.ones(len(y))
elif yerr is 'Poisson':
_ty = np.copy(y)
_ty[_ty <= 0] = 1
yerr = np.sqrt(_ty)
if p0 is 'None':
if typ == 'const': p0 = [0]
elif typ == 'line': p0 = [1,0]
elif typ == 'line0': p0 = [1]
elif typ == 'sine': p0 = [1,1,0,0]
elif typ == 'fermi': p0 = [1,1,1,0]
elif typ == 'gauss': p0 = [1,0,1,0]
elif typ == 'lognorm': p0 = [1,0,1,0]
elif typ == 'exp': p0 = [1,1,0]
        elif typ == 'plaw':    p0 = [1,1]
elif typ == 'poly' or typ == 'ipoly':
p0 = [1]*(n+1)
#=========================================================================#
# Ensure that all given arrays are numpy arrays
#=========================================================================#
x = np.array(x)
y = np.array(y)
yerr = np.array(yerr)
p0 = np.array(p0)
#=========================================================================#
# Setup proper fit function
#=========================================================================#
models = {'const': const,
'line': line,
'line0': line0,
'sine': sine,
'fermi': fermi,
'gauss': gauss,
'exp': exp,
'plaw': plaw,
'lognorm': lognorm,
'poly': lambda x,p: poly(x,p,n),
'ipoly': lambda x,p: ipoly(x,p,n)}
def fitfunc(p, fjac=None, x=None, y=None, err=None):
model = models[typ](x,p)
status = 0
return [status, (y-model)/err]
#=========================================================================#
# Initialize fit info dictionary and try to fit function to data
#=========================================================================#
parbase = {'value':0,'fixed':0,'limited':[0,0],'limits':[0.,0.]}
parinfo = [{k:v for k,v in parbase.items()} for _ti in range(len(p0))]
for i in range(len(p0)):
parinfo[i]['value'] = p0[i]
fa = {'x': x, 'y': y, 'err': yerr}
m = mpfit(fitfunc, p0, parinfo=parinfo, functkw=fa,quiet=1)
dof = len(x) - len(m.params)
pcerror = m.perror * np.sqrt(m.fnorm / dof)
par = [m.params,m.perror,pcerror]
if(m.status <=0):
print 'status = ', m.status
#=========================================================================#
# Calculate goodness of fit and an easy to plot fitted data set
#=========================================================================#
x2 = m.fnorm/dof
xfit = np.linspace(np.min(x),np.max(x),1000)
yfit = models[typ](xfit,par[0])
return x2,par,xfit,yfit
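# Added note: besides an explicit error array, fit() and arbFit() also accept
# yerr='Poisson', which weights each point by sqrt(y) with entries <= 0
# clamped to an error of 1 -- convenient for histogrammed count data.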
def arbFit(fct=line,x='None', y='None', yerr='None',p0='None',limits='None'):
'''
Takes the data and performs a least square fit of the specified type.
Parameters
----------
fct : function
User defined function that will be fitted to the data. Has to obey the
following convention for its arguments: F(x,p)
x : array_like or None
X data. If None is given a fit will be performed, yet it is based on
an internally created x data set that runs from [0,N] where N is the
number of y data points provided. Thus all parameters that are not
independent of your choice of x, e.g. slope, are not to be trusted!
If you are only interested in parameters that are independent of x such
        as the height of a Gaussian you'll probably get away without providing
an adequate set of x data.
y : array_like
y data. You have to provide a y array. Otherwise there is nothing to
fit.
yerr : array_like or None
Error in y direction. If None is given the fit will assume a uniform
weight of 1.
p0 : array_like or None
Initial guess of fit parameters. If p0 is None all parameters are
initalized to one or zero depending on the meaning of the individual
parameter.
Returns
-------
x2 : float
        Reduced chi-square.
pars : array_like
Fit parameters returned by mpfit. The meaning of the subarrays are:\n
pars[0]\tBest fit parameters\n
pars[1]\tFit errors\n
pars[2]\tProperly scaled errors\n
Note that it is assumed that the chi-squared returned is sufficiently
        good to justify the scaling of the fit errors. It is pars[2] = pars[1]*
sqrt(x2)
xfit,yfit : array_like
x and y data that can directly be used within matplotlib or another
comparable plotting library to display the fit.
Example
-------
The following code snippet explains the use of the easyfit wrapper
>>> import matplotlib.pylab as plt
>>> import numpy as np
>>> import easyfit as ef
>>>
>>> def userFct(x,p):
>>> return p[0]*x**2 + np.exp(-p[1]*x)
>>>
>>> x = np.linspace(0,100,30)
>>> y = userFct(x,[-0.5,0.25]) + 100*(2*np.random.rand(30)-1)
>>>
>>> p0 = [1,0]
>>> x2, pars, xfit, yfit = ef.arbFit(userFct,x,y,None,p0)
>>>
>>> plt.scatter(x,y)
>>> plt.plot(xfit,yfit)
>>> plt.show()
'''
#=========================================================================#
# Filter Future Warning From Numpy
#=========================================================================#
# warnings.filterwarnings("ignore",category=FutureWarning)
#=========================================================================#
# Set default arrays
#=========================================================================#
if x is 'None': x = np.arange(len(y))
if yerr is 'None': yerr = np.ones(len(y))
elif yerr is 'Poisson':
_ty = np.copy(y)
_ty[_ty <= 0] = 1
yerr = np.sqrt(_ty)
if p0 is 'None':
p0 = np.ones(100)
#=========================================================================#
# Ensure that all given arrays are numpy arrays
#=========================================================================#
x = np.array(x)
y = np.array(y)
yerr = np.array(yerr)
p0 = np.array(p0)
#=========================================================================#
# Setup proper fit function
#=========================================================================#
def fitfunc(p, fjac=None, x=None, y=None, err=None):
model = fct(x,p)
status = 0
return [status, (y-model)/err]
#=========================================================================#
# Initialize fit info dictionary and try to fit function to data
#=========================================================================#
parbase = {'value':0,'fixed':0,'limited':[0,0],'limits':[0.,0.]}
parinfo = [{k:v for k,v in parbase.items()} for _ti in range(len(p0))]
for i in range(len(p0)):
parinfo[i]['value'] = p0[i]
if limits != 'None':
for i in range(len(limits)):
parinfo[int(limits[i][0])]['limited'] = limits[i][1:3]
parinfo[int(limits[i][0])]['limits'] = limits[i][3:]
fa = {'x': x, 'y': y, 'err': yerr}
m = mpfit(fitfunc, p0, parinfo=parinfo, functkw=fa,quiet=1)
dof = len(x) - len(m.params)
pcerror = m.perror * np.sqrt(m.fnorm / dof)
par = [m.params,m.perror,pcerror]
if(m.status <=0):
print 'status = ', m.status
#=========================================================================#
# Calculate goodness of fit and an easy to plot fitted data set
#=========================================================================#
x2 = m.fnorm/dof
xfit = np.linspace(np.min(x),np.max(x),1000)
yfit = fct(xfit,par[0])
return x2,par,xfit,yfit
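# Added usage note (not part of the original module): each row of `limits`
# passed to arbFit() is interpreted as
# [parameter_index, limit_low_flag, limit_high_flag, lower_bound, upper_bound],
# so constraining parameter 0 to the interval [0, 10] while leaving the others
# free would look like
# limits = [[0, 1, 1, 0.0, 10.0]]
# x2, pars, xfit, yfit = ef.arbFit(userFct, x, y, None, p0, limits=limits)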
| gpl-2.0 |
scipy/scipy | scipy/interpolate/ndgriddata.py | 12 | 8976 | """
Convenience interface to N-D interpolation
.. versionadded:: 0.9
"""
import numpy as np
from .interpnd import LinearNDInterpolator, NDInterpolatorBase, \
CloughTocher2DInterpolator, _ndim_coords_from_arrays
from scipy.spatial import cKDTree
__all__ = ['griddata', 'NearestNDInterpolator', 'LinearNDInterpolator',
'CloughTocher2DInterpolator']
#------------------------------------------------------------------------------
# Nearest-neighbor interpolation
#------------------------------------------------------------------------------
class NearestNDInterpolator(NDInterpolatorBase):
"""NearestNDInterpolator(x, y).
Nearest-neighbor interpolation in N > 1 dimensions.
.. versionadded:: 0.9
Methods
-------
__call__
Parameters
----------
x : (Npoints, Ndims) ndarray of floats
Data point coordinates.
y : (Npoints,) ndarray of float or complex
Data values.
rescale : boolean, optional
Rescale points to unit cube before performing interpolation.
This is useful if some of the input dimensions have
incommensurable units and differ by many orders of magnitude.
.. versionadded:: 0.14.0
tree_options : dict, optional
Options passed to the underlying ``cKDTree``.
.. versionadded:: 0.17.0
Notes
-----
Uses ``scipy.spatial.cKDTree``
Examples
--------
We can interpolate values on a 2D plane:
>>> from scipy.interpolate import NearestNDInterpolator
>>> import matplotlib.pyplot as plt
>>> rng = np.random.default_rng()
>>> x = rng.random(10) - 0.5
>>> y = rng.random(10) - 0.5
>>> z = np.hypot(x, y)
>>> X = np.linspace(min(x), max(x))
>>> Y = np.linspace(min(y), max(y))
>>> X, Y = np.meshgrid(X, Y) # 2D grid for interpolation
>>> interp = NearestNDInterpolator(list(zip(x, y)), z)
>>> Z = interp(X, Y)
>>> plt.pcolormesh(X, Y, Z, shading='auto')
>>> plt.plot(x, y, "ok", label="input point")
>>> plt.legend()
>>> plt.colorbar()
>>> plt.axis("equal")
>>> plt.show()
See also
--------
griddata :
Interpolate unstructured D-D data.
LinearNDInterpolator :
Piecewise linear interpolant in N dimensions.
CloughTocher2DInterpolator :
Piecewise cubic, C1 smooth, curvature-minimizing interpolant in 2D.
"""
def __init__(self, x, y, rescale=False, tree_options=None):
NDInterpolatorBase.__init__(self, x, y, rescale=rescale,
need_contiguous=False,
need_values=False)
if tree_options is None:
tree_options = dict()
self.tree = cKDTree(self.points, **tree_options)
self.values = np.asarray(y)
def __call__(self, *args):
"""
Evaluate interpolator at given points.
Parameters
----------
x1, x2, ... xn: array-like of float
Points where to interpolate data at.
x1, x2, ... xn can be array-like of float with broadcastable shape.
or x1 can be array-like of float with shape ``(..., ndim)``
"""
xi = _ndim_coords_from_arrays(args, ndim=self.points.shape[1])
xi = self._check_call_shape(xi)
xi = self._scale_x(xi)
dist, i = self.tree.query(xi)
return self.values[i]
#------------------------------------------------------------------------------
# Convenience interface function
#------------------------------------------------------------------------------
def griddata(points, values, xi, method='linear', fill_value=np.nan,
rescale=False):
"""
Interpolate unstructured D-D data.
Parameters
----------
points : 2-D ndarray of floats with shape (n, D), or length D tuple of 1-D ndarrays with shape (n,).
Data point coordinates.
values : ndarray of float or complex, shape (n,)
Data values.
xi : 2-D ndarray of floats with shape (m, D), or length D tuple of ndarrays broadcastable to the same shape.
Points at which to interpolate data.
method : {'linear', 'nearest', 'cubic'}, optional
Method of interpolation. One of
``nearest``
return the value at the data point closest to
the point of interpolation. See `NearestNDInterpolator` for
more details.
``linear``
tessellate the input point set to N-D
simplices, and interpolate linearly on each simplex. See
`LinearNDInterpolator` for more details.
``cubic`` (1-D)
return the value determined from a cubic
spline.
``cubic`` (2-D)
return the value determined from a
piecewise cubic, continuously differentiable (C1), and
approximately curvature-minimizing polynomial surface. See
`CloughTocher2DInterpolator` for more details.
fill_value : float, optional
Value used to fill in for requested points outside of the
convex hull of the input points. If not provided, then the
default is ``nan``. This option has no effect for the
'nearest' method.
rescale : bool, optional
Rescale points to unit cube before performing interpolation.
This is useful if some of the input dimensions have
incommensurable units and differ by many orders of magnitude.
.. versionadded:: 0.14.0
Returns
-------
ndarray
Array of interpolated values.
Notes
-----
.. versionadded:: 0.9
Examples
--------
Suppose we want to interpolate the 2-D function
>>> def func(x, y):
... return x*(1-x)*np.cos(4*np.pi*x) * np.sin(4*np.pi*y**2)**2
on a grid in [0, 1]x[0, 1]
>>> grid_x, grid_y = np.mgrid[0:1:100j, 0:1:200j]
but we only know its values at 1000 data points:
>>> rng = np.random.default_rng()
>>> points = rng.random((1000, 2))
>>> values = func(points[:,0], points[:,1])
This can be done with `griddata` -- below we try out all of the
interpolation methods:
>>> from scipy.interpolate import griddata
>>> grid_z0 = griddata(points, values, (grid_x, grid_y), method='nearest')
>>> grid_z1 = griddata(points, values, (grid_x, grid_y), method='linear')
>>> grid_z2 = griddata(points, values, (grid_x, grid_y), method='cubic')
One can see that the exact result is reproduced by all of the
methods to some degree, but for this smooth function the piecewise
cubic interpolant gives the best results:
>>> import matplotlib.pyplot as plt
>>> plt.subplot(221)
>>> plt.imshow(func(grid_x, grid_y).T, extent=(0,1,0,1), origin='lower')
>>> plt.plot(points[:,0], points[:,1], 'k.', ms=1)
>>> plt.title('Original')
>>> plt.subplot(222)
>>> plt.imshow(grid_z0.T, extent=(0,1,0,1), origin='lower')
>>> plt.title('Nearest')
>>> plt.subplot(223)
>>> plt.imshow(grid_z1.T, extent=(0,1,0,1), origin='lower')
>>> plt.title('Linear')
>>> plt.subplot(224)
>>> plt.imshow(grid_z2.T, extent=(0,1,0,1), origin='lower')
>>> plt.title('Cubic')
>>> plt.gcf().set_size_inches(6, 6)
>>> plt.show()
See also
--------
LinearNDInterpolator :
Piecewise linear interpolant in N dimensions.
NearestNDInterpolator :
Nearest-neighbor interpolation in N dimensions.
CloughTocher2DInterpolator :
Piecewise cubic, C1 smooth, curvature-minimizing interpolant in 2D.
"""
points = _ndim_coords_from_arrays(points)
if points.ndim < 2:
ndim = points.ndim
else:
ndim = points.shape[-1]
if ndim == 1 and method in ('nearest', 'linear', 'cubic'):
from .interpolate import interp1d
points = points.ravel()
if isinstance(xi, tuple):
if len(xi) != 1:
raise ValueError("invalid number of dimensions in xi")
xi, = xi
# Sort points/values together, necessary as input for interp1d
idx = np.argsort(points)
points = points[idx]
values = values[idx]
if method == 'nearest':
fill_value = 'extrapolate'
ip = interp1d(points, values, kind=method, axis=0, bounds_error=False,
fill_value=fill_value)
return ip(xi)
elif method == 'nearest':
ip = NearestNDInterpolator(points, values, rescale=rescale)
return ip(xi)
elif method == 'linear':
ip = LinearNDInterpolator(points, values, fill_value=fill_value,
rescale=rescale)
return ip(xi)
elif method == 'cubic' and ndim == 2:
ip = CloughTocher2DInterpolator(points, values, fill_value=fill_value,
rescale=rescale)
return ip(xi)
else:
raise ValueError("Unknown interpolation method %r for "
"%d dimensional data" % (method, ndim))
| bsd-3-clause |
worldveil/dejavu | dejavu/fingerprint.py | 1 | 5853 | import numpy as np
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
from scipy.ndimage.filters import maximum_filter
from scipy.ndimage.morphology import (generate_binary_structure,
iterate_structure, binary_erosion)
import hashlib
from operator import itemgetter
IDX_FREQ_I = 0
IDX_TIME_J = 1
######################################################################
# Sampling rate, related to the Nyquist conditions, which affects
# the range frequencies we can detect.
DEFAULT_FS = 44100
######################################################################
# Size of the FFT window, affects frequency granularity
DEFAULT_WINDOW_SIZE = 4096
######################################################################
# Ratio by which each sequential window overlaps the last and the
# next window. Higher overlap will allow a higher granularity of offset
# matching, but potentially more fingerprints.
DEFAULT_OVERLAP_RATIO = 0.5
######################################################################
# Degree to which a fingerprint can be paired with its neighbors --
# higher will cause more fingerprints, but potentially better accuracy.
DEFAULT_FAN_VALUE = 15
######################################################################
# Minimum amplitude in spectrogram in order to be considered a peak.
# This can be raised to reduce number of fingerprints, but can negatively
# affect accuracy.
DEFAULT_AMP_MIN = 10
######################################################################
# Number of cells around an amplitude peak in the spectrogram in order
# for Dejavu to consider it a spectral peak. Higher values mean less
# fingerprints and faster matching, but can potentially affect accuracy.
PEAK_NEIGHBORHOOD_SIZE = 20
######################################################################
# Thresholds on how close or far fingerprints can be in time in order
# to be paired as a fingerprint. If your max is too low, higher values of
# DEFAULT_FAN_VALUE may not perform as expected.
MIN_HASH_TIME_DELTA = 0
MAX_HASH_TIME_DELTA = 200
######################################################################
# If True, will sort peaks temporally for fingerprinting;
# not sorting will cut down number of fingerprints, but potentially
# affect performance.
PEAK_SORT = True
######################################################################
# Number of hex characters to keep from the front of the SHA1 digest in the
# fingerprint calculation. The more you keep, the more storage each
# fingerprint needs, but the fewer hash collisions between matches.
FINGERPRINT_REDUCTION = 20
def fingerprint(channel_samples, Fs=DEFAULT_FS,
wsize=DEFAULT_WINDOW_SIZE,
wratio=DEFAULT_OVERLAP_RATIO,
fan_value=DEFAULT_FAN_VALUE,
amp_min=DEFAULT_AMP_MIN):
"""
FFT the channel, log transform output, find local maxima, then return
locally sensitive hashes.
"""
# FFT the signal and extract frequency components
arr2D = mlab.specgram(
channel_samples,
NFFT=wsize,
Fs=Fs,
window=mlab.window_hanning,
noverlap=int(wsize * wratio))[0]
# apply log transform since specgram() returns linear array
arr2D = 10 * np.log10(arr2D)
arr2D[arr2D == -np.inf] = 0 # replace infs with zeros
# find local maxima
local_maxima = get_2D_peaks(arr2D, plot=False, amp_min=amp_min)
# return hashes
return generate_hashes(local_maxima, fan_value=fan_value)
def get_2D_peaks(arr2D, plot=False, amp_min=DEFAULT_AMP_MIN):
# http://docs.scipy.org/doc/scipy/reference/generated/scipy.ndimage.iterate_structure.html#scipy.ndimage.iterate_structure
struct = generate_binary_structure(2, 1)
neighborhood = iterate_structure(struct, PEAK_NEIGHBORHOOD_SIZE)
# find local maxima using our filter shape
local_max = maximum_filter(arr2D, footprint=neighborhood) == arr2D
background = (arr2D == 0)
eroded_background = binary_erosion(background, structure=neighborhood,
border_value=1)
# Boolean mask of arr2D with True at peaks (Fixed deprecated boolean operator by changing '-' to '^')
detected_peaks = local_max ^ eroded_background
# extract peaks
amps = arr2D[detected_peaks]
j, i = np.where(detected_peaks)
# filter peaks
amps = amps.flatten()
peaks = zip(i, j, amps)
peaks_filtered = filter(lambda x: x[2]>amp_min, peaks) # freq, time, amp
# get indices for frequency and time
frequency_idx = []
time_idx = []
for x in peaks_filtered:
frequency_idx.append(x[1])
time_idx.append(x[0])
if plot:
# scatter of the peaks
fig, ax = plt.subplots()
ax.imshow(arr2D)
ax.scatter(time_idx, frequency_idx)
ax.set_xlabel('Time')
ax.set_ylabel('Frequency')
ax.set_title("Spectrogram")
plt.gca().invert_yaxis()
plt.show()
return zip(frequency_idx, time_idx)
def generate_hashes(peaks, fan_value=DEFAULT_FAN_VALUE):
"""
Hash list structure:
sha1_hash[0:20] time_offset
[(e05b341a9b77a51fd26, 32), ... ]
"""
if PEAK_SORT:
peaks.sort(key=itemgetter(1))
for i in range(len(peaks)):
for j in range(1, fan_value):
if (i + j) < len(peaks):
freq1 = peaks[i][IDX_FREQ_I]
freq2 = peaks[i + j][IDX_FREQ_I]
t1 = peaks[i][IDX_TIME_J]
t2 = peaks[i + j][IDX_TIME_J]
t_delta = t2 - t1
if t_delta >= MIN_HASH_TIME_DELTA and t_delta <= MAX_HASH_TIME_DELTA:
h = hashlib.sha1(
"%s|%s|%s" % (str(freq1), str(freq2), str(t_delta)))
yield (h.hexdigest()[0:FINGERPRINT_REDUCTION], t1)
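if __name__ == "__main__":
    # Minimal self-test (added sketch, not part of the original module):
    # fingerprint one second of synthetic noise and report how many hashes
    # survive the amplitude threshold (for pure noise this may well be zero).
    demo_samples = np.random.randn(DEFAULT_FS)
    demo_hashes = list(fingerprint(demo_samples))
    print("generated %d fingerprint hashes" % len(demo_hashes))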
| mit |
admcrae/tensorflow | tensorflow/contrib/keras/python/keras/callbacks.py | 25 | 33691 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras callbacks: utilities called at certain points during model training.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import deque
from collections import Iterable
from collections import OrderedDict
import csv
import json
import os
import time
import warnings
import numpy as np
from tensorflow.contrib.keras.python.keras import backend as K
from tensorflow.contrib.keras.python.keras.utils.generic_utils import Progbar
from tensorflow.contrib.tensorboard.plugins import projector
from tensorflow.python.ops import array_ops
from tensorflow.python.summary import summary as tf_summary
from tensorflow.python.training import saver as saver_lib
# pylint: disable=g-import-not-at-top
try:
import requests
except ImportError:
requests = None
# pylint: enable=g-import-not-at-top
class CallbackList(object):
"""Container abstracting a list of callbacks.
Arguments:
callbacks: List of `Callback` instances.
queue_length: Queue length for keeping
running statistics over callback execution time.
"""
def __init__(self, callbacks=None, queue_length=10):
callbacks = callbacks or []
self.callbacks = [c for c in callbacks]
self.queue_length = queue_length
def append(self, callback):
self.callbacks.append(callback)
def set_params(self, params):
for callback in self.callbacks:
callback.set_params(params)
def set_model(self, model):
for callback in self.callbacks:
callback.set_model(model)
def on_epoch_begin(self, epoch, logs=None):
"""Called at the start of an epoch.
Arguments:
epoch: integer, index of epoch.
logs: dictionary of logs.
"""
logs = logs or {}
for callback in self.callbacks:
callback.on_epoch_begin(epoch, logs)
self._delta_t_batch = 0.
self._delta_ts_batch_begin = deque([], maxlen=self.queue_length)
self._delta_ts_batch_end = deque([], maxlen=self.queue_length)
def on_epoch_end(self, epoch, logs=None):
"""Called at the end of an epoch.
Arguments:
epoch: integer, index of epoch.
logs: dictionary of logs.
"""
logs = logs or {}
for callback in self.callbacks:
callback.on_epoch_end(epoch, logs)
def on_batch_begin(self, batch, logs=None):
"""Called right before processing a batch.
Arguments:
batch: integer, index of batch within the current epoch.
logs: dictionary of logs.
"""
logs = logs or {}
t_before_callbacks = time.time()
for callback in self.callbacks:
callback.on_batch_begin(batch, logs)
self._delta_ts_batch_begin.append(time.time() - t_before_callbacks)
delta_t_median = np.median(self._delta_ts_batch_begin)
if (self._delta_t_batch > 0. and
delta_t_median > 0.95 * self._delta_t_batch and delta_t_median > 0.1):
warnings.warn(
'Method on_batch_begin() is slow compared '
'to the batch update (%f). Check your callbacks.' % delta_t_median)
self._t_enter_batch = time.time()
def on_batch_end(self, batch, logs=None):
"""Called at the end of a batch.
Arguments:
batch: integer, index of batch within the current epoch.
logs: dictionary of logs.
"""
logs = logs or {}
if not hasattr(self, '_t_enter_batch'):
self._t_enter_batch = time.time()
self._delta_t_batch = time.time() - self._t_enter_batch
t_before_callbacks = time.time()
for callback in self.callbacks:
callback.on_batch_end(batch, logs)
self._delta_ts_batch_end.append(time.time() - t_before_callbacks)
delta_t_median = np.median(self._delta_ts_batch_end)
if (self._delta_t_batch > 0. and
(delta_t_median > 0.95 * self._delta_t_batch and delta_t_median > 0.1)):
warnings.warn(
'Method on_batch_end() is slow compared '
'to the batch update (%f). Check your callbacks.' % delta_t_median)
def on_train_begin(self, logs=None):
"""Called at the beginning of training.
Arguments:
logs: dictionary of logs.
"""
logs = logs or {}
for callback in self.callbacks:
callback.on_train_begin(logs)
def on_train_end(self, logs=None):
"""Called at the end of training.
Arguments:
logs: dictionary of logs.
"""
logs = logs or {}
for callback in self.callbacks:
callback.on_train_end(logs)
def __iter__(self):
return iter(self.callbacks)
class Callback(object):
"""Abstract base class used to build new callbacks.
# Properties
params: dict. Training parameters
(eg. verbosity, batch size, number of epochs...).
model: instance of `keras.models.Model`.
Reference of the model being trained.
The `logs` dictionary that callback methods
take as argument will contain keys for quantities relevant to
the current batch or epoch.
Currently, the `.fit()` method of the `Sequential` model class
will include the following quantities in the `logs` that
it passes to its callbacks:
on_epoch_end: logs include `acc` and `loss`, and
optionally include `val_loss`
(if validation is enabled in `fit`), and `val_acc`
(if validation and accuracy monitoring are enabled).
on_batch_begin: logs include `size`,
the number of samples in the current batch.
on_batch_end: logs include `loss`, and optionally `acc`
(if accuracy monitoring is enabled).
"""
def __init__(self):
self.validation_data = None
def set_params(self, params):
self.params = params
def set_model(self, model):
self.model = model
def on_epoch_begin(self, epoch, logs=None):
pass
def on_epoch_end(self, epoch, logs=None):
pass
def on_batch_begin(self, batch, logs=None):
pass
def on_batch_end(self, batch, logs=None):
pass
def on_train_begin(self, logs=None):
pass
def on_train_end(self, logs=None):
pass
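# Illustrative subclass (added sketch, not part of the original module): a
# custom callback only needs to override the hooks it cares about; this one
# records the per-batch training loss reported in `logs`.
class _ExampleBatchLossHistory(Callback):
  """Records per-batch training loss values (illustrative only)."""
  def on_train_begin(self, logs=None):
    self.batch_losses = []
  def on_batch_end(self, batch, logs=None):
    logs = logs or {}
    if 'loss' in logs:
      self.batch_losses.append(logs['loss'])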
class BaseLogger(Callback):
"""Callback that accumulates epoch averages of metrics.
This callback is automatically applied to every Keras model.
"""
def on_epoch_begin(self, epoch, logs=None):
self.seen = 0
self.totals = {}
def on_batch_end(self, batch, logs=None):
logs = logs or {}
batch_size = logs.get('size', 0)
self.seen += batch_size
for k, v in logs.items():
if k in self.totals:
self.totals[k] += v * batch_size
else:
self.totals[k] = v * batch_size
def on_epoch_end(self, epoch, logs=None):
if logs is not None:
for k in self.params['metrics']:
if k in self.totals:
# Make value available to next callbacks.
logs[k] = self.totals[k] / self.seen
class ProgbarLogger(Callback):
"""Callback that prints metrics to stdout.
Arguments:
count_mode: One of "steps" or "samples".
Whether the progress bar should
          count samples seen or steps (batches) seen.
Raises:
ValueError: In case of invalid `count_mode`.
"""
def __init__(self, count_mode='samples'):
super(ProgbarLogger, self).__init__()
if count_mode == 'samples':
self.use_steps = False
elif count_mode == 'steps':
self.use_steps = True
else:
raise ValueError('Unknown `count_mode`: ' + str(count_mode))
def on_train_begin(self, logs=None):
self.verbose = self.params['verbose']
self.epochs = self.params['epochs']
def on_epoch_begin(self, epoch, logs=None):
if self.verbose:
print('Epoch %d/%d' % (epoch + 1, self.epochs))
if self.use_steps:
target = self.params['steps']
else:
target = self.params['samples']
self.target = target
self.progbar = Progbar(target=self.target, verbose=self.verbose)
self.seen = 0
def on_batch_begin(self, batch, logs=None):
if self.seen < self.target:
self.log_values = []
def on_batch_end(self, batch, logs=None):
logs = logs or {}
batch_size = logs.get('size', 0)
if self.use_steps:
self.seen += 1
else:
self.seen += batch_size
for k in self.params['metrics']:
if k in logs:
self.log_values.append((k, logs[k]))
# Skip progbar update for the last batch;
# will be handled by on_epoch_end.
if self.verbose and self.seen < self.target:
self.progbar.update(self.seen, self.log_values)
def on_epoch_end(self, epoch, logs=None):
logs = logs or {}
for k in self.params['metrics']:
if k in logs:
self.log_values.append((k, logs[k]))
if self.verbose:
self.progbar.update(self.seen, self.log_values, force=True)
class History(Callback):
"""Callback that records events into a `History` object.
This callback is automatically applied to
every Keras model. The `History` object
gets returned by the `fit` method of models.
"""
def on_train_begin(self, logs=None):
self.epoch = []
self.history = {}
def on_epoch_end(self, epoch, logs=None):
logs = logs or {}
self.epoch.append(epoch)
for k, v in logs.items():
self.history.setdefault(k, []).append(v)
class ModelCheckpoint(Callback):
"""Save the model after every epoch.
`filepath` can contain named formatting options,
which will be filled the value of `epoch` and
keys in `logs` (passed in `on_epoch_end`).
For example: if `filepath` is `weights.{epoch:02d}-{val_loss:.2f}.hdf5`,
then the model checkpoints will be saved with the epoch number and
the validation loss in the filename.
Arguments:
filepath: string, path to save the model file.
monitor: quantity to monitor.
verbose: verbosity mode, 0 or 1.
save_best_only: if `save_best_only=True`,
the latest best model according to
the quantity monitored will not be overwritten.
mode: one of {auto, min, max}.
If `save_best_only=True`, the decision
to overwrite the current save file is made
based on either the maximization or the
minimization of the monitored quantity. For `val_acc`,
this should be `max`, for `val_loss` this should
be `min`, etc. In `auto` mode, the direction is
automatically inferred from the name of the monitored quantity.
save_weights_only: if True, then only the model's weights will be
saved (`model.save_weights(filepath)`), else the full model
is saved (`model.save(filepath)`).
period: Interval (number of epochs) between checkpoints.
"""
def __init__(self,
filepath,
monitor='val_loss',
verbose=0,
save_best_only=False,
save_weights_only=False,
mode='auto',
period=1):
super(ModelCheckpoint, self).__init__()
self.monitor = monitor
self.verbose = verbose
self.filepath = filepath
self.save_best_only = save_best_only
self.save_weights_only = save_weights_only
self.period = period
self.epochs_since_last_save = 0
if mode not in ['auto', 'min', 'max']:
warnings.warn('ModelCheckpoint mode %s is unknown, '
'fallback to auto mode.' % (mode), RuntimeWarning)
mode = 'auto'
if mode == 'min':
self.monitor_op = np.less
self.best = np.Inf
elif mode == 'max':
self.monitor_op = np.greater
self.best = -np.Inf
else:
if 'acc' in self.monitor or self.monitor.startswith('fmeasure'):
self.monitor_op = np.greater
self.best = -np.Inf
else:
self.monitor_op = np.less
self.best = np.Inf
def on_epoch_end(self, epoch, logs=None):
logs = logs or {}
self.epochs_since_last_save += 1
if self.epochs_since_last_save >= self.period:
self.epochs_since_last_save = 0
filepath = self.filepath.format(epoch=epoch, **logs)
if self.save_best_only:
current = logs.get(self.monitor)
if current is None:
warnings.warn('Can save best model only with %s available, '
'skipping.' % (self.monitor), RuntimeWarning)
else:
if self.monitor_op(current, self.best):
if self.verbose > 0:
print('Epoch %05d: %s improved from %0.5f to %0.5f,'
' saving model to %s' % (epoch, self.monitor, self.best,
current, filepath))
self.best = current
if self.save_weights_only:
self.model.save_weights(filepath, overwrite=True)
else:
self.model.save(filepath, overwrite=True)
else:
if self.verbose > 0:
print('Epoch %05d: %s did not improve' % (epoch, self.monitor))
else:
if self.verbose > 0:
print('Epoch %05d: saving model to %s' % (epoch, filepath))
if self.save_weights_only:
self.model.save_weights(filepath, overwrite=True)
else:
self.model.save(filepath, overwrite=True)
class EarlyStopping(Callback):
"""Stop training when a monitored quantity has stopped improving.
Arguments:
monitor: quantity to be monitored.
min_delta: minimum change in the monitored quantity
to qualify as an improvement, i.e. an absolute
change of less than min_delta, will count as no
improvement.
patience: number of epochs with no improvement
after which training will be stopped.
verbose: verbosity mode.
mode: one of {auto, min, max}. In `min` mode,
training will stop when the quantity
monitored has stopped decreasing; in `max`
mode it will stop when the quantity
monitored has stopped increasing; in `auto`
mode, the direction is automatically inferred
from the name of the monitored quantity.
"""
def __init__(self,
monitor='val_loss',
min_delta=0,
patience=0,
verbose=0,
mode='auto'):
super(EarlyStopping, self).__init__()
self.monitor = monitor
self.patience = patience
self.verbose = verbose
self.min_delta = min_delta
self.wait = 0
self.stopped_epoch = 0
if mode not in ['auto', 'min', 'max']:
      warnings.warn('EarlyStopping mode %s is unknown, '
                    'fallback to auto mode.' % (mode), RuntimeWarning)
mode = 'auto'
if mode == 'min':
self.monitor_op = np.less
elif mode == 'max':
self.monitor_op = np.greater
else:
if 'acc' in self.monitor or self.monitor.startswith('fmeasure'):
self.monitor_op = np.greater
else:
self.monitor_op = np.less
if self.monitor_op == np.greater:
self.min_delta *= 1
else:
self.min_delta *= -1
def on_train_begin(self, logs=None):
self.wait = 0 # Allow instances to be re-used
self.best = np.Inf if self.monitor_op == np.less else -np.Inf
def on_epoch_end(self, epoch, logs=None):
    current = logs.get(self.monitor)
    if current is None:
      warnings.warn('Early stopping requires %s available!' % (self.monitor),
                    RuntimeWarning)
      return
    if self.monitor_op(current - self.min_delta, self.best):
self.best = current
self.wait = 0
else:
if self.wait >= self.patience:
self.stopped_epoch = epoch
self.model.stop_training = True
self.wait += 1
def on_train_end(self, logs=None):
if self.stopped_epoch > 0 and self.verbose > 0:
print('Epoch %05d: early stopping' % (self.stopped_epoch))
class RemoteMonitor(Callback):
"""Callback used to stream events to a server.
Requires the `requests` library.
Events are sent to `root + '/publish/epoch/end/'` by default. Calls are
HTTP POST, with a `data` argument which is a
JSON-encoded dictionary of event data.
Arguments:
root: String; root url of the target server.
path: String; path relative to `root` to which the events will be sent.
field: String; JSON field under which the data will be stored.
headers: Dictionary; optional custom HTTP headers.
Defaults to:
`{'Accept': 'application/json',
'Content-Type': 'application/json'}`
"""
def __init__(self,
root='http://localhost:9000',
path='/publish/epoch/end/',
field='data',
headers=None):
super(RemoteMonitor, self).__init__()
if headers is None:
headers = {
'Accept': 'application/json',
'Content-Type': 'application/json'
}
self.root = root
self.path = path
self.field = field
self.headers = headers
def on_epoch_end(self, epoch, logs=None):
if requests is None:
raise ImportError('RemoteMonitor requires the `requests` library.')
logs = logs or {}
send = {}
send['epoch'] = epoch
for k, v in logs.items():
send[k] = v
try:
requests.post(
self.root + self.path, {self.field: json.dumps(send)},
headers=self.headers)
except requests.exceptions.RequestException:
warnings.warn('Warning: could not reach RemoteMonitor '
'root server at ' + str(self.root))
class LearningRateScheduler(Callback):
"""Learning rate scheduler.
Arguments:
schedule: a function that takes an epoch index as input
(integer, indexed from 0) and returns a new
learning rate as output (float).
"""
def __init__(self, schedule):
super(LearningRateScheduler, self).__init__()
self.schedule = schedule
def on_epoch_begin(self, epoch, logs=None):
if not hasattr(self.model.optimizer, 'lr'):
raise ValueError('Optimizer must have a "lr" attribute.')
lr = self.schedule(epoch)
if not isinstance(lr, (float, np.float32, np.float64)):
raise ValueError('The output of the "schedule" function '
'should be float.')
K.set_value(self.model.optimizer.lr, lr)
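# A minimal usage sketch (assumes a compiled Keras `model` whose optimizer
# exposes an `lr` variable, plus caller-supplied training arrays): the schedule
# halves the learning rate every 10 epochs, starting from 0.01.
def _lr_scheduler_example(model, x_train, y_train):
  def step_decay(epoch):
    # LearningRateScheduler requires the schedule to return a plain float.
    return 0.01 * (0.5 ** (epoch // 10))
  return model.fit(x_train, y_train, epochs=30,
                   callbacks=[LearningRateScheduler(step_decay)])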
class TensorBoard(Callback):
# pylint: disable=line-too-long
"""Tensorboard basic visualizations.
This callback writes a log for TensorBoard, which allows
you to visualize dynamic graphs of your training and test
metrics, as well as activation histograms for the different
layers in your model.
Arguments:
log_dir: the path of the directory where to save the log
files to be parsed by Tensorboard.
histogram_freq: frequency (in epochs) at which to compute activation
histograms for the layers of the model. If set to 0,
histograms won't be computed.
write_graph: whether to visualize the graph in Tensorboard.
The log file can become quite large when
write_graph is set to True.
write_images: whether to write model weights to visualize as
image in Tensorboard.
embeddings_freq: frequency (in epochs) at which selected embedding
layers will be saved.
embeddings_layer_names: a list of names of layers to keep an eye on. If
None or an empty list, all the embedding layers will be watched.
embeddings_metadata: a dictionary which maps layer name to a file name
in which metadata for this embedding layer is saved. See the
[details](https://www.tensorflow.org/how_tos/embedding_viz/#metadata_optional)
about the metadata file format. If the same metadata file is to be
used for all embedding layers, a single string can be passed.
"""
# pylint: enable=line-too-long
def __init__(self,
log_dir='./logs',
histogram_freq=0,
write_graph=True,
write_images=False,
embeddings_freq=0,
embeddings_layer_names=None,
embeddings_metadata=None):
super(TensorBoard, self).__init__()
self.log_dir = log_dir
self.histogram_freq = histogram_freq
self.merged = None
self.write_graph = write_graph
self.write_images = write_images
self.embeddings_freq = embeddings_freq
self.embeddings_layer_names = embeddings_layer_names
self.embeddings_metadata = embeddings_metadata or {}
def set_model(self, model):
self.model = model
self.sess = K.get_session()
if self.histogram_freq and self.merged is None:
for layer in self.model.layers:
for weight in layer.weights:
tf_summary.histogram(weight.name, weight)
if self.write_images:
w_img = array_ops.squeeze(weight)
shape = w_img.get_shape()
if len(shape) > 1 and shape[0] > shape[1]:
w_img = array_ops.transpose(w_img)
if len(shape) == 1:
w_img = array_ops.expand_dims(w_img, 0)
w_img = array_ops.expand_dims(array_ops.expand_dims(w_img, 0), -1)
tf_summary.image(weight.name, w_img)
if hasattr(layer, 'output'):
tf_summary.histogram('{}_out'.format(layer.name), layer.output)
self.merged = tf_summary.merge_all()
if self.write_graph:
self.writer = tf_summary.FileWriter(self.log_dir, self.sess.graph)
else:
self.writer = tf_summary.FileWriter(self.log_dir)
if self.embeddings_freq:
self.saver = saver_lib.Saver()
embeddings_layer_names = self.embeddings_layer_names
if not embeddings_layer_names:
embeddings_layer_names = [
layer.name for layer in self.model.layers
if type(layer).__name__ == 'Embedding'
]
embeddings = {
layer.name: layer.weights[0]
for layer in self.model.layers if layer.name in embeddings_layer_names
}
embeddings_metadata = {}
if not isinstance(self.embeddings_metadata, str):
embeddings_metadata = self.embeddings_metadata
else:
embeddings_metadata = {
layer_name: self.embeddings_metadata
for layer_name in embeddings.keys()
}
config = projector.ProjectorConfig()
self.embeddings_logs = []
for layer_name, tensor in embeddings.items():
embedding = config.embeddings.add()
embedding.tensor_name = tensor.name
self.embeddings_logs.append(
os.path.join(self.log_dir, layer_name + '.ckpt'))
if layer_name in embeddings_metadata:
embedding.metadata_path = embeddings_metadata[layer_name]
projector.visualize_embeddings(self.writer, config)
def on_epoch_end(self, epoch, logs=None):
logs = logs or {}
if self.validation_data and self.histogram_freq:
if epoch % self.histogram_freq == 0:
# TODO(fchollet): implement batched calls to sess.run
# (current call will likely go OOM on GPU)
if self.model.uses_learning_phase:
cut_v_data = len(self.model.inputs)
val_data = self.validation_data[:cut_v_data] + [0]
tensors = self.model.inputs + [K.learning_phase()]
else:
val_data = self.validation_data
tensors = self.model.inputs
feed_dict = dict(zip(tensors, val_data))
result = self.sess.run([self.merged], feed_dict=feed_dict)
summary_str = result[0]
self.writer.add_summary(summary_str, epoch)
if self.embeddings_freq and self.embeddings_logs:
if epoch % self.embeddings_freq == 0:
for log in self.embeddings_logs:
self.saver.save(self.sess, log, epoch)
for name, value in logs.items():
if name in ['batch', 'size']:
continue
summary = tf_summary.Summary()
summary_value = summary.value.add()
summary_value.simple_value = value.item()
summary_value.tag = name
self.writer.add_summary(summary, epoch)
self.writer.flush()
def on_train_end(self, _):
self.writer.close()
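# A minimal usage sketch (assumes a compiled Keras `model` and caller-supplied
# arrays): scalar logs go to ./logs for inspection with
# `tensorboard --logdir=./logs`; weight histograms are written every 5 epochs,
# which requires validation data, hence the validation_split.
def _tensorboard_example(model, x_train, y_train):
  tb = TensorBoard(log_dir='./logs', histogram_freq=5,
                   write_graph=True, write_images=False)
  return model.fit(x_train, y_train, validation_split=0.2, epochs=20,
                   callbacks=[tb])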
class ReduceLROnPlateau(Callback):
"""Reduce learning rate when a metric has stopped improving.
Models often benefit from reducing the learning rate by a factor
of 2-10 once learning stagnates. This callback monitors a
quantity and if no improvement is seen for a 'patience' number
of epochs, the learning rate is reduced.
Example:
```python
reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.2,
patience=5, min_lr=0.001)
model.fit(X_train, Y_train, callbacks=[reduce_lr])
```
Arguments:
monitor: quantity to be monitored.
factor: factor by which the learning rate will
be reduced. new_lr = lr * factor
patience: number of epochs with no improvement
after which learning rate will be reduced.
verbose: int. 0: quiet, 1: update messages.
mode: one of {auto, min, max}. In `min` mode,
lr will be reduced when the quantity
monitored has stopped decreasing; in `max`
mode it will be reduced when the quantity
monitored has stopped increasing; in `auto`
mode, the direction is automatically inferred
from the name of the monitored quantity.
epsilon: threshold for measuring the new optimum,
to only focus on significant changes.
cooldown: number of epochs to wait before resuming
normal operation after lr has been reduced.
min_lr: lower bound on the learning rate.
"""
def __init__(self,
monitor='val_loss',
factor=0.1,
patience=10,
verbose=0,
mode='auto',
epsilon=1e-4,
cooldown=0,
min_lr=0):
super(ReduceLROnPlateau, self).__init__()
self.monitor = monitor
if factor >= 1.0:
raise ValueError('ReduceLROnPlateau does not support a factor >= 1.0.')
self.factor = factor
self.min_lr = min_lr
self.epsilon = epsilon
self.patience = patience
self.verbose = verbose
self.cooldown = cooldown
self.cooldown_counter = 0 # Cooldown counter.
self.wait = 0
self.best = 0
self.mode = mode
self.monitor_op = None
self._reset()
def _reset(self):
"""Resets wait counter and cooldown counter.
"""
if self.mode not in ['auto', 'min', 'max']:
warnings.warn('Learning Rate Plateau Reducing mode %s is unknown, '
'fallback to auto mode.' % (self.mode), RuntimeWarning)
self.mode = 'auto'
if (self.mode == 'min' or
(self.mode == 'auto' and 'acc' not in self.monitor)):
self.monitor_op = lambda a, b: np.less(a, b - self.epsilon)
self.best = np.Inf
else:
self.monitor_op = lambda a, b: np.greater(a, b + self.epsilon)
self.best = -np.Inf
self.cooldown_counter = 0
self.wait = 0
self.lr_epsilon = self.min_lr * 1e-4
def on_train_begin(self, logs=None):
self._reset()
def on_epoch_end(self, epoch, logs=None):
logs = logs or {}
logs['lr'] = K.get_value(self.model.optimizer.lr)
current = logs.get(self.monitor)
if current is None:
warnings.warn('Learning Rate Plateau Reducing requires %s available!' %
self.monitor, RuntimeWarning)
else:
if self.in_cooldown():
self.cooldown_counter -= 1
self.wait = 0
if self.monitor_op(current, self.best):
self.best = current
self.wait = 0
elif not self.in_cooldown():
if self.wait >= self.patience:
old_lr = float(K.get_value(self.model.optimizer.lr))
if old_lr > self.min_lr + self.lr_epsilon:
new_lr = old_lr * self.factor
new_lr = max(new_lr, self.min_lr)
K.set_value(self.model.optimizer.lr, new_lr)
if self.verbose > 0:
print('\nEpoch %05d: reducing learning rate to %s.' % (epoch,
new_lr))
self.cooldown_counter = self.cooldown
self.wait = 0
self.wait += 1
def in_cooldown(self):
return self.cooldown_counter > 0
class CSVLogger(Callback):
"""Callback that streams epoch results to a csv file.
Supports all values that can be represented as a string,
including 1D iterables such as np.ndarray.
Example:
```python
csv_logger = CSVLogger('training.log')
model.fit(X_train, Y_train, callbacks=[csv_logger])
```
Arguments:
filename: filename of the csv file, e.g. 'run/log.csv'.
separator: string used to separate elements in the csv file.
append: True: append if file exists (useful for continuing
training). False: overwrite existing file.
"""
def __init__(self, filename, separator=',', append=False):
self.sep = separator
self.filename = filename
self.append = append
self.writer = None
self.keys = None
self.append_header = True
super(CSVLogger, self).__init__()
def on_train_begin(self, logs=None):
if self.append:
if os.path.exists(self.filename):
with open(self.filename) as f:
self.append_header = not bool(len(f.readline()))
self.csv_file = open(self.filename, 'a')
else:
self.csv_file = open(self.filename, 'w')
def on_epoch_end(self, epoch, logs=None):
logs = logs or {}
def handle_value(k):
is_zero_dim_ndarray = isinstance(k, np.ndarray) and k.ndim == 0
if isinstance(k, Iterable) and not is_zero_dim_ndarray:
return '"[%s]"' % (', '.join(map(str, k)))
else:
return k
if not self.writer:
self.keys = sorted(logs.keys())
class CustomDialect(csv.excel):
delimiter = self.sep
self.writer = csv.DictWriter(
self.csv_file,
fieldnames=['epoch'] + self.keys,
dialect=CustomDialect)
if self.append_header:
self.writer.writeheader()
row_dict = OrderedDict({'epoch': epoch})
row_dict.update((key, handle_value(logs[key])) for key in self.keys)
self.writer.writerow(row_dict)
self.csv_file.flush()
def on_train_end(self, logs=None):
self.csv_file.close()
self.writer = None
class LambdaCallback(Callback):
"""Callback for creating simple, custom callbacks on-the-fly.
This callback is constructed with anonymous functions that will be called
at the appropriate time. Note that the callbacks expect positional
arguments, as:
- `on_epoch_begin` and `on_epoch_end` expect two positional arguments:
`epoch`, `logs`
- `on_batch_begin` and `on_batch_end` expect two positional arguments:
`batch`, `logs`
- `on_train_begin` and `on_train_end` expect one positional argument:
`logs`
Arguments:
on_epoch_begin: called at the beginning of every epoch.
on_epoch_end: called at the end of every epoch.
on_batch_begin: called at the beginning of every batch.
on_batch_end: called at the end of every batch.
on_train_begin: called at the beginning of model training.
on_train_end: called at the end of model training.
Example:
```python
# Print the batch number at the beginning of every batch.
batch_print_callback = LambdaCallback(
on_batch_begin=lambda batch,logs: print(batch))
# Plot the loss after every epoch.
import numpy as np
import matplotlib.pyplot as plt
plot_loss_callback = LambdaCallback(
on_epoch_end=lambda epoch, logs: plt.plot(np.arange(epoch),
logs['loss']))
# Terminate some processes after having finished model training.
processes = ...
cleanup_callback = LambdaCallback(
on_train_end=lambda logs: [
p.terminate() for p in processes if p.is_alive()])
model.fit(...,
callbacks=[batch_print_callback,
plot_loss_callback,
cleanup_callback])
```
"""
def __init__(self,
on_epoch_begin=None,
on_epoch_end=None,
on_batch_begin=None,
on_batch_end=None,
on_train_begin=None,
on_train_end=None,
**kwargs):
super(LambdaCallback, self).__init__()
self.__dict__.update(kwargs)
if on_epoch_begin is not None:
self.on_epoch_begin = on_epoch_begin
else:
self.on_epoch_begin = lambda epoch, logs: None
if on_epoch_end is not None:
self.on_epoch_end = on_epoch_end
else:
self.on_epoch_end = lambda epoch, logs: None
if on_batch_begin is not None:
self.on_batch_begin = on_batch_begin
else:
self.on_batch_begin = lambda batch, logs: None
if on_batch_end is not None:
self.on_batch_end = on_batch_end
else:
self.on_batch_end = lambda batch, logs: None
if on_train_begin is not None:
self.on_train_begin = on_train_begin
else:
self.on_train_begin = lambda logs: None
if on_train_end is not None:
self.on_train_end = on_train_end
else:
self.on_train_end = lambda logs: None
| apache-2.0 |
miltonsarria/dsp-python | filters/ex2/examen2P1.py | 1 | 1903 | #Exam 2
#April 25 - 2018
#digital signal processing
#universidad santiago de cali
#Name:
#ID:
from scipy import signal
import matplotlib.pyplot as plt
import numpy as np
import sys
sys.path.append('tools/')
from fourierFunc import fourierAn
##########################################
#BLOCK 1
#define the sampling frequency
Fs=1
tf=5 #final time
#define the time sequence up to 5 seconds
nT=np.linspace(1./Fs,tf,Fs*tf);
#generate the discrete sequence x[n]
x=2*np.sin(12*np.pi*nT)+3*np.cos(40*np.pi*nT)
#use the Fourier transform to identify the frequency components
absX,Xdb,pX=fourierAn(x)
f=np.linspace(-Fs/2,Fs/2,Xdb.size)
#plot the results of the Fourier-transform analysis
plt.subplot(211)
plt.plot(nT,x)
plt.ylabel('x[n]')
plt.xlabel('tiempo - s')
plt.subplot(212)
plt.plot(f,Xdb)
plt.ylabel('|X| en dB')
plt.xlabel('Frecuencia - Hz')
##########################################
#BLOCK 2: remove the quotes on line 46 and line 79
'''
#design a filter that passes only the lowest-frequency component
#modify whatever parameters are necessary
frec_norm=0.8
longitud_filtro=3
b1 = signal.firwin(longitud_filtro, frec_norm, window='hamming', pass_zero=True)
#get the frequency response
w, h = signal.freqz(b1)
#filter the waveform with filter number 1
x1=signal.lfilter(b1, [1.0],x)
#use the Fourier transform to show the result of the filter
absX1,X1db,pX1=fourierAn(x1)
#
plt.figure(2)
#show the frequency response of the filter
plt.subplot(311)
plt.title('Respuesta en frecuencia de filtro digital numero 1')
plt.plot(w, 20 * np.log10(abs(h)), 'b')
plt.ylabel('Amplitud [dB]', color='b')
#show the results
plt.subplot(312)
plt.plot(nT,x1)
plt.ylabel('x1[n] - filtrada')
plt.xlabel('tiempo - s')
plt.subplot(313)
plt.plot(f,X1db)
plt.ylabel('|X1| en dB')
plt.xlabel('Frecuencia - Hz')
'''
plt.show()
| mit |
Winand/pandas | pandas/tests/plotting/test_misc.py | 2 | 11402 | # coding: utf-8
""" Test cases for misc plot functions """
import pytest
from pandas import DataFrame
from pandas.compat import lmap
import pandas.util.testing as tm
import numpy as np
from numpy import random
from numpy.random import randn
import pandas.plotting as plotting
from pandas.tests.plotting.common import TestPlotBase, _check_plot_works
tm._skip_if_no_mpl()
class TestSeriesPlots(TestPlotBase):
def setup_method(self, method):
TestPlotBase.setup_method(self, method)
import matplotlib as mpl
mpl.rcdefaults()
self.ts = tm.makeTimeSeries()
self.ts.name = 'ts'
@pytest.mark.slow
def test_autocorrelation_plot(self):
from pandas.plotting import autocorrelation_plot
_check_plot_works(autocorrelation_plot, series=self.ts)
_check_plot_works(autocorrelation_plot, series=self.ts.values)
ax = autocorrelation_plot(self.ts, label='Test')
self._check_legend_labels(ax, labels=['Test'])
@pytest.mark.slow
def test_lag_plot(self):
from pandas.plotting import lag_plot
_check_plot_works(lag_plot, series=self.ts)
_check_plot_works(lag_plot, series=self.ts, lag=5)
@pytest.mark.slow
def test_bootstrap_plot(self):
from pandas.plotting import bootstrap_plot
_check_plot_works(bootstrap_plot, series=self.ts, size=10)
class TestDataFramePlots(TestPlotBase):
def test_scatter_matrix_axis(self):
tm._skip_if_no_scipy()
scatter_matrix = plotting.scatter_matrix
with tm.RNGContext(42):
df = DataFrame(randn(100, 3))
# we are plotting multiples on a sub-plot
with tm.assert_produces_warning(UserWarning):
axes = _check_plot_works(scatter_matrix, filterwarnings='always',
frame=df, range_padding=.1)
axes0_labels = axes[0][0].yaxis.get_majorticklabels()
# GH 5662
if self.mpl_ge_2_0_0:
expected = ['-2', '0', '2']
else:
expected = ['-2', '-1', '0', '1', '2']
self._check_text_labels(axes0_labels, expected)
self._check_ticks_props(
axes, xlabelsize=8, xrot=90, ylabelsize=8, yrot=0)
df[0] = ((df[0] - 2) / 3)
# we are plotting multiples on a sub-plot
with tm.assert_produces_warning(UserWarning):
axes = _check_plot_works(scatter_matrix, filterwarnings='always',
frame=df, range_padding=.1)
axes0_labels = axes[0][0].yaxis.get_majorticklabels()
if self.mpl_ge_2_0_0:
expected = ['-1.0', '-0.5', '0.0']
else:
expected = ['-1.2', '-1.0', '-0.8', '-0.6', '-0.4', '-0.2', '0.0']
self._check_text_labels(axes0_labels, expected)
self._check_ticks_props(
axes, xlabelsize=8, xrot=90, ylabelsize=8, yrot=0)
@pytest.mark.slow
def test_andrews_curves(self):
from pandas.plotting import andrews_curves
from matplotlib import cm
df = self.iris
_check_plot_works(andrews_curves, frame=df, class_column='Name')
rgba = ('#556270', '#4ECDC4', '#C7F464')
ax = _check_plot_works(andrews_curves, frame=df,
class_column='Name', color=rgba)
self._check_colors(
ax.get_lines()[:10], linecolors=rgba, mapping=df['Name'][:10])
cnames = ['dodgerblue', 'aquamarine', 'seagreen']
ax = _check_plot_works(andrews_curves, frame=df,
class_column='Name', color=cnames)
self._check_colors(
ax.get_lines()[:10], linecolors=cnames, mapping=df['Name'][:10])
ax = _check_plot_works(andrews_curves, frame=df,
class_column='Name', colormap=cm.jet)
cmaps = lmap(cm.jet, np.linspace(0, 1, df['Name'].nunique()))
self._check_colors(
ax.get_lines()[:10], linecolors=cmaps, mapping=df['Name'][:10])
length = 10
df = DataFrame({"A": random.rand(length),
"B": random.rand(length),
"C": random.rand(length),
"Name": ["A"] * length})
_check_plot_works(andrews_curves, frame=df, class_column='Name')
rgba = ('#556270', '#4ECDC4', '#C7F464')
ax = _check_plot_works(andrews_curves, frame=df,
class_column='Name', color=rgba)
self._check_colors(
ax.get_lines()[:10], linecolors=rgba, mapping=df['Name'][:10])
cnames = ['dodgerblue', 'aquamarine', 'seagreen']
ax = _check_plot_works(andrews_curves, frame=df,
class_column='Name', color=cnames)
self._check_colors(
ax.get_lines()[:10], linecolors=cnames, mapping=df['Name'][:10])
ax = _check_plot_works(andrews_curves, frame=df,
class_column='Name', colormap=cm.jet)
cmaps = lmap(cm.jet, np.linspace(0, 1, df['Name'].nunique()))
self._check_colors(
ax.get_lines()[:10], linecolors=cmaps, mapping=df['Name'][:10])
colors = ['b', 'g', 'r']
df = DataFrame({"A": [1, 2, 3],
"B": [1, 2, 3],
"C": [1, 2, 3],
"Name": colors})
ax = andrews_curves(df, 'Name', color=colors)
handles, labels = ax.get_legend_handles_labels()
self._check_colors(handles, linecolors=colors)
with tm.assert_produces_warning(FutureWarning):
andrews_curves(data=df, class_column='Name')
@pytest.mark.slow
def test_parallel_coordinates(self):
from pandas.plotting import parallel_coordinates
from matplotlib import cm
df = self.iris
ax = _check_plot_works(parallel_coordinates,
frame=df, class_column='Name')
nlines = len(ax.get_lines())
nxticks = len(ax.xaxis.get_ticklabels())
rgba = ('#556270', '#4ECDC4', '#C7F464')
ax = _check_plot_works(parallel_coordinates,
frame=df, class_column='Name', color=rgba)
self._check_colors(
ax.get_lines()[:10], linecolors=rgba, mapping=df['Name'][:10])
cnames = ['dodgerblue', 'aquamarine', 'seagreen']
ax = _check_plot_works(parallel_coordinates,
frame=df, class_column='Name', color=cnames)
self._check_colors(
ax.get_lines()[:10], linecolors=cnames, mapping=df['Name'][:10])
ax = _check_plot_works(parallel_coordinates,
frame=df, class_column='Name', colormap=cm.jet)
cmaps = lmap(cm.jet, np.linspace(0, 1, df['Name'].nunique()))
self._check_colors(
ax.get_lines()[:10], linecolors=cmaps, mapping=df['Name'][:10])
ax = _check_plot_works(parallel_coordinates,
frame=df, class_column='Name', axvlines=False)
assert len(ax.get_lines()) == (nlines - nxticks)
colors = ['b', 'g', 'r']
df = DataFrame({"A": [1, 2, 3],
"B": [1, 2, 3],
"C": [1, 2, 3],
"Name": colors})
ax = parallel_coordinates(df, 'Name', color=colors)
handles, labels = ax.get_legend_handles_labels()
self._check_colors(handles, linecolors=colors)
with tm.assert_produces_warning(FutureWarning):
parallel_coordinates(data=df, class_column='Name')
with tm.assert_produces_warning(FutureWarning):
parallel_coordinates(df, 'Name', colors=colors)
def test_parallel_coordinates_with_sorted_labels(self):
""" For #15908 """
from pandas.plotting import parallel_coordinates
df = DataFrame({"feat": [i for i in range(30)],
"class": [2 for _ in range(10)] +
[3 for _ in range(10)] +
[1 for _ in range(10)]})
ax = parallel_coordinates(df, 'class', sort_labels=True)
polylines, labels = ax.get_legend_handles_labels()
color_label_tuples = \
zip([polyline.get_color() for polyline in polylines], labels)
ordered_color_label_tuples = sorted(color_label_tuples,
key=lambda x: x[1])
prev_next_tupels = zip([i for i in ordered_color_label_tuples[0:-1]],
[i for i in ordered_color_label_tuples[1:]])
for prev, nxt in prev_next_tupels:
# labels and colors are ordered strictly increasing
assert prev[1] < nxt[1] and prev[0] < nxt[0]
@pytest.mark.slow
def test_radviz(self):
from pandas.plotting import radviz
from matplotlib import cm
df = self.iris
_check_plot_works(radviz, frame=df, class_column='Name')
rgba = ('#556270', '#4ECDC4', '#C7F464')
ax = _check_plot_works(
radviz, frame=df, class_column='Name', color=rgba)
# skip Circle drawn as ticks
patches = [p for p in ax.patches[:20] if p.get_label() != '']
self._check_colors(
patches[:10], facecolors=rgba, mapping=df['Name'][:10])
cnames = ['dodgerblue', 'aquamarine', 'seagreen']
_check_plot_works(radviz, frame=df, class_column='Name', color=cnames)
patches = [p for p in ax.patches[:20] if p.get_label() != '']
self._check_colors(patches, facecolors=cnames, mapping=df['Name'][:10])
_check_plot_works(radviz, frame=df,
class_column='Name', colormap=cm.jet)
cmaps = lmap(cm.jet, np.linspace(0, 1, df['Name'].nunique()))
patches = [p for p in ax.patches[:20] if p.get_label() != '']
self._check_colors(patches, facecolors=cmaps, mapping=df['Name'][:10])
colors = [[0., 0., 1., 1.],
[0., 0.5, 1., 1.],
[1., 0., 0., 1.]]
df = DataFrame({"A": [1, 2, 3],
"B": [2, 1, 3],
"C": [3, 2, 1],
"Name": ['b', 'g', 'r']})
ax = radviz(df, 'Name', color=colors)
handles, labels = ax.get_legend_handles_labels()
self._check_colors(handles, facecolors=colors)
@pytest.mark.slow
def test_subplot_titles(self):
df = self.iris.drop('Name', axis=1).head()
# Use the column names as the subplot titles
title = list(df.columns)
# Case len(title) == len(df)
plot = df.plot(subplots=True, title=title)
assert [p.get_title() for p in plot] == title
# Case len(title) > len(df)
pytest.raises(ValueError, df.plot, subplots=True,
title=title + ["kittens > puppies"])
# Case len(title) < len(df)
pytest.raises(ValueError, df.plot, subplots=True, title=title[:2])
# Case subplots=False and title is of type list
pytest.raises(ValueError, df.plot, subplots=False, title=title)
# Case df with 3 numeric columns but layout of (2,2)
plot = df.drop('SepalWidth', axis=1).plot(subplots=True, layout=(2, 2),
title=title[:-1])
title_list = [ax.get_title() for sublist in plot for ax in sublist]
assert title_list == title[:3] + ['']
| bsd-3-clause |
wdurhamh/statsmodels | statsmodels/tsa/tsatools.py | 19 | 20189 | from statsmodels.compat.python import range, lrange, lzip
import numpy as np
import numpy.lib.recfunctions as nprf
from statsmodels.tools.tools import add_constant
from pandas.tseries import offsets
from pandas.tseries.frequencies import to_offset
def add_trend(X, trend="c", prepend=False, has_constant='skip'):
"""
Adds a trend and/or constant to an array.
Parameters
----------
X : array-like
Original array of data.
trend : str {"c","t","ct","ctt"}
"c" add constant only
"t" add trend only
"ct" add constant and linear trend
"ctt" add constant and linear and quadratic trend.
prepend : bool
If True, prepends the new data to the columns of X.
has_constant : str {'raise', 'add', 'skip'}
Controls what happens when trend is 'c' and a constant already
exists in X. 'raise' will raise an error. 'add' will duplicate a
constant. 'skip' will return the data without change. 'skip' is the
default.
Notes
-----
Returns columns as ["ctt","ct","c"] whenever applicable. There is currently
no checking for an existing trend.
See also
--------
statsmodels.add_constant
"""
#TODO: could be generalized for trend of arbitrary order
trend = trend.lower()
if trend == "c": # handles structured arrays
return add_constant(X, prepend=prepend, has_constant=has_constant)
elif trend == "ct" or trend == "t":
trendorder = 1
elif trend == "ctt":
trendorder = 2
else:
raise ValueError("trend %s not understood" % trend)
X = np.asanyarray(X)
nobs = len(X)
trendarr = np.vander(np.arange(1,nobs+1, dtype=float), trendorder+1)
# put in order ctt
trendarr = np.fliplr(trendarr)
if trend == "t":
trendarr = trendarr[:,1]
if not X.dtype.names:
# check for constant
if "c" in trend and np.any(np.ptp(X, axis=0) == 0):
if has_constant == 'raise':
raise ValueError("X already contains a constant")
elif has_constant == 'add':
pass
elif has_constant == 'skip' and trend == "ct":
trendarr = trendarr[:, 1]
if not prepend:
X = np.column_stack((X, trendarr))
else:
X = np.column_stack((trendarr, X))
else:
return_rec = X.__class__ is np.recarray
if trendorder == 1:
if trend == "ct":
dt = [('const',float),('trend',float)]
else:
dt = [('trend', float)]
elif trendorder == 2:
dt = [('const',float),('trend',float),('trend_squared', float)]
trendarr = trendarr.view(dt)
if prepend:
X = nprf.append_fields(trendarr, X.dtype.names, [X[i] for i
in X.dtype.names], usemask=False, asrecarray=return_rec)
else:
X = nprf.append_fields(X, trendarr.dtype.names, [trendarr[i] for i
in trendarr.dtype.names], usemask=False, asrecarray=return_rec)
return X
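# A small usage sketch for add_trend (illustrative values): appending a
# constant and a linear trend to a plain two-column ndarray yields four columns.
def _add_trend_example():
    x = np.arange(10.).reshape(5, 2)
    x_ct = add_trend(x, trend="ct", prepend=False)
    assert x_ct.shape == (5, 4)
    return x_ct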
def add_lag(x, col=None, lags=1, drop=False, insert=True):
"""
Returns an array with lags included given an array.
Parameters
----------
x : array
An array or NumPy ndarray subclass. Can be either a 1d or 2d array with
observations in columns.
col : 'string', int, or None
If data is a structured array or a recarray, `col` can be a string
that is the name of the column containing the variable. Or `col` can
be an int of the zero-based column index. If it's a 1d array `col`
can be None.
lags : int
The number of lags desired.
drop : bool
Whether to keep the contemporaneous variable for the data.
insert : bool or int
If True, inserts the lagged values after `col`. If False, appends
the data. If int inserts the lags at int.
Returns
-------
array : ndarray
Array with lags
Examples
--------
>>> import statsmodels.api as sm
>>> data = sm.datasets.macrodata.load()
>>> data = data.data[['year','quarter','realgdp','cpi']]
>>> data = sm.tsa.add_lag(data, 'realgdp', lags=2)
Notes
-----
Trims the array both forward and backward, so that the length of the
returned array is len(`x`) - lags. The lags are
returned in increasing order, ie., t-1,t-2,...,t-lags
"""
if x.dtype.names:
names = x.dtype.names
if not col and np.squeeze(x).ndim > 1:
raise IndexError("col is None and the input array is not 1d")
elif len(names) == 1:
col = names[0]
if isinstance(col, int):
col = x.dtype.names[col]
contemp = x[col]
# make names for lags
tmp_names = [col + '_'+'L(%i)' % i for i in range(1,lags+1)]
ndlags = lagmat(contemp, maxlag=lags, trim='Both')
# get index for return
if insert is True:
ins_idx = list(names).index(col) + 1
elif insert is False:
ins_idx = len(names) + 1
else: # insert is an int
if insert > len(names):
import warnings
warnings.warn("insert > number of variables, inserting at the"
" last position",
UserWarning)
ins_idx = insert
first_names = list(names[:ins_idx])
last_names = list(names[ins_idx:])
if drop:
if col in first_names:
first_names.pop(first_names.index(col))
else:
last_names.pop(last_names.index(col))
if first_names: # only do this if x isn't "empty"
first_arr = nprf.append_fields(x[first_names][lags:],tmp_names,
ndlags.T, usemask=False)
else:
first_arr = np.zeros(len(x)-lags, dtype=lzip(tmp_names,
(x[col].dtype,)*lags))
for i,name in enumerate(tmp_names):
first_arr[name] = ndlags[:,i]
if last_names:
return nprf.append_fields(first_arr, last_names,
[x[name][lags:] for name in last_names], usemask=False)
else: # lags for last variable
return first_arr
else: # we have an ndarray
if x.ndim == 1: # make 2d if 1d
x = x[:,None]
if col is None:
col = 0
# handle negative index
if col < 0:
col = x.shape[1] + col
contemp = x[:,col]
if insert is True:
ins_idx = col + 1
elif insert is False:
ins_idx = x.shape[1]
else:
if insert < 0: # handle negative index
insert = x.shape[1] + insert + 1
if insert > x.shape[1]:
insert = x.shape[1]
import warnings
warnings.warn("insert > number of variables, inserting at the"
" last position",
UserWarning)
ins_idx = insert
ndlags = lagmat(contemp, lags, trim='Both')
first_cols = lrange(ins_idx)
last_cols = lrange(ins_idx,x.shape[1])
if drop:
if col in first_cols:
first_cols.pop(first_cols.index(col))
else:
last_cols.pop(last_cols.index(col))
return np.column_stack((x[lags:,first_cols],ndlags,
x[lags:,last_cols]))
def detrend(x, order=1, axis=0):
'''detrend an array with a trend of given order along axis 0 or 1
Parameters
----------
x : array_like, 1d or 2d
data, if 2d, then each row or column is independently detrended with the
same trendorder, but independent trend estimates
order : int
specifies the polynomial order of the trend, zero is constant, one is
linear trend, two is quadratic trend
axis : int
axis can be either 0, observations by rows,
or 1, observations by columns
Returns
-------
detrended data series : ndarray
The detrended series is the residual of the linear regression of the
data on the trend of given order.
'''
x = np.asarray(x)
nobs = x.shape[0]
if order == 0:
return x - np.expand_dims(x.mean(axis), axis)
else:
if x.ndim == 2 and lrange(2)[axis]==1:
x = x.T
elif x.ndim > 2:
raise NotImplementedError('x.ndim>2 is not implemented until it is needed')
#could use a polynomial, but this should work also with 2d x, but maybe not yet
trends = np.vander(np.arange(nobs).astype(float), N=order+1)
beta = np.linalg.lstsq(trends, x)[0]
resid = x - np.dot(trends, beta)
if x.ndim == 2 and lrange(2)[axis]==1:
resid = resid.T
return resid
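# A small usage sketch for detrend (illustrative values): removing a
# first-order trend from a noiseless straight line leaves residuals that are
# numerically zero.
def _detrend_example():
    y = 0.5 + 2.0 * np.arange(20)
    resid = detrend(y, order=1)
    assert np.allclose(resid, 0.0)
    return resid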
def lagmat(x, maxlag, trim='forward', original='ex'):
'''create 2d array of lags
Parameters
----------
x : array_like, 1d or 2d
data; if 2d, observation in rows and variables in columns
maxlag : int or sequence of ints
all lags from zero to maxlag are included
trim : str {'forward', 'backward', 'both', 'none'} or None
* 'forward' : trim invalid observations in front
* 'backward' : trim invalid initial observations
* 'both' : trim invalid observations on both sides
* 'none', None : no trimming of observations
original : str {'ex','sep','in'}
* 'ex' : drops the original array returning only the lagged values.
* 'in' : returns the original array and the lagged values as a single
array.
* 'sep' : returns a tuple (original array, lagged values). The original
array is truncated to have the same number of rows as
the returned lagmat.
Returns
-------
lagmat : 2d array
array with lagged observations
y : 2d array, optional
Only returned if original == 'sep'
Examples
--------
>>> from statsmodels.tsa.tsatools import lagmat
>>> import numpy as np
>>> X = np.arange(1,7).reshape(-1,2)
>>> lagmat(X, maxlag=2, trim="forward", original='in')
array([[ 1., 2., 0., 0., 0., 0.],
[ 3., 4., 1., 2., 0., 0.],
[ 5., 6., 3., 4., 1., 2.]])
>>> lagmat(X, maxlag=2, trim="backward", original='in')
array([[ 5., 6., 3., 4., 1., 2.],
[ 0., 0., 5., 6., 3., 4.],
[ 0., 0., 0., 0., 5., 6.]])
>>> lagmat(X, maxlag=2, trim="both", original='in')
array([[ 5., 6., 3., 4., 1., 2.]])
>>> lagmat(X, maxlag=2, trim="none", original='in')
array([[ 1., 2., 0., 0., 0., 0.],
[ 3., 4., 1., 2., 0., 0.],
[ 5., 6., 3., 4., 1., 2.],
[ 0., 0., 5., 6., 3., 4.],
[ 0., 0., 0., 0., 5., 6.]])
Notes
-----
TODO:
* allow list of lags additional to maxlag
* create varnames for columns
'''
x = np.asarray(x)
dropidx = 0
if x.ndim == 1:
x = x[:,None]
nobs, nvar = x.shape
if original in ['ex','sep']:
dropidx = nvar
if maxlag >= nobs:
raise ValueError("maxlag should be < nobs")
lm = np.zeros((nobs+maxlag, nvar*(maxlag+1)))
for k in range(0, int(maxlag+1)):
lm[maxlag-k:nobs+maxlag-k, nvar*(maxlag-k):nvar*(maxlag-k+1)] = x
if trim:
trimlower = trim.lower()
else:
trimlower = trim
if trimlower == 'none' or not trimlower:
startobs = 0
stopobs = len(lm)
elif trimlower == 'forward':
startobs = 0
stopobs = nobs+maxlag-k
elif trimlower == 'both':
startobs = maxlag
stopobs = nobs+maxlag-k
elif trimlower == 'backward':
startobs = maxlag
stopobs = len(lm)
else:
raise ValueError('trim option not valid')
if original == 'sep':
return lm[startobs:stopobs,dropidx:], x[startobs:stopobs]
else:
return lm[startobs:stopobs,dropidx:]
def lagmat2ds(x, maxlag0, maxlagex=None, dropex=0, trim='forward'):
'''generate lagmatrix for 2d array, columns arranged by variables
Parameters
----------
x : array_like, 2d
2d data, observation in rows and variables in columns
maxlag0 : int
for first variable all lags from zero to maxlag are included
maxlagex : None or int
max lag for all other variables all lags from zero to maxlag are included
dropex : int (default is 0)
exclude first dropex lags from other variables
for all variables, except the first, lags from dropex to maxlagex are
included
trim : string
* 'forward' : trim invalid observations in front
* 'backward' : trim invalid initial observations
* 'both' : trim invalid observations on both sides
* 'none' : no trimming of observations
Returns
-------
lagmat : 2d array
array with lagged observations, columns ordered by variable
Notes
-----
very inefficient for unequal lags, just done for convenience
'''
if maxlagex is None:
maxlagex = maxlag0
maxlag = max(maxlag0, maxlagex)
nobs, nvar = x.shape
lagsli = [lagmat(x[:,0], maxlag, trim=trim, original='in')[:,:maxlag0+1]]
for k in range(1,nvar):
lagsli.append(lagmat(x[:,k], maxlag, trim=trim, original='in')[:,dropex:maxlagex+1])
return np.column_stack(lagsli)
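# A small usage sketch for lagmat2ds (illustrative shapes): with two variables
# and maxlag0=2, lags 0..2 of each variable are stacked column-wise, giving
# six columns.
def _lagmat2ds_example():
    x = np.random.normal(size=(50, 2))
    lagged = lagmat2ds(x, 2, trim='forward')
    assert lagged.shape == (50, 6)
    return lagged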
def vec(mat):
return mat.ravel('F')
def vech(mat):
# Gets Fortran-order
return mat.T.take(_triu_indices(len(mat)))
# tril/triu/diag, suitable for ndarray.take
def _tril_indices(n):
rows, cols = np.tril_indices(n)
return rows * n + cols
def _triu_indices(n):
rows, cols = np.triu_indices(n)
return rows * n + cols
def _diag_indices(n):
rows, cols = np.diag_indices(n)
return rows * n + cols
def unvec(v):
k = int(np.sqrt(len(v)))
assert(k * k == len(v))
return v.reshape((k, k), order='F')
def unvech(v):
# quadratic formula, correct fp error
rows = .5 * (-1 + np.sqrt(1 + 8 * len(v)))
rows = int(np.round(rows))
result = np.zeros((rows, rows))
result[np.triu_indices(rows)] = v
result = result + result.T
# divide diagonal elements by 2
result[np.diag_indices(rows)] /= 2
return result
def duplication_matrix(n):
"""
Create duplication matrix D_n which satisfies vec(S) = D_n vech(S) for
symmetric matrix S
Returns
-------
D_n : ndarray
"""
tmp = np.eye(n * (n + 1) // 2)
return np.array([unvech(x).ravel() for x in tmp]).T
def elimination_matrix(n):
"""
Create the elimination matrix L_n which satisfies vech(M) = L_n vec(M) for
any matrix M
Parameters
----------
Returns
-------
"""
vech_indices = vec(np.tril(np.ones((n, n))))
return np.eye(n * n)[vech_indices != 0]
def commutation_matrix(p, q):
"""
Create the commutation matrix K_{p,q} satisfying vec(A') = K_{p,q} vec(A)
Parameters
----------
p : int
q : int
Returns
-------
K : ndarray (pq x pq)
"""
K = np.eye(p * q)
indices = np.arange(p * q).reshape((p, q), order='F')
return K.take(indices.ravel(), axis=0)
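# A small verification sketch of the identities stated in the docstrings
# above: vec(S) = D_n vech(S) for symmetric S, and vec(A') = K_{p,q} vec(A)
# for a p x q matrix A (illustrative values).
def _vec_identities_example():
    s = np.arange(9.).reshape(3, 3)
    s = s + s.T  # make it symmetric
    assert np.allclose(np.dot(duplication_matrix(3), vech(s)), vec(s))
    a = np.arange(6.).reshape(2, 3)
    assert np.allclose(np.dot(commutation_matrix(2, 3), vec(a)), vec(a.T))
    return True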
def _ar_transparams(params):
"""
Transforms params to induce stationarity/invertability.
Parameters
----------
params : array
The AR coefficients
Reference
---------
Jones(1980)
"""
newparams = ((1-np.exp(-params))/
(1+np.exp(-params))).copy()
tmp = ((1-np.exp(-params))/
(1+np.exp(-params))).copy()
for j in range(1,len(params)):
a = newparams[j]
for kiter in range(j):
tmp[kiter] -= a * newparams[j-kiter-1]
newparams[:j] = tmp[:j]
return newparams
def _ar_invtransparams(params):
"""
Inverse of the Jones reparameterization
Parameters
----------
params : array
The transformed AR coefficients
"""
# AR coeffs
tmp = params.copy()
for j in range(len(params)-1,0,-1):
a = params[j]
for kiter in range(j):
tmp[kiter] = (params[kiter] + a * params[j-kiter-1])/\
(1-a**2)
params[:j] = tmp[:j]
invarcoefs = -np.log((1-params)/(1+params))
return invarcoefs
def _ma_transparams(params):
"""
Transforms params to induce stationarity/invertability.
Parameters
----------
params : array
The ma coeffecients of an (AR)MA model.
Reference
---------
Jones(1980)
"""
newparams = ((1-np.exp(-params))/(1+np.exp(-params))).copy()
tmp = ((1-np.exp(-params))/(1+np.exp(-params))).copy()
# levinson-durbin to get macf
for j in range(1,len(params)):
b = newparams[j]
for kiter in range(j):
tmp[kiter] += b * newparams[j-kiter-1]
newparams[:j] = tmp[:j]
return newparams
def _ma_invtransparams(macoefs):
"""
Inverse of the Jones reparameterization
Parameters
----------
params : array
The transformed MA coefficients
"""
tmp = macoefs.copy()
for j in range(len(macoefs)-1,0,-1):
b = macoefs[j]
for kiter in range(j):
tmp[kiter] = (macoefs[kiter]-b *macoefs[j-kiter-1])/(1-b**2)
macoefs[:j] = tmp[:j]
invmacoefs = -np.log((1-macoefs)/(1+macoefs))
return invmacoefs
def unintegrate_levels(x, d):
"""
Returns the successive differences needed to unintegrate the series.
Parameters
----------
x : array-like
The original series
d : int
The number of differences of the differenced series.
Returns
-------
y : array-like
The increasing differences from 0 to d-1 of the first d elements
of x.
See Also
--------
unintegrate
"""
x = x[:d]
return np.asarray([np.diff(x, d - i)[0] for i in range(d, 0, -1)])
def unintegrate(x, levels):
"""
After taking n-differences of a series, return the original series
Parameters
----------
x : array-like
The n-th differenced series
levels : list
A list of the first-value in each differenced series, for
[first-difference, second-difference, ..., n-th difference]
Returns
-------
y : array-like
The original series de-differenced
Examples
--------
>>> x = np.array([1, 3, 9., 19, 8.])
>>> levels = unintegrate_levels(x, 2)
>>> levels
array([ 1., 2.])
>>> unintegrate(np.diff(x, 2), levels)
array([ 1., 3., 9., 19., 8.])
"""
levels = list(levels)[:] # copy
if len(levels) > 1:
x0 = levels.pop(-1)
return unintegrate(np.cumsum(np.r_[x0, x]), levels)
x0 = levels[0]
return np.cumsum(np.r_[x0, x])
def freq_to_period(freq):
"""
Convert a pandas frequency to a periodicity
Parameters
----------
freq : str or offset
Frequency to convert
Returns
-------
period : int
Periodicity of freq
Notes
-----
Annual maps to 1, quarterly maps to 4, monthly to 12, weekly to 52.
"""
if not isinstance(freq, offsets.DateOffset):
freq = to_offset(freq) # go ahead and standardize
freq = freq.rule_code.upper()
if freq == 'A' or freq.startswith(('A-', 'AS-')):
return 1
elif freq == 'Q' or freq.startswith(('Q-', 'QS-')):
return 4
elif freq == 'M' or freq.startswith(('M-', 'MS')):
return 12
elif freq == 'B' or freq == 'W' or freq.startswith('W-'):
return 52
else: # pragma : no cover
raise ValueError("freq {} not understood. Please report if you "
"think this is in error.".format(freq))
__all__ = ['lagmat', 'lagmat2ds','add_trend', 'duplication_matrix',
'elimination_matrix', 'commutation_matrix',
'vec', 'vech', 'unvec', 'unvech']
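# A small usage sketch for freq_to_period, matching the mapping described in
# its Notes section (annual -> 1, quarterly -> 4, monthly -> 12).
def _freq_to_period_example():
    assert freq_to_period('A') == 1
    assert freq_to_period('Q') == 4
    assert freq_to_period('M') == 12
    return True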
if __name__ == '__main__':
# sanity check, mainly for imports
x = np.random.normal(size=(100,2))
tmp = lagmat(x,2)
tmp = lagmat2ds(x,2)
# grangercausalitytests(x, 2)
| bsd-3-clause |
ryandougherty/mwa-capstone | MWA_Tools/build/matplotlib/examples/pylab_examples/font_table_ttf.py | 3 | 1544 | #!/usr/bin/env python
# -*- noplot -*-
"""
matplotlib has support for freetype fonts. Here's a little example
using the 'table' command to build a font table that shows the glyphs
by character code.
Usage python font_table_ttf.py somefile.ttf
"""
import sys, os
from matplotlib.ft2font import FT2Font
from pylab import figure, table, show, axis, title
from matplotlib.font_manager import FontProperties
# the font table grid
labelc = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9',
'A', 'B', 'C', 'D', 'E', 'F']
labelr = ['00', '10', '20', '30', '40', '50', '60', '70', '80', '90',
'A0', 'B0', 'C0', 'D0', 'E0', 'F0']
fontname = sys.argv[1]
font = FT2Font(fontname)
codes = font.get_charmap().items()
codes.sort()
# a 16,16 array of character strings
chars = [ ['' for c in range(16)] for r in range(16)]
colors = [ [(0.95,0.95,0.95) for c in range(16)] for r in range(16)]
figure(figsize=(8,4),dpi=120)
for ccode, glyphind in codes:
if ccode>=256: continue
r,c = divmod(ccode,16)
s = unichr(ccode)
chars[r][c] = s
lightgrn = (0.5,0.8,0.5)
title(fontname)
tab = table(cellText=chars,
rowLabels=labelr,
colLabels=labelc,
rowColours=[lightgrn]*16,
colColours=[lightgrn]*16,
cellColours=colors,
cellLoc='center',
loc='upper left')
for key, cell in tab.get_celld().items():
row, col = key
if row>0 and col>0:
cell.set_text_props(fontproperties=FontProperties(fname=sys.argv[1]))
axis('off')
show()
| gpl-2.0 |
madcowswe/ODrive | tools/setup.py | 1 | 4104 | """
This script is used to deploy the ODrive python tools to PyPi
so that users can install them easily with
"pip install odrive"
To install the package and its dependencies locally, run:
sudo pip install -r requirements.txt
To build and package the python tools into a tar archive:
python setup.py sdist
Warning: Before you proceed, be aware that you can upload a
specific version only once ever. After that you need to increment
the hotfix number. Deleting the release manually on the PyPi
website does not help.
Use TestPyPi while developing.
To build, package and upload the python tools to TestPyPi, run:
python setup.py sdist upload -r pypitest
To make a real release ensure you're at the release commit
and then run the above command without the "test" (so just "pypi").
To install a prerelease version from test index:
(extra-index-url is there because some packages don't upload to test server)
sudo pip install --pre --index-url https://test.pypi.org/simple/ --extra-index-url https://pypi.org/simple/ --no-cache-dir odrive
PyPi access requires that you have set up ~/.pypirc with your
PyPi credentials and that your account has the rights
to publish packages with the name odrive.
"""
# Set to true to make the current release
is_release = True
# Set to true to make an official post-release, rather than dev of new version
is_post_release = True
post_rel_num = 0
# To test higher numbered releases, bump to the next rev
devnum = 0
bump_rev = not is_post_release and not is_release
# TODO: add an additional y/n prompt to prevent erroneous uploads
from setuptools import setup
import os
import sys
if sys.version_info < (3, 3):
import exceptions
PermissionError = exceptions.OSError
creating_package = "sdist" in sys.argv
# Load version from Git tag
import odrive.version
version = odrive.version.get_version_str(
git_only=creating_package,
is_post_release=is_post_release,
bump_rev=bump_rev,
release_override=is_release)
# If we're currently creating the package we need to autogenerate
# a file that contains the version string
if creating_package:
if is_post_release:
version += str(post_rel_num)
elif (devnum > 0):
version += str(devnum)
version_file_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'odrive', 'version.txt')
with open(version_file_path, mode='w') as version_file:
version_file.write(version)
# TODO: find a better place for this
if not creating_package:
import platform
if platform.system() == 'Linux':
try:
odrive.version.setup_udev_rules(None)
except Exception:
print("Warning: could not set up udev rules. Run `sudo odrivetool udev-setup` to try again.")
try:
setup(
name = 'odrive',
packages = ['odrive', 'odrive.dfuse', 'odrive.pyfibre.fibre'],
scripts = ['odrivetool', 'odrivetool.bat', 'odrive_demo.py'],
version = version,
description = 'Control utilities for the ODrive high performance motor controller',
author = 'Oskar Weigl',
author_email = '[email protected]',
license='MIT',
url = 'https://github.com/madcowswe/ODrive',
keywords = ['odrive', 'motor', 'motor control'],
install_requires = [
'ipython', # Used to do the interactive parts of the odrivetool
'PyUSB', # Only required for DFU. Normal communication happens through libfibre.
'requests', # Used to by DFU to load firmware files
'IntelHex', # Used to by DFU to download firmware from github
'matplotlib', # Required to run the liveplotter
'monotonic', # For compatibility with older python versions
'setuptools', # ubuntu-latest on GitHub Actions fails to install odrive without this dependency
'pywin32 >= 222; platform_system == "Windows"' # Required for fancy terminal features on Windows
],
package_data={'': [
'version.txt',
'pyfibre/fibre/*.so',
'pyfibre/fibre/*.dll',
'pyfibre/fibre/*.dylib'
]},
classifiers = [],
)
# TODO: include README
finally:
# clean up
if creating_package:
os.remove(version_file_path)
| mit |
jmargeta/scikit-learn | sklearn/linear_model/__init__.py | 6 | 2578 | """
The :mod:`sklearn.linear_model` module implements generalized linear models. It
includes Ridge regression, Bayesian Regression, Lasso and Elastic Net
estimators computed with Least Angle Regression and coordinate descent. It also
implements Stochastic Gradient Descent related algorithms.
"""
# See http://scikit-learn.sourceforge.net/modules/sgd.html and
# http://scikit-learn.sourceforge.net/modules/linear_model.html for
# complete documentation.
from .base import LinearRegression
from .bayes import BayesianRidge, ARDRegression
from .least_angle import (Lars, LassoLars, lars_path, LarsCV, LassoLarsCV,
LassoLarsIC)
from .coordinate_descent import (Lasso, ElasticNet, LassoCV, ElasticNetCV,
lasso_path, enet_path, MultiTaskLasso,
MultiTaskElasticNet)
from .sgd_fast import Hinge, Log, ModifiedHuber, SquaredLoss, Huber
from .stochastic_gradient import SGDClassifier, SGDRegressor
from .ridge import (Ridge, RidgeCV, RidgeClassifier, RidgeClassifierCV,
ridge_regression)
from .logistic import LogisticRegression
from .omp import orthogonal_mp, orthogonal_mp_gram, OrthogonalMatchingPursuit
from .passive_aggressive import PassiveAggressiveClassifier
from .passive_aggressive import PassiveAggressiveRegressor
from .perceptron import Perceptron
from .randomized_l1 import (RandomizedLasso, RandomizedLogisticRegression,
lasso_stability_path)
__all__ = ['ARDRegression',
'BayesianRidge',
'ElasticNet',
'ElasticNetCV',
'Hinge',
'Huber',
'Lars',
'LarsCV',
'Lasso',
'LassoCV',
'LassoLars',
'LassoLarsCV',
'LassoLarsIC',
'LinearRegression',
'Log',
'LogisticRegression',
'ModifiedHuber',
'MultiTaskElasticNet',
'MultiTaskLasso',
'OrthogonalMatchingPursuit',
'PassiveAggressiveClassifier',
'PassiveAggressiveRegressor',
'Perceptron',
'RandomizedLasso',
'RandomizedLogisticRegression',
'Ridge',
'RidgeCV',
'RidgeClassifier',
'RidgeClassifierCV',
'SGDClassifier',
'SGDRegressor',
'SquaredLoss',
'enet_path',
'lars_path',
'lasso_path',
'lasso_stability_path',
'orthogonal_mp',
'orthogonal_mp_gram',
'ridge_regression']
| bsd-3-clause |
cl4rke/scikit-learn | examples/classification/plot_digits_classification.py | 289 | 2397 | """
================================
Recognizing hand-written digits
================================
An example showing how scikit-learn can be used to recognize images of
hand-written digits.
This example is commented in the
:ref:`tutorial section of the user manual <introduction>`.
"""
print(__doc__)
# Author: Gael Varoquaux <gael dot varoquaux at normalesup dot org>
# License: BSD 3 clause
# Standard scientific Python imports
import matplotlib.pyplot as plt
# Import datasets, classifiers and performance metrics
from sklearn import datasets, svm, metrics
# The digits dataset
digits = datasets.load_digits()
# The data that we are interested in is made of 8x8 images of digits, let's
# have a look at the first 3 images, stored in the `images` attribute of the
# dataset. If we were working from image files, we could load them using
# pylab.imread. Note that each image must have the same size. For these
# images, we know which digit they represent: it is given in the 'target' of
# the dataset.
images_and_labels = list(zip(digits.images, digits.target))
for index, (image, label) in enumerate(images_and_labels[:4]):
plt.subplot(2, 4, index + 1)
plt.axis('off')
plt.imshow(image, cmap=plt.cm.gray_r, interpolation='nearest')
plt.title('Training: %i' % label)
# To apply a classifier on this data, we need to flatten the image, to
# turn the data in a (samples, feature) matrix:
n_samples = len(digits.images)
data = digits.images.reshape((n_samples, -1))
# Create a classifier: a support vector classifier
classifier = svm.SVC(gamma=0.001)
# We learn the digits on the first half of the digits
classifier.fit(data[:n_samples / 2], digits.target[:n_samples / 2])
# Now predict the value of the digit on the second half:
expected = digits.target[n_samples / 2:]
predicted = classifier.predict(data[n_samples / 2:])
print("Classification report for classifier %s:\n%s\n"
% (classifier, metrics.classification_report(expected, predicted)))
print("Confusion matrix:\n%s" % metrics.confusion_matrix(expected, predicted))
images_and_predictions = list(zip(digits.images[n_samples / 2:], predicted))
for index, (image, prediction) in enumerate(images_and_predictions[:4]):
plt.subplot(2, 4, index + 5)
plt.axis('off')
plt.imshow(image, cmap=plt.cm.gray_r, interpolation='nearest')
plt.title('Prediction: %i' % prediction)
plt.show()
| bsd-3-clause |
apur27/public | ASX-Python/LoadTrainPredict-LSTM.py | 1 | 3715 |
import glob
#import os
import pandas as pd
colnames=['Ticker', 'Date', 'Open', 'High', 'Low', 'Close', 'Volume']
def pivotAndInterpolate(row,index,column,reIndex, interpolater,limiter, df):
dfOut = df.pivot_table(row, index, column)
dfOut.index = pd.to_datetime(dfOut.index, format='%Y%m%d')
dfOut = dfOut.reindex(reIndex)
dfOut=dfOut.interpolate(method=interpolater, limit_area=limiter)
dfOut=dfOut.fillna(0)
return dfOut
all_files = glob.glob('C:/QM/rnd/ASX-2015-2018/ASX-2015-2018/2*.txt') # advisable to use os.path.join as this makes concatenation OS independent
df_from_each_file = (pd.read_csv(f, names=colnames, header=None, encoding='utf-8') for f in all_files)
data = pd.concat(df_from_each_file, ignore_index=True, sort=True)
data['HighLow'] = data['High']/data['Low']
index = pd.date_range('20150102','20180629')
dfOpen=pivotAndInterpolate('Open', ['Date'], 'Ticker',index, 'linear','inside', data)
dfLow=pivotAndInterpolate('High', ['Date'], 'Ticker',index, 'linear','inside',data)
dfHigh=pivotAndInterpolate('Low', ['Date'], 'Ticker',index, 'linear','inside',data)
dfClose=pivotAndInterpolate('Close', ['Date'], 'Ticker',index, 'linear','inside',data)
dfVolume=pivotAndInterpolate('Volume', ['Date'], 'Ticker',index, 'linear','inside',data)
dfHighLow=pivotAndInterpolate('HighLow', ['Date'], 'Ticker',index, 'linear','inside',data)
dfCloseReturns=dfClose/dfClose.shift(1) - 1 #Close to close Returns
import numpy as np
import matplotlib.pyplot as plt
#importing prophet
from sklearn.preprocessing import MinMaxScaler
from keras.models import Sequential
from keras.layers import Dense, Dropout, LSTM
asxTicker='SLR'
ticker=dfClose[asxTicker]
ticker=ticker.reset_index()
new_data = pd.DataFrame(index=range(0,len(ticker)),columns=['Date', 'Close'])
for i in range(0,len(ticker)):
new_data['Date'][i] = ticker['index'][i]
new_data['Close'][i] = ticker[asxTicker][i]
trainSize=700
#new_data['Date'] = pd.to_datetime(new_data['Date'],format='%Y-%m-%d')
new_data.index = new_data.Date
new_data.drop('Date', axis=1, inplace=True)
#creating train and test sets
dataset = new_data.values
train = dataset[0:trainSize,:]
valid = dataset[trainSize:,:]
#converting dataset into x_train and y_train
scaler = MinMaxScaler(feature_range=(0, 1))
scaled_data = scaler.fit_transform(dataset)
x_train, y_train = [], []
for i in range(60,len(train)):
x_train.append(scaled_data[i-60:i,0])
y_train.append(scaled_data[i,0])
x_train, y_train = np.array(x_train), np.array(y_train)
x_train = np.reshape(x_train, (x_train.shape[0],x_train.shape[1],1))
# create and fit the LSTM network
model = Sequential()
model.add(LSTM(units=50, return_sequences=True, input_shape=(x_train.shape[1],1)))
model.add(LSTM(units=50))
model.add(Dense(1))
model.compile(loss='mean_squared_error', optimizer='adam')
model.fit(x_train, y_train, epochs=1, batch_size=1, verbose=2)
#predicting 246 values, using past 60 from the train data
inputs = new_data[len(new_data) - len(valid) - 60:].values
inputs = inputs.reshape(-1,1)
inputs = scaler.transform(inputs)
X_test = []
for i in range(60,inputs.shape[0]):
X_test.append(inputs[i-60:i,0])
X_test = np.array(X_test)
X_test = np.reshape(X_test, (X_test.shape[0],X_test.shape[1],1))
closing_price = model.predict(X_test)
closing_price = scaler.inverse_transform(closing_price)
rmsL=np.sqrt(np.mean(np.power((valid-closing_price),2)))
#for plotting
train = new_data[:trainSize]
valid = new_data[trainSize:]
valid['Predictions'] = closing_price
plt.plot(train['Close'])
plt.plot(valid[['Close','Predictions']])
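# A closing step (hedged suggestion, using only names defined above) to report
# the validation RMSE computed earlier and render the figures when the script
# is run non-interactively:
print('validation RMSE:', rmsL)
plt.show()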
| artistic-2.0 |
alexmojaki/odo | odo/backends/tests/test_aws.py | 3 | 9423 | from __future__ import print_function
import pytest
import sys
pytestmark = pytest.mark.skipif(sys.platform == 'win32',
reason='Requires Mac or Linux')
sa = pytest.importorskip('sqlalchemy')
boto = pytest.importorskip('boto')
pytest.importorskip('psycopg2')
pytest.importorskip('redshift_sqlalchemy')
import os
import itertools
import json
from contextlib import contextmanager, closing
from odo import into, resource, S3, discover, CSV, drop, append, odo
from odo.backends.aws import get_s3_connection
from odo.utils import tmpfile
from odo.compatibility import urlopen
import pandas as pd
import pandas.util.testing as tm
import datashape
from datashape import string, float64, int64
from boto.exception import S3ResponseError, NoAuthHandlerFound
tips_uri = 's3://nyqpug/tips.csv'
df = pd.DataFrame({
'a': list('abc'),
'b': [1, 2, 3],
'c': [1.0, 2.0, 3.0]
})[['a', 'b', 'c']]
js = pd.io.json.loads(pd.io.json.dumps(df, orient='records'))
is_authorized = False
tried = False
with closing(urlopen('http://httpbin.org/ip')) as url:
public_ip = json.loads(url.read().decode())['origin']
cidrip = public_ip + '/32'
@pytest.fixture(scope='module')
def rs_auth():
# if we aren't authorized and we've tried to authorize then skip, prevents
# us from having to deal with timeouts
# TODO: this will fail if we want to use a testing cluster with a different
# security group than 'default'
global is_authorized, tried
if not is_authorized and not tried:
if not tried:
try:
conn = boto.connect_redshift()
except NoAuthHandlerFound as e:
pytest.skip('authorization to access redshift cluster failed '
'%s' % e)
try:
conn.authorize_cluster_security_group_ingress('default',
cidrip=cidrip)
except boto.redshift.exceptions.AuthorizationAlreadyExists:
is_authorized = True
except Exception as e:
pytest.skip('authorization to access redshift cluster failed '
'%s' % e)
else:
is_authorized = True
finally:
tried = True
else:
pytest.skip('authorization to access redshift cluster failed')
@pytest.fixture
def db(rs_auth):
key = os.environ.get('REDSHIFT_DB_URI', None)
if not key:
pytest.skip('Please define a non-empty environment variable called '
'REDSHIFT_DB_URI to test redshift <- S3')
else:
return key
@pytest.yield_fixture
def temp_tb(db):
t = '%s::%s' % (db, next(_tmps))
try:
yield t
finally:
drop(resource(t))
@pytest.yield_fixture
def tmpcsv():
with tmpfile('.csv') as fn:
with open(fn, mode='w') as f:
df.to_csv(f, index=False)
yield fn
@contextmanager
def s3_bucket(extension):
with conn():
b = 's3://%s/%s%s' % (test_bucket_name, next(_tmps), extension)
try:
yield b
finally:
drop(resource(b))
@contextmanager
def conn():
# requires that you have a config file or envars defined for credentials
# this code makes me hate exceptions
try:
conn = get_s3_connection()
except S3ResponseError:
pytest.skip('unable to connect to s3')
else:
try:
grants = conn.get_bucket(test_bucket_name).get_acl().acl.grants
except S3ResponseError:
pytest.skip('no permission to read on bucket %s' %
test_bucket_name)
else:
if not any(g.permission == 'FULL_CONTROL' or
g.permission == 'READ' for g in grants):
pytest.skip('no permission to read on bucket %s' %
test_bucket_name)
else:
yield conn
test_bucket_name = 'into-redshift-csvs'
_tmps = ('tmp%d' % i for i in itertools.count())
def test_s3_resource():
csv = resource(tips_uri)
assert isinstance(csv, S3(CSV))
def test_s3_discover():
csv = resource(tips_uri)
assert isinstance(discover(csv), datashape.DataShape)
def test_s3_to_local_csv():
with tmpfile('.csv') as fn:
csv = into(fn, tips_uri)
path = os.path.abspath(csv.path)
assert os.path.exists(path)
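# Illustrative, editor-added sketch (not an original test): the same ``into``
# round trip exercised by the S3 tests in this module, but against a temporary
# file on disk so it needs no AWS credentials. The function name is made up and
# deliberately not prefixed with ``test_`` so pytest will not collect it.
def local_csv_roundtrip_sketch():
    with tmpfile('.csv') as fn:
        csv = into(fn, df)                # DataFrame -> local CSV file
        result = into(pd.DataFrame, csv)  # local CSV file -> DataFrame
        tm.assert_frame_equal(result, df)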
def test_csv_to_s3_append():
df = tm.makeMixedDataFrame()
with tmpfile('.csv') as fn:
with s3_bucket('.csv') as b:
s3 = resource(b)
df.to_csv(fn, index=False)
append(s3, CSV(fn))
result = into(pd.DataFrame, s3)
tm.assert_frame_equal(df, result)
def test_csv_to_s3_into():
df = tm.makeMixedDataFrame()
with tmpfile('.csv') as fn:
with s3_bucket('.csv') as b:
df.to_csv(fn, index=False)
s3 = into(b, CSV(fn))
result = into(pd.DataFrame, s3)
tm.assert_frame_equal(df, result)
def test_s3_to_redshift(temp_tb):
s3 = resource(tips_uri)
table = into(temp_tb, s3)
assert discover(table) == discover(s3)
assert into(set, table) == into(set, s3)
def test_redshift_getting_started(temp_tb):
dshape = datashape.dshape("""var * {
userid: int64,
username: ?string[8],
firstname: ?string[30],
lastname: ?string[30],
city: ?string[30],
state: ?string[2],
email: ?string[100],
phone: ?string[14],
likesports: ?bool,
liketheatre: ?bool,
likeconcerts: ?bool,
likejazz: ?bool,
likeclassical: ?bool,
likeopera: ?bool,
likerock: ?bool,
likevegas: ?bool,
likebroadway: ?bool,
likemusicals: ?bool,
}""")
csv = S3(CSV)('s3://awssampledb/tickit/allusers_pipe.txt')
table = into(temp_tb, csv, dshape=dshape)
# make sure we have a non empty table
assert table.count().scalar() == 49990
def test_redshift_dwdate(temp_tb):
dshape = datashape.dshape("""var * {
key: int64,
date: string[19],
day_of_week: string[10],
month: string[10],
year: int64,
year_month_num: int64,
year_month: string[8],
day_num_in_week: int64,
day_num_in_month: int64,
day_num_in_year: int64,
month_num_in_year: int64,
week_num_in_year: int64,
selling_season: string[13],
last_day_in_week_fl: string[1],
last_day_in_month_fl: string[1],
holiday_fl: string[1],
weekday_fl: string[1]
}""")
# we have to pass the separator here because the date column has a comma
# TODO: see if we can provide a better error message by querying
# stl_load_errors
assert odo(S3(CSV)('s3://awssampledb/ssbgz/dwdate'),
temp_tb,
delimiter='|',
compression='gzip',
dshape=dshape).count().scalar() == 2556
def test_frame_to_s3_to_frame():
with s3_bucket('.csv') as b:
s3_csv = into(b, df)
result = into(pd.DataFrame, s3_csv)
tm.assert_frame_equal(result, df)
def test_csv_to_redshift(tmpcsv, temp_tb):
assert into(set, into(temp_tb, tmpcsv)) == into(set, tmpcsv)
def test_frame_to_redshift(temp_tb):
tb = into(temp_tb, df)
assert into(set, tb) == into(set, df)
def test_textfile_to_s3():
text = 'A cow jumped over the moon'
with tmpfile('.txt') as fn:
with s3_bucket('.txt') as b:
with open(fn, mode='w') as f:
f.write(os.linesep.join(text.split()))
result = into(b, resource(fn))
assert discover(result) == datashape.dshape('var * string')
def test_jsonlines_to_s3():
with tmpfile('.json') as fn:
with open(fn, mode='w') as f:
for row in js:
f.write(pd.io.json.dumps(row))
f.write(os.linesep)
with s3_bucket('.json') as b:
result = into(b, resource(fn))
assert discover(result) == discover(js)
def test_s3_jsonlines_discover():
json_dshape = discover(resource('s3://nyqpug/tips.json'))
names = list(map(str, sorted(json_dshape.measure.names)))
assert names == ['day', 'sex', 'size', 'smoker', 'time', 'tip',
'total_bill']
types = [json_dshape.measure[name] for name in names]
assert types == [string, string, int64, string, string, float64, float64]
def test_s3_csv_discover():
result = discover(resource('s3://nyqpug/tips.csv'))
expected = datashape.dshape("""var * {
total_bill: ?float64,
tip: ?float64,
sex: ?string,
smoker: ?string,
day: ?string,
time: ?string,
size: int64
}""")
assert result == expected
def test_s3_gz_csv_discover():
result = discover(S3(CSV)('s3://nyqpug/tips.gz'))
expected = datashape.dshape("""var * {
total_bill: ?float64,
tip: ?float64,
sex: ?string,
smoker: ?string,
day: ?string,
time: ?string,
size: int64
}""")
assert result == expected
def test_s3_to_sqlite():
with tmpfile('.db') as fn:
tb = into('sqlite:///%s::tips' % fn, tips_uri,
dshape=discover(resource(tips_uri)))
lhs = into(list, tb)
assert lhs == into(list, tips_uri)
| bsd-3-clause |
Achuth17/scikit-learn | sklearn/neighbors/tests/test_ball_tree.py | 129 | 10192 | import pickle
import numpy as np
from numpy.testing import assert_array_almost_equal
from sklearn.neighbors.ball_tree import (BallTree, NeighborsHeap,
simultaneous_sort, kernel_norm,
nodeheap_sort, DTYPE, ITYPE)
from sklearn.neighbors.dist_metrics import DistanceMetric
from sklearn.utils.testing import SkipTest, assert_allclose
rng = np.random.RandomState(10)
V = rng.rand(3, 3)
V = np.dot(V, V.T)
DIMENSION = 3
METRICS = {'euclidean': {},
'manhattan': {},
'minkowski': dict(p=3),
'chebyshev': {},
'seuclidean': dict(V=np.random.random(DIMENSION)),
'wminkowski': dict(p=3, w=np.random.random(DIMENSION)),
'mahalanobis': dict(V=V)}
DISCRETE_METRICS = ['hamming',
'canberra',
'braycurtis']
BOOLEAN_METRICS = ['matching', 'jaccard', 'dice', 'kulsinski',
'rogerstanimoto', 'russellrao', 'sokalmichener',
'sokalsneath']
def dist_func(x1, x2, p):
return np.sum((x1 - x2) ** p) ** (1. / p)
def brute_force_neighbors(X, Y, k, metric, **kwargs):
D = DistanceMetric.get_metric(metric, **kwargs).pairwise(Y, X)
ind = np.argsort(D, axis=1)[:, :k]
dist = D[np.arange(Y.shape[0])[:, None], ind]
return dist, ind
def test_ball_tree_query():
np.random.seed(0)
X = np.random.random((40, DIMENSION))
Y = np.random.random((10, DIMENSION))
def check_neighbors(dualtree, breadth_first, k, metric, kwargs):
bt = BallTree(X, leaf_size=1, metric=metric, **kwargs)
dist1, ind1 = bt.query(Y, k, dualtree=dualtree,
breadth_first=breadth_first)
dist2, ind2 = brute_force_neighbors(X, Y, k, metric, **kwargs)
# don't check indices here: if there are any duplicate distances,
# the indices may not match. Distances should not have this problem.
assert_array_almost_equal(dist1, dist2)
for (metric, kwargs) in METRICS.items():
for k in (1, 3, 5):
for dualtree in (True, False):
for breadth_first in (True, False):
yield (check_neighbors,
dualtree, breadth_first,
k, metric, kwargs)
def test_ball_tree_query_boolean_metrics():
np.random.seed(0)
X = np.random.random((40, 10)).round(0)
Y = np.random.random((10, 10)).round(0)
k = 5
def check_neighbors(metric):
bt = BallTree(X, leaf_size=1, metric=metric)
dist1, ind1 = bt.query(Y, k)
dist2, ind2 = brute_force_neighbors(X, Y, k, metric)
assert_array_almost_equal(dist1, dist2)
for metric in BOOLEAN_METRICS:
yield check_neighbors, metric
def test_ball_tree_query_discrete_metrics():
np.random.seed(0)
X = (4 * np.random.random((40, 10))).round(0)
Y = (4 * np.random.random((10, 10))).round(0)
k = 5
def check_neighbors(metric):
bt = BallTree(X, leaf_size=1, metric=metric)
dist1, ind1 = bt.query(Y, k)
dist2, ind2 = brute_force_neighbors(X, Y, k, metric)
assert_array_almost_equal(dist1, dist2)
for metric in DISCRETE_METRICS:
yield check_neighbors, metric
def test_ball_tree_query_radius(n_samples=100, n_features=10):
np.random.seed(0)
X = 2 * np.random.random(size=(n_samples, n_features)) - 1
query_pt = np.zeros(n_features, dtype=float)
eps = 1E-15 # roundoff error can cause test to fail
bt = BallTree(X, leaf_size=5)
rad = np.sqrt(((X - query_pt) ** 2).sum(1))
for r in np.linspace(rad[0], rad[-1], 100):
ind = bt.query_radius(query_pt, r + eps)[0]
i = np.where(rad <= r + eps)[0]
ind.sort()
i.sort()
assert_array_almost_equal(i, ind)
def test_ball_tree_query_radius_distance(n_samples=100, n_features=10):
np.random.seed(0)
X = 2 * np.random.random(size=(n_samples, n_features)) - 1
query_pt = np.zeros(n_features, dtype=float)
eps = 1E-15 # roundoff error can cause test to fail
bt = BallTree(X, leaf_size=5)
rad = np.sqrt(((X - query_pt) ** 2).sum(1))
for r in np.linspace(rad[0], rad[-1], 100):
ind, dist = bt.query_radius(query_pt, r + eps, return_distance=True)
ind = ind[0]
dist = dist[0]
d = np.sqrt(((query_pt - X[ind]) ** 2).sum(1))
assert_array_almost_equal(d, dist)
def compute_kernel_slow(Y, X, kernel, h):
d = np.sqrt(((Y[:, None, :] - X) ** 2).sum(-1))
norm = kernel_norm(h, X.shape[1], kernel)
if kernel == 'gaussian':
return norm * np.exp(-0.5 * (d * d) / (h * h)).sum(-1)
elif kernel == 'tophat':
return norm * (d < h).sum(-1)
elif kernel == 'epanechnikov':
return norm * ((1.0 - (d * d) / (h * h)) * (d < h)).sum(-1)
elif kernel == 'exponential':
return norm * (np.exp(-d / h)).sum(-1)
elif kernel == 'linear':
return norm * ((1 - d / h) * (d < h)).sum(-1)
elif kernel == 'cosine':
return norm * (np.cos(0.5 * np.pi * d / h) * (d < h)).sum(-1)
else:
raise ValueError('kernel not recognized')
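# Illustrative, editor-added sketch (not an original test): checks
# BallTree.kernel_density directly against the reference implementation above
# for a single kernel/bandwidth pair. The sizes and tolerance are arbitrary
# choices, and the name avoids the ``test_`` prefix so it is not collected.
def check_kde_against_slow_sketch(kernel='gaussian', h=0.1):
    rng_local = np.random.RandomState(1)
    X_ = rng_local.random_sample((50, 3))
    Y_ = rng_local.random_sample((20, 3))
    bt_ = BallTree(X_, leaf_size=10)
    dens = bt_.kernel_density(Y_, h, atol=0, rtol=0, kernel=kernel)
    dens_true = compute_kernel_slow(Y_, X_, kernel, h)
    assert_allclose(dens, dens_true, rtol=1e-7)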
def test_ball_tree_kde(n_samples=100, n_features=3):
np.random.seed(0)
X = np.random.random((n_samples, n_features))
Y = np.random.random((n_samples, n_features))
bt = BallTree(X, leaf_size=10)
for kernel in ['gaussian', 'tophat', 'epanechnikov',
'exponential', 'linear', 'cosine']:
for h in [0.01, 0.1, 1]:
dens_true = compute_kernel_slow(Y, X, kernel, h)
def check_results(kernel, h, atol, rtol, breadth_first):
dens = bt.kernel_density(Y, h, atol=atol, rtol=rtol,
kernel=kernel,
breadth_first=breadth_first)
assert_allclose(dens, dens_true,
atol=atol, rtol=max(rtol, 1e-7))
for rtol in [0, 1E-5]:
for atol in [1E-6, 1E-2]:
for breadth_first in (True, False):
yield (check_results, kernel, h, atol, rtol,
breadth_first)
def test_gaussian_kde(n_samples=1000):
# Compare gaussian KDE results to scipy.stats.gaussian_kde
from scipy.stats import gaussian_kde
np.random.seed(0)
x_in = np.random.normal(0, 1, n_samples)
x_out = np.linspace(-5, 5, 30)
for h in [0.01, 0.1, 1]:
bt = BallTree(x_in[:, None])
try:
gkde = gaussian_kde(x_in, bw_method=h / np.std(x_in))
except TypeError:
raise SkipTest("Old version of scipy, doesn't accept "
"explicit bandwidth.")
dens_bt = bt.kernel_density(x_out[:, None], h) / n_samples
dens_gkde = gkde.evaluate(x_out)
assert_array_almost_equal(dens_bt, dens_gkde, decimal=3)
def test_ball_tree_two_point(n_samples=100, n_features=3):
np.random.seed(0)
X = np.random.random((n_samples, n_features))
Y = np.random.random((n_samples, n_features))
r = np.linspace(0, 1, 10)
bt = BallTree(X, leaf_size=10)
D = DistanceMetric.get_metric("euclidean").pairwise(Y, X)
counts_true = [(D <= ri).sum() for ri in r]
def check_two_point(r, dualtree):
counts = bt.two_point_correlation(Y, r=r, dualtree=dualtree)
assert_array_almost_equal(counts, counts_true)
for dualtree in (True, False):
yield check_two_point, r, dualtree
def test_ball_tree_pickle():
np.random.seed(0)
X = np.random.random((10, 3))
bt1 = BallTree(X, leaf_size=1)
# Test if BallTree with callable metric is picklable
bt1_pyfunc = BallTree(X, metric=dist_func, leaf_size=1, p=2)
ind1, dist1 = bt1.query(X)
ind1_pyfunc, dist1_pyfunc = bt1_pyfunc.query(X)
def check_pickle_protocol(protocol):
s = pickle.dumps(bt1, protocol=protocol)
bt2 = pickle.loads(s)
s_pyfunc = pickle.dumps(bt1_pyfunc, protocol=protocol)
bt2_pyfunc = pickle.loads(s_pyfunc)
ind2, dist2 = bt2.query(X)
ind2_pyfunc, dist2_pyfunc = bt2_pyfunc.query(X)
assert_array_almost_equal(ind1, ind2)
assert_array_almost_equal(dist1, dist2)
assert_array_almost_equal(ind1_pyfunc, ind2_pyfunc)
assert_array_almost_equal(dist1_pyfunc, dist2_pyfunc)
for protocol in (0, 1, 2):
yield check_pickle_protocol, protocol
def test_neighbors_heap(n_pts=5, n_nbrs=10):
heap = NeighborsHeap(n_pts, n_nbrs)
for row in range(n_pts):
d_in = np.random.random(2 * n_nbrs).astype(DTYPE)
i_in = np.arange(2 * n_nbrs, dtype=ITYPE)
for d, i in zip(d_in, i_in):
heap.push(row, d, i)
ind = np.argsort(d_in)
d_in = d_in[ind]
i_in = i_in[ind]
d_heap, i_heap = heap.get_arrays(sort=True)
assert_array_almost_equal(d_in[:n_nbrs], d_heap[row])
assert_array_almost_equal(i_in[:n_nbrs], i_heap[row])
def test_node_heap(n_nodes=50):
vals = np.random.random(n_nodes).astype(DTYPE)
i1 = np.argsort(vals)
vals2, i2 = nodeheap_sort(vals)
assert_array_almost_equal(i1, i2)
assert_array_almost_equal(vals[i1], vals2)
def test_simultaneous_sort(n_rows=10, n_pts=201):
dist = np.random.random((n_rows, n_pts)).astype(DTYPE)
ind = (np.arange(n_pts) + np.zeros((n_rows, 1))).astype(ITYPE)
dist2 = dist.copy()
ind2 = ind.copy()
# simultaneous sort rows using function
simultaneous_sort(dist, ind)
# simultaneous sort rows using numpy
i = np.argsort(dist2, axis=1)
row_ind = np.arange(n_rows)[:, None]
dist2 = dist2[row_ind, i]
ind2 = ind2[row_ind, i]
assert_array_almost_equal(dist, dist2)
assert_array_almost_equal(ind, ind2)
def test_query_haversine():
np.random.seed(0)
X = 2 * np.pi * np.random.random((40, 2))
bt = BallTree(X, leaf_size=1, metric='haversine')
dist1, ind1 = bt.query(X, k=5)
dist2, ind2 = brute_force_neighbors(X, X, k=5, metric='haversine')
assert_array_almost_equal(dist1, dist2)
assert_array_almost_equal(ind1, ind2)
| bsd-3-clause |
ephes/scikit-learn | sklearn/ensemble/gradient_boosting.py | 126 | 65552 | """Gradient Boosted Regression Trees
This module contains methods for fitting gradient boosted regression trees for
both classification and regression.
The module structure is the following:
- The ``BaseGradientBoosting`` base class implements a common ``fit`` method
for all the estimators in the module. Regression and classification
only differ in the concrete ``LossFunction`` used.
- ``GradientBoostingClassifier`` implements gradient boosting for
classification problems.
- ``GradientBoostingRegressor`` implements gradient boosting for
regression problems.
"""
# Authors: Peter Prettenhofer, Scott White, Gilles Louppe, Emanuele Olivetti,
# Arnaud Joly
# License: BSD 3 clause
from __future__ import print_function
from __future__ import division
from abc import ABCMeta, abstractmethod
from time import time
import numbers
import numpy as np
from scipy import stats
from .base import BaseEnsemble
from ..base import BaseEstimator
from ..base import ClassifierMixin
from ..base import RegressorMixin
from ..utils import check_random_state, check_array, check_X_y, column_or_1d
from ..utils import check_consistent_length, deprecated
from ..utils.extmath import logsumexp
from ..utils.fixes import expit, bincount
from ..utils.stats import _weighted_percentile
from ..utils.validation import check_is_fitted, NotFittedError
from ..externals import six
from ..feature_selection.from_model import _LearntSelectorMixin
from ..tree.tree import DecisionTreeRegressor
from ..tree._tree import DTYPE, TREE_LEAF
from ..tree._tree import PresortBestSplitter
from ..tree._tree import FriedmanMSE
from ._gradient_boosting import predict_stages
from ._gradient_boosting import predict_stage
from ._gradient_boosting import _random_sample_mask
class QuantileEstimator(BaseEstimator):
"""An estimator predicting the alpha-quantile of the training targets."""
def __init__(self, alpha=0.9):
if not 0 < alpha < 1.0:
raise ValueError("`alpha` must be in (0, 1.0) but was %r" % alpha)
self.alpha = alpha
def fit(self, X, y, sample_weight=None):
if sample_weight is None:
self.quantile = stats.scoreatpercentile(y, self.alpha * 100.0)
else:
self.quantile = _weighted_percentile(y, sample_weight, self.alpha * 100.0)
def predict(self, X):
check_is_fitted(self, 'quantile')
y = np.empty((X.shape[0], 1), dtype=np.float64)
y.fill(self.quantile)
return y
class MeanEstimator(BaseEstimator):
"""An estimator predicting the mean of the training targets."""
def fit(self, X, y, sample_weight=None):
if sample_weight is None:
self.mean = np.mean(y)
else:
self.mean = np.average(y, weights=sample_weight)
def predict(self, X):
check_is_fitted(self, 'mean')
y = np.empty((X.shape[0], 1), dtype=np.float64)
y.fill(self.mean)
return y
class LogOddsEstimator(BaseEstimator):
"""An estimator predicting the log odds ratio."""
scale = 1.0
def fit(self, X, y, sample_weight=None):
# pre-cond: pos, neg are encoded as 1, 0
if sample_weight is None:
pos = np.sum(y)
neg = y.shape[0] - pos
else:
pos = np.sum(sample_weight * y)
neg = np.sum(sample_weight * (1 - y))
if neg == 0 or pos == 0:
raise ValueError('y contains non binary labels.')
self.prior = self.scale * np.log(pos / neg)
def predict(self, X):
check_is_fitted(self, 'prior')
y = np.empty((X.shape[0], 1), dtype=np.float64)
y.fill(self.prior)
return y
class ScaledLogOddsEstimator(LogOddsEstimator):
"""Log odds ratio scaled by 0.5 -- for exponential loss. """
scale = 0.5
class PriorProbabilityEstimator(BaseEstimator):
"""An estimator predicting the probability of each
class in the training data.
"""
def fit(self, X, y, sample_weight=None):
if sample_weight is None:
sample_weight = np.ones_like(y, dtype=np.float64)
class_counts = bincount(y, weights=sample_weight)
self.priors = class_counts / class_counts.sum()
def predict(self, X):
check_is_fitted(self, 'priors')
y = np.empty((X.shape[0], self.priors.shape[0]), dtype=np.float64)
y[:] = self.priors
return y
class ZeroEstimator(BaseEstimator):
"""An estimator that simply predicts zero. """
def fit(self, X, y, sample_weight=None):
if np.issubdtype(y.dtype, int):
# classification
self.n_classes = np.unique(y).shape[0]
if self.n_classes == 2:
self.n_classes = 1
else:
# regression
self.n_classes = 1
def predict(self, X):
check_is_fitted(self, 'n_classes')
y = np.empty((X.shape[0], self.n_classes), dtype=np.float64)
y.fill(0.0)
return y
class LossFunction(six.with_metaclass(ABCMeta, object)):
"""Abstract base class for various loss functions.
Attributes
----------
K : int
The number of regression trees to be induced;
1 for regression and binary classification;
``n_classes`` for multi-class classification.
"""
is_multi_class = False
def __init__(self, n_classes):
self.K = n_classes
def init_estimator(self):
"""Default ``init`` estimator for loss function. """
raise NotImplementedError()
@abstractmethod
def __call__(self, y, pred, sample_weight=None):
"""Compute the loss of prediction ``pred`` and ``y``. """
@abstractmethod
def negative_gradient(self, y, y_pred, **kargs):
"""Compute the negative gradient.
Parameters
        ----------
y : np.ndarray, shape=(n,)
The target labels.
y_pred : np.ndarray, shape=(n,):
The predictions.
"""
def update_terminal_regions(self, tree, X, y, residual, y_pred,
sample_weight, sample_mask,
learning_rate=1.0, k=0):
"""Update the terminal regions (=leaves) of the given tree and
updates the current predictions of the model. Traverses tree
and invokes template method `_update_terminal_region`.
Parameters
----------
tree : tree.Tree
The tree object.
X : ndarray, shape=(n, m)
The data array.
y : ndarray, shape=(n,)
The target labels.
residual : ndarray, shape=(n,)
The residuals (usually the negative gradient).
y_pred : ndarray, shape=(n,)
The predictions.
sample_weight : ndarray, shape=(n,)
The weight of each sample.
sample_mask : ndarray, shape=(n,)
The sample mask to be used.
        learning_rate : float, default=1.0
learning rate shrinks the contribution of each tree by
``learning_rate``.
k : int, default 0
The index of the estimator being updated.
"""
# compute leaf for each sample in ``X``.
terminal_regions = tree.apply(X)
# mask all which are not in sample mask.
masked_terminal_regions = terminal_regions.copy()
masked_terminal_regions[~sample_mask] = -1
# update each leaf (= perform line search)
for leaf in np.where(tree.children_left == TREE_LEAF)[0]:
self._update_terminal_region(tree, masked_terminal_regions,
leaf, X, y, residual,
y_pred[:, k], sample_weight)
# update predictions (both in-bag and out-of-bag)
y_pred[:, k] += (learning_rate
* tree.value[:, 0, 0].take(terminal_regions, axis=0))
@abstractmethod
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred, sample_weight):
"""Template method for updating terminal regions (=leaves). """
class RegressionLossFunction(six.with_metaclass(ABCMeta, LossFunction)):
"""Base class for regression loss functions. """
def __init__(self, n_classes):
if n_classes != 1:
raise ValueError("``n_classes`` must be 1 for regression but "
"was %r" % n_classes)
super(RegressionLossFunction, self).__init__(n_classes)
class LeastSquaresError(RegressionLossFunction):
"""Loss function for least squares (LS) estimation.
Terminal regions need not to be updated for least squares. """
def init_estimator(self):
return MeanEstimator()
def __call__(self, y, pred, sample_weight=None):
if sample_weight is None:
return np.mean((y - pred.ravel()) ** 2.0)
else:
return (1.0 / sample_weight.sum() *
np.sum(sample_weight * ((y - pred.ravel()) ** 2.0)))
def negative_gradient(self, y, pred, **kargs):
return y - pred.ravel()
def update_terminal_regions(self, tree, X, y, residual, y_pred,
sample_weight, sample_mask,
learning_rate=1.0, k=0):
"""Least squares does not need to update terminal regions.
But it has to update the predictions.
"""
# update predictions
y_pred[:, k] += learning_rate * tree.predict(X).ravel()
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred, sample_weight):
pass
class LeastAbsoluteError(RegressionLossFunction):
"""Loss function for least absolute deviation (LAD) regression. """
def init_estimator(self):
return QuantileEstimator(alpha=0.5)
def __call__(self, y, pred, sample_weight=None):
if sample_weight is None:
return np.abs(y - pred.ravel()).mean()
else:
return (1.0 / sample_weight.sum() *
np.sum(sample_weight * np.abs(y - pred.ravel())))
def negative_gradient(self, y, pred, **kargs):
"""1.0 if y - pred > 0.0 else -1.0"""
pred = pred.ravel()
return 2.0 * (y - pred > 0.0) - 1.0
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred, sample_weight):
"""LAD updates terminal regions to median estimates. """
terminal_region = np.where(terminal_regions == leaf)[0]
sample_weight = sample_weight.take(terminal_region, axis=0)
diff = y.take(terminal_region, axis=0) - pred.take(terminal_region, axis=0)
tree.value[leaf, 0, 0] = _weighted_percentile(diff, sample_weight, percentile=50)
class HuberLossFunction(RegressionLossFunction):
"""Huber loss function for robust regression.
M-Regression proposed in Friedman 2001.
References
----------
J. Friedman, Greedy Function Approximation: A Gradient Boosting
Machine, The Annals of Statistics, Vol. 29, No. 5, 2001.
"""
def __init__(self, n_classes, alpha=0.9):
super(HuberLossFunction, self).__init__(n_classes)
self.alpha = alpha
self.gamma = None
def init_estimator(self):
return QuantileEstimator(alpha=0.5)
def __call__(self, y, pred, sample_weight=None):
pred = pred.ravel()
diff = y - pred
gamma = self.gamma
if gamma is None:
if sample_weight is None:
gamma = stats.scoreatpercentile(np.abs(diff), self.alpha * 100)
else:
gamma = _weighted_percentile(np.abs(diff), sample_weight, self.alpha * 100)
gamma_mask = np.abs(diff) <= gamma
if sample_weight is None:
sq_loss = np.sum(0.5 * diff[gamma_mask] ** 2.0)
lin_loss = np.sum(gamma * (np.abs(diff[~gamma_mask]) - gamma / 2.0))
loss = (sq_loss + lin_loss) / y.shape[0]
else:
sq_loss = np.sum(0.5 * sample_weight[gamma_mask] * diff[gamma_mask] ** 2.0)
lin_loss = np.sum(gamma * sample_weight[~gamma_mask] *
(np.abs(diff[~gamma_mask]) - gamma / 2.0))
loss = (sq_loss + lin_loss) / sample_weight.sum()
return loss
def negative_gradient(self, y, pred, sample_weight=None, **kargs):
pred = pred.ravel()
diff = y - pred
if sample_weight is None:
gamma = stats.scoreatpercentile(np.abs(diff), self.alpha * 100)
else:
gamma = _weighted_percentile(np.abs(diff), sample_weight, self.alpha * 100)
gamma_mask = np.abs(diff) <= gamma
residual = np.zeros((y.shape[0],), dtype=np.float64)
residual[gamma_mask] = diff[gamma_mask]
residual[~gamma_mask] = gamma * np.sign(diff[~gamma_mask])
self.gamma = gamma
return residual
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred, sample_weight):
terminal_region = np.where(terminal_regions == leaf)[0]
sample_weight = sample_weight.take(terminal_region, axis=0)
gamma = self.gamma
diff = (y.take(terminal_region, axis=0)
- pred.take(terminal_region, axis=0))
median = _weighted_percentile(diff, sample_weight, percentile=50)
diff_minus_median = diff - median
tree.value[leaf, 0] = median + np.mean(
np.sign(diff_minus_median) *
np.minimum(np.abs(diff_minus_median), gamma))
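# Illustrative, editor-added sketch: the unweighted Huber criterion computed by
# ``HuberLossFunction.__call__`` above -- quadratic within ``gamma`` (the
# alpha-quantile of the absolute residuals) and linear outside it. The helper
# name is arbitrary and the function is not used by the module.
def _huber_loss_sketch(y, pred, alpha=0.9):
    diff = y - pred.ravel()
    gamma = stats.scoreatpercentile(np.abs(diff), alpha * 100)
    quad = np.abs(diff) <= gamma
    sq_loss = np.sum(0.5 * diff[quad] ** 2.0)
    lin_loss = np.sum(gamma * (np.abs(diff[~quad]) - gamma / 2.0))
    return (sq_loss + lin_loss) / y.shape[0]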
class QuantileLossFunction(RegressionLossFunction):
"""Loss function for quantile regression.
Quantile regression allows to estimate the percentiles
of the conditional distribution of the target.
"""
def __init__(self, n_classes, alpha=0.9):
super(QuantileLossFunction, self).__init__(n_classes)
assert 0 < alpha < 1.0
self.alpha = alpha
self.percentile = alpha * 100.0
def init_estimator(self):
return QuantileEstimator(self.alpha)
def __call__(self, y, pred, sample_weight=None):
pred = pred.ravel()
diff = y - pred
alpha = self.alpha
mask = y > pred
        if sample_weight is None:
            loss = (alpha * diff[mask].sum() -
                    (1.0 - alpha) * diff[~mask].sum()) / y.shape[0]
        else:
            loss = ((alpha * np.sum(sample_weight[mask] * diff[mask]) -
                     (1.0 - alpha) * np.sum(sample_weight[~mask] * diff[~mask])) /
                    sample_weight.sum())
return loss
def negative_gradient(self, y, pred, **kargs):
alpha = self.alpha
pred = pred.ravel()
mask = y > pred
return (alpha * mask) - ((1.0 - alpha) * ~mask)
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred, sample_weight):
terminal_region = np.where(terminal_regions == leaf)[0]
diff = (y.take(terminal_region, axis=0)
- pred.take(terminal_region, axis=0))
sample_weight = sample_weight.take(terminal_region, axis=0)
val = _weighted_percentile(diff, sample_weight, self.percentile)
tree.value[leaf, 0] = val
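# Illustrative, editor-added sketch: the unweighted pinball (quantile) loss and
# its negative gradient, matching ``QuantileLossFunction`` above. The helper
# name is arbitrary and the function is not used by the module.
def _pinball_sketch(y, pred, alpha=0.9):
    diff = y - pred.ravel()
    over = diff > 0  # samples whose prediction lies below the target
    loss = (alpha * diff[over].sum() -
            (1.0 - alpha) * diff[~over].sum()) / y.shape[0]
    grad = alpha * over - (1.0 - alpha) * ~over  # negative gradient w.r.t. pred
    return loss, grad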
class ClassificationLossFunction(six.with_metaclass(ABCMeta, LossFunction)):
"""Base class for classification loss functions. """
def _score_to_proba(self, score):
"""Template method to convert scores to probabilities.
        If the loss does not support probabilities, an error is raised.
"""
raise TypeError('%s does not support predict_proba' % type(self).__name__)
@abstractmethod
def _score_to_decision(self, score):
"""Template method to convert scores to decisions.
Returns int arrays.
"""
class BinomialDeviance(ClassificationLossFunction):
"""Binomial deviance loss function for binary classification.
Binary classification is a special case; here, we only need to
fit one tree instead of ``n_classes`` trees.
"""
def __init__(self, n_classes):
if n_classes != 2:
raise ValueError("{0:s} requires 2 classes.".format(
self.__class__.__name__))
# we only need to fit one tree for binary clf.
super(BinomialDeviance, self).__init__(1)
def init_estimator(self):
return LogOddsEstimator()
def __call__(self, y, pred, sample_weight=None):
"""Compute the deviance (= 2 * negative log-likelihood). """
# logaddexp(0, v) == log(1.0 + exp(v))
pred = pred.ravel()
if sample_weight is None:
return -2.0 * np.mean((y * pred) - np.logaddexp(0.0, pred))
else:
return (-2.0 / sample_weight.sum() *
np.sum(sample_weight * ((y * pred) - np.logaddexp(0.0, pred))))
def negative_gradient(self, y, pred, **kargs):
"""Compute the residual (= negative gradient). """
return y - expit(pred.ravel())
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred, sample_weight):
"""Make a single Newton-Raphson step.
our node estimate is given by:
sum(w * (y - prob)) / sum(w * prob * (1 - prob))
we take advantage that: y - prob = residual
"""
terminal_region = np.where(terminal_regions == leaf)[0]
residual = residual.take(terminal_region, axis=0)
y = y.take(terminal_region, axis=0)
sample_weight = sample_weight.take(terminal_region, axis=0)
numerator = np.sum(sample_weight * residual)
denominator = np.sum(sample_weight * (y - residual) * (1 - y + residual))
if denominator == 0.0:
tree.value[leaf, 0, 0] = 0.0
else:
tree.value[leaf, 0, 0] = numerator / denominator
def _score_to_proba(self, score):
proba = np.ones((score.shape[0], 2), dtype=np.float64)
proba[:, 1] = expit(score.ravel())
proba[:, 0] -= proba[:, 1]
return proba
def _score_to_decision(self, score):
proba = self._score_to_proba(score)
return np.argmax(proba, axis=1)
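# Illustrative, editor-added sketch: binomial deviance, its negative gradient,
# and the unweighted Newton leaf value used by ``_update_terminal_region``
# above (note that ``y - residual`` equals the predicted probability). The
# helper name is arbitrary and the function is not used by the module.
def _binomial_deviance_sketch(y, pred):
    pred = pred.ravel()
    deviance = -2.0 * np.mean(y * pred - np.logaddexp(0.0, pred))
    prob = expit(pred)
    residual = y - prob  # negative gradient
    leaf_value = np.sum(residual) / np.sum(prob * (1.0 - prob))
    return deviance, residual, leaf_value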
class MultinomialDeviance(ClassificationLossFunction):
"""Multinomial deviance loss function for multi-class classification.
For multi-class classification we need to fit ``n_classes`` trees at
each stage.
"""
is_multi_class = True
def __init__(self, n_classes):
if n_classes < 3:
raise ValueError("{0:s} requires more than 2 classes.".format(
self.__class__.__name__))
super(MultinomialDeviance, self).__init__(n_classes)
def init_estimator(self):
return PriorProbabilityEstimator()
def __call__(self, y, pred, sample_weight=None):
# create one-hot label encoding
Y = np.zeros((y.shape[0], self.K), dtype=np.float64)
for k in range(self.K):
Y[:, k] = y == k
if sample_weight is None:
return np.sum(-1 * (Y * pred).sum(axis=1) +
logsumexp(pred, axis=1))
else:
return np.sum(-1 * sample_weight * (Y * pred).sum(axis=1) +
logsumexp(pred, axis=1))
def negative_gradient(self, y, pred, k=0, **kwargs):
"""Compute negative gradient for the ``k``-th class. """
return y - np.nan_to_num(np.exp(pred[:, k] -
logsumexp(pred, axis=1)))
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred, sample_weight):
"""Make a single Newton-Raphson step. """
terminal_region = np.where(terminal_regions == leaf)[0]
residual = residual.take(terminal_region, axis=0)
y = y.take(terminal_region, axis=0)
sample_weight = sample_weight.take(terminal_region, axis=0)
numerator = np.sum(sample_weight * residual)
numerator *= (self.K - 1) / self.K
denominator = np.sum(sample_weight * (y - residual) *
(1.0 - y + residual))
if denominator == 0.0:
tree.value[leaf, 0, 0] = 0.0
else:
tree.value[leaf, 0, 0] = numerator / denominator
def _score_to_proba(self, score):
return np.nan_to_num(
np.exp(score - (logsumexp(score, axis=1)[:, np.newaxis])))
def _score_to_decision(self, score):
proba = self._score_to_proba(score)
return np.argmax(proba, axis=1)
class ExponentialLoss(ClassificationLossFunction):
"""Exponential loss function for binary classification.
Same loss as AdaBoost.
References
----------
Greg Ridgeway, Generalized Boosted Models: A guide to the gbm package, 2007
"""
def __init__(self, n_classes):
if n_classes != 2:
raise ValueError("{0:s} requires 2 classes.".format(
self.__class__.__name__))
# we only need to fit one tree for binary clf.
super(ExponentialLoss, self).__init__(1)
def init_estimator(self):
return ScaledLogOddsEstimator()
def __call__(self, y, pred, sample_weight=None):
pred = pred.ravel()
if sample_weight is None:
return np.mean(np.exp(-(2. * y - 1.) * pred))
else:
return (1.0 / sample_weight.sum() *
np.sum(sample_weight * np.exp(-(2 * y - 1) * pred)))
def negative_gradient(self, y, pred, **kargs):
y_ = -(2. * y - 1.)
return y_ * np.exp(y_ * pred.ravel())
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred, sample_weight):
terminal_region = np.where(terminal_regions == leaf)[0]
pred = pred.take(terminal_region, axis=0)
y = y.take(terminal_region, axis=0)
sample_weight = sample_weight.take(terminal_region, axis=0)
y_ = 2. * y - 1.
numerator = np.sum(y_ * sample_weight * np.exp(-y_ * pred))
denominator = np.sum(sample_weight * np.exp(-y_ * pred))
if denominator == 0.0:
tree.value[leaf, 0, 0] = 0.0
else:
tree.value[leaf, 0, 0] = numerator / denominator
def _score_to_proba(self, score):
proba = np.ones((score.shape[0], 2), dtype=np.float64)
proba[:, 1] = expit(2.0 * score.ravel())
proba[:, 0] -= proba[:, 1]
return proba
def _score_to_decision(self, score):
return (score.ravel() >= 0.0).astype(np.int)
LOSS_FUNCTIONS = {'ls': LeastSquaresError,
'lad': LeastAbsoluteError,
'huber': HuberLossFunction,
'quantile': QuantileLossFunction,
'deviance': None, # for both, multinomial and binomial
'exponential': ExponentialLoss,
}
INIT_ESTIMATORS = {'zero': ZeroEstimator}
class VerboseReporter(object):
"""Reports verbose output to stdout.
If ``verbose==1`` output is printed once in a while (when iteration mod
    verbose_mod is zero); if larger than 1 then output is printed for
each update.
"""
def __init__(self, verbose):
self.verbose = verbose
def init(self, est, begin_at_stage=0):
# header fields and line format str
header_fields = ['Iter', 'Train Loss']
verbose_fmt = ['{iter:>10d}', '{train_score:>16.4f}']
# do oob?
if est.subsample < 1:
header_fields.append('OOB Improve')
verbose_fmt.append('{oob_impr:>16.4f}')
header_fields.append('Remaining Time')
verbose_fmt.append('{remaining_time:>16s}')
# print the header line
print(('%10s ' + '%16s ' *
(len(header_fields) - 1)) % tuple(header_fields))
self.verbose_fmt = ' '.join(verbose_fmt)
# plot verbose info each time i % verbose_mod == 0
self.verbose_mod = 1
self.start_time = time()
self.begin_at_stage = begin_at_stage
def update(self, j, est):
"""Update reporter with new iteration. """
do_oob = est.subsample < 1
# we need to take into account if we fit additional estimators.
i = j - self.begin_at_stage # iteration relative to the start iter
if (i + 1) % self.verbose_mod == 0:
oob_impr = est.oob_improvement_[j] if do_oob else 0
remaining_time = ((est.n_estimators - (j + 1)) *
(time() - self.start_time) / float(i + 1))
if remaining_time > 60:
remaining_time = '{0:.2f}m'.format(remaining_time / 60.0)
else:
remaining_time = '{0:.2f}s'.format(remaining_time)
print(self.verbose_fmt.format(iter=j + 1,
train_score=est.train_score_[j],
oob_impr=oob_impr,
remaining_time=remaining_time))
if self.verbose == 1 and ((i + 1) // (self.verbose_mod * 10) > 0):
# adjust verbose frequency (powers of 10)
self.verbose_mod *= 10
class BaseGradientBoosting(six.with_metaclass(ABCMeta, BaseEnsemble,
_LearntSelectorMixin)):
"""Abstract base class for Gradient Boosting. """
@abstractmethod
def __init__(self, loss, learning_rate, n_estimators, min_samples_split,
min_samples_leaf, min_weight_fraction_leaf,
max_depth, init, subsample, max_features,
random_state, alpha=0.9, verbose=0, max_leaf_nodes=None,
warm_start=False):
self.n_estimators = n_estimators
self.learning_rate = learning_rate
self.loss = loss
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.subsample = subsample
self.max_features = max_features
self.max_depth = max_depth
self.init = init
self.random_state = random_state
self.alpha = alpha
self.verbose = verbose
self.max_leaf_nodes = max_leaf_nodes
self.warm_start = warm_start
self.estimators_ = np.empty((0, 0), dtype=np.object)
def _fit_stage(self, i, X, y, y_pred, sample_weight, sample_mask,
criterion, splitter, random_state):
"""Fit another stage of ``n_classes_`` trees to the boosting model. """
assert sample_mask.dtype == np.bool
loss = self.loss_
original_y = y
for k in range(loss.K):
if loss.is_multi_class:
y = np.array(original_y == k, dtype=np.float64)
residual = loss.negative_gradient(y, y_pred, k=k,
sample_weight=sample_weight)
# induce regression tree on residuals
tree = DecisionTreeRegressor(
criterion=criterion,
splitter=splitter,
max_depth=self.max_depth,
min_samples_split=self.min_samples_split,
min_samples_leaf=self.min_samples_leaf,
min_weight_fraction_leaf=self.min_weight_fraction_leaf,
max_features=self.max_features,
max_leaf_nodes=self.max_leaf_nodes,
random_state=random_state)
if self.subsample < 1.0:
# no inplace multiplication!
sample_weight = sample_weight * sample_mask.astype(np.float64)
tree.fit(X, residual, sample_weight=sample_weight,
check_input=False)
# update tree leaves
loss.update_terminal_regions(tree.tree_, X, y, residual, y_pred,
sample_weight, sample_mask,
self.learning_rate, k=k)
# add tree to ensemble
self.estimators_[i, k] = tree
return y_pred
def _check_params(self):
"""Check validity of parameters and raise ValueError if not valid. """
if self.n_estimators <= 0:
raise ValueError("n_estimators must be greater than 0 but "
"was %r" % self.n_estimators)
if self.learning_rate <= 0.0:
raise ValueError("learning_rate must be greater than 0 but "
"was %r" % self.learning_rate)
if (self.loss not in self._SUPPORTED_LOSS
or self.loss not in LOSS_FUNCTIONS):
raise ValueError("Loss '{0:s}' not supported. ".format(self.loss))
if self.loss == 'deviance':
loss_class = (MultinomialDeviance
if len(self.classes_) > 2
else BinomialDeviance)
else:
loss_class = LOSS_FUNCTIONS[self.loss]
if self.loss in ('huber', 'quantile'):
self.loss_ = loss_class(self.n_classes_, self.alpha)
else:
self.loss_ = loss_class(self.n_classes_)
if not (0.0 < self.subsample <= 1.0):
raise ValueError("subsample must be in (0,1] but "
"was %r" % self.subsample)
if self.init is not None:
if isinstance(self.init, six.string_types):
if self.init not in INIT_ESTIMATORS:
raise ValueError('init="%s" is not supported' % self.init)
else:
if (not hasattr(self.init, 'fit')
or not hasattr(self.init, 'predict')):
raise ValueError("init=%r must be valid BaseEstimator "
"and support both fit and "
"predict" % self.init)
if not (0.0 < self.alpha < 1.0):
raise ValueError("alpha must be in (0.0, 1.0) but "
"was %r" % self.alpha)
if isinstance(self.max_features, six.string_types):
if self.max_features == "auto":
# if is_classification
if self.n_classes_ > 1:
max_features = max(1, int(np.sqrt(self.n_features)))
else:
# is regression
max_features = self.n_features
elif self.max_features == "sqrt":
max_features = max(1, int(np.sqrt(self.n_features)))
elif self.max_features == "log2":
max_features = max(1, int(np.log2(self.n_features)))
else:
raise ValueError("Invalid value for max_features: %r. "
"Allowed string values are 'auto', 'sqrt' "
"or 'log2'." % self.max_features)
elif self.max_features is None:
max_features = self.n_features
elif isinstance(self.max_features, (numbers.Integral, np.integer)):
max_features = self.max_features
else: # float
if 0. < self.max_features <= 1.:
max_features = max(int(self.max_features * self.n_features), 1)
else:
raise ValueError("max_features must be in (0, n_features]")
self.max_features_ = max_features
def _init_state(self):
"""Initialize model state and allocate model state data structures. """
if self.init is None:
self.init_ = self.loss_.init_estimator()
elif isinstance(self.init, six.string_types):
self.init_ = INIT_ESTIMATORS[self.init]()
else:
self.init_ = self.init
self.estimators_ = np.empty((self.n_estimators, self.loss_.K),
dtype=np.object)
self.train_score_ = np.zeros((self.n_estimators,), dtype=np.float64)
# do oob?
if self.subsample < 1.0:
self.oob_improvement_ = np.zeros((self.n_estimators),
dtype=np.float64)
def _clear_state(self):
"""Clear the state of the gradient boosting model. """
if hasattr(self, 'estimators_'):
self.estimators_ = np.empty((0, 0), dtype=np.object)
if hasattr(self, 'train_score_'):
del self.train_score_
if hasattr(self, 'oob_improvement_'):
del self.oob_improvement_
if hasattr(self, 'init_'):
del self.init_
def _resize_state(self):
"""Add additional ``n_estimators`` entries to all attributes. """
# self.n_estimators is the number of additional est to fit
total_n_estimators = self.n_estimators
if total_n_estimators < self.estimators_.shape[0]:
raise ValueError('resize with smaller n_estimators %d < %d' %
                             (total_n_estimators, self.estimators_.shape[0]))
self.estimators_.resize((total_n_estimators, self.loss_.K))
self.train_score_.resize(total_n_estimators)
if (self.subsample < 1 or hasattr(self, 'oob_improvement_')):
# if do oob resize arrays or create new if not available
if hasattr(self, 'oob_improvement_'):
self.oob_improvement_.resize(total_n_estimators)
else:
self.oob_improvement_ = np.zeros((total_n_estimators,),
dtype=np.float64)
def _is_initialized(self):
return len(getattr(self, 'estimators_', [])) > 0
def fit(self, X, y, sample_weight=None, monitor=None):
"""Fit the gradient boosting model.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples
and n_features is the number of features.
y : array-like, shape = [n_samples]
Target values (integers in classification, real numbers in
regression)
For classification, labels must correspond to classes.
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted. Splits
that would create child nodes with net zero or negative weight are
ignored while searching for a split in each node. In the case of
classification, splits are also ignored if they would result in any
single class carrying a negative weight in either child node.
monitor : callable, optional
The monitor is called after each iteration with the current
iteration, a reference to the estimator and the local variables of
``_fit_stages`` as keyword arguments ``callable(i, self,
locals())``. If the callable returns ``True`` the fitting procedure
is stopped. The monitor can be used for various things such as
computing held-out estimates, early stopping, model introspect, and
snapshoting.
Returns
-------
self : object
Returns self.
"""
# if not warmstart - clear the estimator state
if not self.warm_start:
self._clear_state()
# Check input
X, y = check_X_y(X, y, dtype=DTYPE)
n_samples, self.n_features = X.shape
if sample_weight is None:
sample_weight = np.ones(n_samples, dtype=np.float32)
else:
sample_weight = column_or_1d(sample_weight, warn=True)
check_consistent_length(X, y, sample_weight)
y = self._validate_y(y)
random_state = check_random_state(self.random_state)
self._check_params()
if not self._is_initialized():
# init state
self._init_state()
# fit initial model - FIXME make sample_weight optional
self.init_.fit(X, y, sample_weight)
# init predictions
y_pred = self.init_.predict(X)
begin_at_stage = 0
else:
# add more estimators to fitted model
# invariant: warm_start = True
if self.n_estimators < self.estimators_.shape[0]:
raise ValueError('n_estimators=%d must be larger or equal to '
'estimators_.shape[0]=%d when '
'warm_start==True'
% (self.n_estimators,
self.estimators_.shape[0]))
begin_at_stage = self.estimators_.shape[0]
y_pred = self._decision_function(X)
self._resize_state()
# fit the boosting stages
n_stages = self._fit_stages(X, y, y_pred, sample_weight, random_state,
begin_at_stage, monitor)
# change shape of arrays after fit (early-stopping or additional ests)
if n_stages != self.estimators_.shape[0]:
self.estimators_ = self.estimators_[:n_stages]
self.train_score_ = self.train_score_[:n_stages]
if hasattr(self, 'oob_improvement_'):
self.oob_improvement_ = self.oob_improvement_[:n_stages]
return self
def _fit_stages(self, X, y, y_pred, sample_weight, random_state,
begin_at_stage=0, monitor=None):
"""Iteratively fits the stages.
For each stage it computes the progress (OOB, train score)
and delegates to ``_fit_stage``.
Returns the number of stages fit; might differ from ``n_estimators``
due to early stopping.
"""
n_samples = X.shape[0]
do_oob = self.subsample < 1.0
sample_mask = np.ones((n_samples, ), dtype=np.bool)
n_inbag = max(1, int(self.subsample * n_samples))
loss_ = self.loss_
# Set min_weight_leaf from min_weight_fraction_leaf
if self.min_weight_fraction_leaf != 0. and sample_weight is not None:
min_weight_leaf = (self.min_weight_fraction_leaf *
np.sum(sample_weight))
else:
min_weight_leaf = 0.
# init criterion and splitter
criterion = FriedmanMSE(1)
splitter = PresortBestSplitter(criterion,
self.max_features_,
self.min_samples_leaf,
min_weight_leaf,
random_state)
if self.verbose:
verbose_reporter = VerboseReporter(self.verbose)
verbose_reporter.init(self, begin_at_stage)
# perform boosting iterations
i = begin_at_stage
for i in range(begin_at_stage, self.n_estimators):
# subsampling
if do_oob:
sample_mask = _random_sample_mask(n_samples, n_inbag,
random_state)
# OOB score before adding this stage
old_oob_score = loss_(y[~sample_mask],
y_pred[~sample_mask],
sample_weight[~sample_mask])
# fit next stage of trees
y_pred = self._fit_stage(i, X, y, y_pred, sample_weight,
sample_mask, criterion, splitter,
random_state)
# track deviance (= loss)
if do_oob:
self.train_score_[i] = loss_(y[sample_mask],
y_pred[sample_mask],
sample_weight[sample_mask])
self.oob_improvement_[i] = (
old_oob_score - loss_(y[~sample_mask],
y_pred[~sample_mask],
sample_weight[~sample_mask]))
else:
# no need to fancy index w/ no subsampling
self.train_score_[i] = loss_(y, y_pred, sample_weight)
if self.verbose > 0:
verbose_reporter.update(i, self)
if monitor is not None:
early_stopping = monitor(i, self, locals())
if early_stopping:
break
return i + 1
def _make_estimator(self, append=True):
# we don't need _make_estimator
raise NotImplementedError()
def _init_decision_function(self, X):
"""Check input and compute prediction of ``init``. """
if self.estimators_ is None or len(self.estimators_) == 0:
raise NotFittedError("Estimator not fitted, call `fit`"
" before making predictions`.")
if X.shape[1] != self.n_features:
raise ValueError("X.shape[1] should be {0:d}, not {1:d}.".format(
self.n_features, X.shape[1]))
score = self.init_.predict(X).astype(np.float64)
return score
def _decision_function(self, X):
# for use in inner loop, not raveling the output in single-class case,
# not doing input validation.
score = self._init_decision_function(X)
predict_stages(self.estimators_, X, self.learning_rate, score)
return score
@deprecated(" and will be removed in 0.19")
def decision_function(self, X):
"""Compute the decision function of ``X``.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
score : array, shape = [n_samples, n_classes] or [n_samples]
The decision function of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
Regression and binary classification produce an array of shape
[n_samples].
"""
X = check_array(X, dtype=DTYPE, order="C")
score = self._decision_function(X)
if score.shape[1] == 1:
return score.ravel()
return score
def _staged_decision_function(self, X):
"""Compute decision function of ``X`` for each iteration.
This method allows monitoring (i.e. determine error on testing set)
after each stage.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
score : generator of array, shape = [n_samples, k]
The decision function of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
Regression and binary classification are special cases with
``k == 1``, otherwise ``k==n_classes``.
"""
X = check_array(X, dtype=DTYPE, order="C")
score = self._init_decision_function(X)
for i in range(self.estimators_.shape[0]):
predict_stage(self.estimators_, i, X, self.learning_rate, score)
yield score.copy()
@deprecated(" and will be removed in 0.19")
def staged_decision_function(self, X):
"""Compute decision function of ``X`` for each iteration.
This method allows monitoring (i.e. determine error on testing set)
after each stage.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
score : generator of array, shape = [n_samples, k]
The decision function of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
Regression and binary classification are special cases with
``k == 1``, otherwise ``k==n_classes``.
"""
for dec in self._staged_decision_function(X):
# no yield from in Python2.X
yield dec
@property
def feature_importances_(self):
"""Return the feature importances (the higher, the more important the
feature).
Returns
-------
feature_importances_ : array, shape = [n_features]
"""
if self.estimators_ is None or len(self.estimators_) == 0:
raise NotFittedError("Estimator not fitted, call `fit` before"
" `feature_importances_`.")
total_sum = np.zeros((self.n_features, ), dtype=np.float64)
for stage in self.estimators_:
stage_sum = sum(tree.feature_importances_
for tree in stage) / len(stage)
total_sum += stage_sum
importances = total_sum / len(self.estimators_)
return importances
def _validate_y(self, y):
self.n_classes_ = 1
if y.dtype.kind == 'O':
y = y.astype(np.float64)
# Default implementation
return y
class GradientBoostingClassifier(BaseGradientBoosting, ClassifierMixin):
"""Gradient Boosting for classification.
GB builds an additive model in a
forward stage-wise fashion; it allows for the optimization of
arbitrary differentiable loss functions. In each stage ``n_classes_``
regression trees are fit on the negative gradient of the
binomial or multinomial deviance loss function. Binary classification
is a special case where only a single regression tree is induced.
Read more in the :ref:`User Guide <gradient_boosting>`.
Parameters
----------
loss : {'deviance', 'exponential'}, optional (default='deviance')
loss function to be optimized. 'deviance' refers to
deviance (= logistic regression) for classification
with probabilistic outputs. For loss 'exponential' gradient
boosting recovers the AdaBoost algorithm.
learning_rate : float, optional (default=0.1)
learning rate shrinks the contribution of each tree by `learning_rate`.
There is a trade-off between learning_rate and n_estimators.
n_estimators : int (default=100)
The number of boosting stages to perform. Gradient boosting
is fairly robust to over-fitting so a large number usually
results in better performance.
max_depth : integer, optional (default=3)
maximum depth of the individual regression estimators. The maximum
depth limits the number of nodes in the tree. Tune this parameter
for best performance; the best value depends on the interaction
of the input variables.
Ignored if ``max_leaf_nodes`` is not None.
min_samples_split : integer, optional (default=2)
The minimum number of samples required to split an internal node.
min_samples_leaf : integer, optional (default=1)
The minimum number of samples required to be at a leaf node.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
subsample : float, optional (default=1.0)
The fraction of samples to be used for fitting the individual base
learners. If smaller than 1.0 this results in Stochastic Gradient
Boosting. `subsample` interacts with the parameter `n_estimators`.
Choosing `subsample < 1.0` leads to a reduction of variance
and an increase in bias.
max_features : int, float, string or None, optional (default=None)
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=sqrt(n_features)`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Choosing `max_features < n_features` leads to a reduction of variance
and an increase in bias.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
init : BaseEstimator, None, optional (default=None)
An estimator object that is used to compute the initial
predictions. ``init`` has to provide ``fit`` and ``predict``.
If None it uses ``loss.init_estimator``.
verbose : int, default: 0
Enable verbose output. If 1 then it prints progress and performance
once in a while (the more trees the lower the frequency). If greater
than 1 then it prints progress and performance for every tree.
warm_start : bool, default: False
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just erase the
previous solution.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Attributes
----------
feature_importances_ : array, shape = [n_features]
The feature importances (the higher, the more important the feature).
oob_improvement_ : array, shape = [n_estimators]
The improvement in loss (= deviance) on the out-of-bag samples
relative to the previous iteration.
``oob_improvement_[0]`` is the improvement in
loss of the first stage over the ``init`` estimator.
train_score_ : array, shape = [n_estimators]
The i-th score ``train_score_[i]`` is the deviance (= loss) of the
model at iteration ``i`` on the in-bag sample.
If ``subsample == 1`` this is the deviance on the training data.
loss_ : LossFunction
The concrete ``LossFunction`` object.
init : BaseEstimator
The estimator that provides the initial predictions.
Set via the ``init`` argument or ``loss.init_estimator``.
estimators_ : ndarray of DecisionTreeRegressor, shape = [n_estimators, loss_.K]
The collection of fitted sub-estimators. ``loss_.K`` is 1 for binary
classification, otherwise n_classes.
See also
--------
sklearn.tree.DecisionTreeClassifier, RandomForestClassifier
AdaBoostClassifier
References
----------
J. Friedman, Greedy Function Approximation: A Gradient Boosting
Machine, The Annals of Statistics, Vol. 29, No. 5, 2001.
J. Friedman, Stochastic Gradient Boosting, 1999
T. Hastie, R. Tibshirani and J. Friedman.
Elements of Statistical Learning Ed. 2, Springer, 2009.
"""
_SUPPORTED_LOSS = ('deviance', 'exponential')
def __init__(self, loss='deviance', learning_rate=0.1, n_estimators=100,
subsample=1.0, min_samples_split=2,
min_samples_leaf=1, min_weight_fraction_leaf=0.,
max_depth=3, init=None, random_state=None,
max_features=None, verbose=0,
max_leaf_nodes=None, warm_start=False):
super(GradientBoostingClassifier, self).__init__(
loss=loss, learning_rate=learning_rate, n_estimators=n_estimators,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_depth=max_depth, init=init, subsample=subsample,
max_features=max_features,
random_state=random_state, verbose=verbose,
max_leaf_nodes=max_leaf_nodes, warm_start=warm_start)
def _validate_y(self, y):
self.classes_, y = np.unique(y, return_inverse=True)
self.n_classes_ = len(self.classes_)
return y
def decision_function(self, X):
"""Compute the decision function of ``X``.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
score : array, shape = [n_samples, n_classes] or [n_samples]
The decision function of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
Regression and binary classification produce an array of shape
[n_samples].
"""
X = check_array(X, dtype=DTYPE, order="C")
score = self._decision_function(X)
if score.shape[1] == 1:
return score.ravel()
return score
def staged_decision_function(self, X):
"""Compute decision function of ``X`` for each iteration.
This method allows monitoring (i.e. determine error on testing set)
after each stage.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
score : generator of array, shape = [n_samples, k]
The decision function of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
Regression and binary classification are special cases with
``k == 1``, otherwise ``k==n_classes``.
"""
for dec in self._staged_decision_function(X):
# no yield from in Python2.X
yield dec
def predict(self, X):
"""Predict class for X.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
        y : array of shape = [n_samples]
The predicted values.
"""
score = self.decision_function(X)
decisions = self.loss_._score_to_decision(score)
return self.classes_.take(decisions, axis=0)
def staged_predict(self, X):
"""Predict class at each stage for X.
        This method allows monitoring (i.e. determining the error on a test set)
after each stage.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
y : generator of array of shape = [n_samples]
The predicted value of the input samples.
"""
for score in self._staged_decision_function(X):
decisions = self.loss_._score_to_decision(score)
yield self.classes_.take(decisions, axis=0)
def predict_proba(self, X):
"""Predict class probabilities for X.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Raises
------
AttributeError
If the ``loss`` does not support probabilities.
Returns
-------
        p : array of shape = [n_samples, n_classes]
The class probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
score = self.decision_function(X)
try:
return self.loss_._score_to_proba(score)
except NotFittedError:
raise
except AttributeError:
raise AttributeError('loss=%r does not support predict_proba' %
self.loss)
def predict_log_proba(self, X):
"""Predict class log-probabilities for X.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Raises
------
AttributeError
If the ``loss`` does not support probabilities.
Returns
-------
        p : array of shape = [n_samples, n_classes]
The class log-probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
proba = self.predict_proba(X)
return np.log(proba)
def staged_predict_proba(self, X):
"""Predict class probabilities at each stage for X.
        This method allows monitoring (i.e. determining the error on a test set)
after each stage.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
        y : generator of array of shape = [n_samples, n_classes]
            The predicted class probabilities of the input samples.
"""
try:
for score in self._staged_decision_function(X):
yield self.loss_._score_to_proba(score)
except NotFittedError:
raise
except AttributeError:
raise AttributeError('loss=%r does not support predict_proba' %
self.loss)
class GradientBoostingRegressor(BaseGradientBoosting, RegressorMixin):
"""Gradient Boosting for regression.
GB builds an additive model in a forward stage-wise fashion;
it allows for the optimization of arbitrary differentiable loss functions.
In each stage a regression tree is fit on the negative gradient of the
given loss function.
Read more in the :ref:`User Guide <gradient_boosting>`.
Parameters
----------
loss : {'ls', 'lad', 'huber', 'quantile'}, optional (default='ls')
loss function to be optimized. 'ls' refers to least squares
regression. 'lad' (least absolute deviation) is a highly robust
loss function solely based on order information of the input
variables. 'huber' is a combination of the two. 'quantile'
allows quantile regression (use `alpha` to specify the quantile).
learning_rate : float, optional (default=0.1)
learning rate shrinks the contribution of each tree by `learning_rate`.
There is a trade-off between learning_rate and n_estimators.
n_estimators : int (default=100)
The number of boosting stages to perform. Gradient boosting
is fairly robust to over-fitting so a large number usually
results in better performance.
max_depth : integer, optional (default=3)
maximum depth of the individual regression estimators. The maximum
depth limits the number of nodes in the tree. Tune this parameter
for best performance; the best value depends on the interaction
of the input variables.
Ignored if ``max_leaf_nodes`` is not None.
min_samples_split : integer, optional (default=2)
The minimum number of samples required to split an internal node.
min_samples_leaf : integer, optional (default=1)
The minimum number of samples required to be at a leaf node.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
subsample : float, optional (default=1.0)
The fraction of samples to be used for fitting the individual base
learners. If smaller than 1.0 this results in Stochastic Gradient
Boosting. `subsample` interacts with the parameter `n_estimators`.
Choosing `subsample < 1.0` leads to a reduction of variance
and an increase in bias.
max_features : int, float, string or None, optional (default=None)
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=n_features`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Choosing `max_features < n_features` leads to a reduction of variance
and an increase in bias.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
alpha : float (default=0.9)
The alpha-quantile of the huber loss function and the quantile
loss function. Only if ``loss='huber'`` or ``loss='quantile'``.
init : BaseEstimator, None, optional (default=None)
An estimator object that is used to compute the initial
predictions. ``init`` has to provide ``fit`` and ``predict``.
If None it uses ``loss.init_estimator``.
verbose : int, default: 0
Enable verbose output. If 1 then it prints progress and performance
once in a while (the more trees the lower the frequency). If greater
than 1 then it prints progress and performance for every tree.
warm_start : bool, default: False
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just erase the
previous solution.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Attributes
----------
feature_importances_ : array, shape = [n_features]
The feature importances (the higher, the more important the feature).
oob_improvement_ : array, shape = [n_estimators]
The improvement in loss (= deviance) on the out-of-bag samples
relative to the previous iteration.
``oob_improvement_[0]`` is the improvement in
loss of the first stage over the ``init`` estimator.
train_score_ : array, shape = [n_estimators]
The i-th score ``train_score_[i]`` is the deviance (= loss) of the
model at iteration ``i`` on the in-bag sample.
If ``subsample == 1`` this is the deviance on the training data.
loss_ : LossFunction
The concrete ``LossFunction`` object.
    init : BaseEstimator
The estimator that provides the initial predictions.
Set via the ``init`` argument or ``loss.init_estimator``.
estimators_ : ndarray of DecisionTreeRegressor, shape = [n_estimators, 1]
The collection of fitted sub-estimators.
See also
--------
DecisionTreeRegressor, RandomForestRegressor
References
----------
J. Friedman, Greedy Function Approximation: A Gradient Boosting
Machine, The Annals of Statistics, Vol. 29, No. 5, 2001.
J. Friedman, Stochastic Gradient Boosting, 1999
T. Hastie, R. Tibshirani and J. Friedman.
Elements of Statistical Learning Ed. 2, Springer, 2009.
"""
_SUPPORTED_LOSS = ('ls', 'lad', 'huber', 'quantile')
def __init__(self, loss='ls', learning_rate=0.1, n_estimators=100,
subsample=1.0, min_samples_split=2,
min_samples_leaf=1, min_weight_fraction_leaf=0.,
max_depth=3, init=None, random_state=None,
max_features=None, alpha=0.9, verbose=0, max_leaf_nodes=None,
warm_start=False):
super(GradientBoostingRegressor, self).__init__(
loss=loss, learning_rate=learning_rate, n_estimators=n_estimators,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_depth=max_depth, init=init, subsample=subsample,
max_features=max_features,
random_state=random_state, alpha=alpha, verbose=verbose,
max_leaf_nodes=max_leaf_nodes, warm_start=warm_start)
def predict(self, X):
"""Predict regression target for X.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
y : array of shape = [n_samples]
The predicted values.
"""
X = check_array(X, dtype=DTYPE, order="C")
return self._decision_function(X).ravel()
def staged_predict(self, X):
"""Predict regression target at each stage for X.
        This method allows monitoring (i.e. determining the error on a test set)
after each stage.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
y : generator of array of shape = [n_samples]
The predicted value of the input samples.
"""
for y in self._staged_decision_function(X):
yield y.ravel()
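# ---------------------------------------------------------------------------
# Hedged usage sketch (illustration only, not part of the library): a minimal
# end-to-end run of the classifier defined above.  ``make_hastie_10_2`` is a
# standard sklearn dataset generator; the sample split and hyperparameter
# values are arbitrary assumptions chosen purely for demonstration.
if __name__ == '__main__':
    from sklearn.datasets import make_hastie_10_2
    X, y = make_hastie_10_2(n_samples=1000, random_state=0)
    clf = GradientBoostingClassifier(n_estimators=50, learning_rate=0.1,
                                     max_depth=3, random_state=0)
    clf.fit(X[:800], y[:800])
    preds = clf.predict(X[800:])
    print("hold-out accuracy: %.3f" % np.mean(preds == y[800:]))
    print("probability shape: %s" % (clf.predict_proba(X[800:]).shape,))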
| bsd-3-clause |
jmmease/pandas | pandas/tests/io/json/test_pandas.py | 1 | 42616 | # -*- coding: utf-8 -*-
# pylint: disable-msg=W0612,E1101
import pytest
from pandas.compat import (range, lrange, StringIO,
OrderedDict, is_platform_32bit)
import os
import numpy as np
from pandas import (Series, DataFrame, DatetimeIndex, Timestamp,
read_json, compat)
from datetime import timedelta
import pandas as pd
from pandas.util.testing import (assert_almost_equal, assert_frame_equal,
assert_series_equal, network,
ensure_clean, assert_index_equal)
import pandas.util.testing as tm
_seriesd = tm.getSeriesData()
_tsd = tm.getTimeSeriesData()
_frame = DataFrame(_seriesd)
_frame2 = DataFrame(_seriesd, columns=['D', 'C', 'B', 'A'])
_intframe = DataFrame(dict((k, v.astype(np.int64))
for k, v in compat.iteritems(_seriesd)))
_tsframe = DataFrame(_tsd)
_cat_frame = _frame.copy()
cat = ['bah'] * 5 + ['bar'] * 5 + ['baz'] * \
5 + ['foo'] * (len(_cat_frame) - 15)
_cat_frame.index = pd.CategoricalIndex(cat, name='E')
_cat_frame['E'] = list(reversed(cat))
_cat_frame['sort'] = np.arange(len(_cat_frame), dtype='int64')
_mixed_frame = _frame.copy()
class TestPandasContainer(object):
def setup_method(self, method):
self.dirpath = tm.get_data_path()
self.ts = tm.makeTimeSeries()
self.ts.name = 'ts'
self.series = tm.makeStringSeries()
self.series.name = 'series'
self.objSeries = tm.makeObjectSeries()
self.objSeries.name = 'objects'
self.empty_series = Series([], index=[])
self.empty_frame = DataFrame({})
self.frame = _frame.copy()
self.frame2 = _frame2.copy()
self.intframe = _intframe.copy()
self.tsframe = _tsframe.copy()
self.mixed_frame = _mixed_frame.copy()
self.categorical = _cat_frame.copy()
def teardown_method(self, method):
del self.dirpath
del self.ts
del self.series
del self.objSeries
del self.empty_series
del self.empty_frame
del self.frame
del self.frame2
del self.intframe
del self.tsframe
del self.mixed_frame
def test_frame_double_encoded_labels(self):
df = DataFrame([['a', 'b'], ['c', 'd']],
index=['index " 1', 'index / 2'],
columns=['a \\ b', 'y / z'])
assert_frame_equal(df, read_json(df.to_json(orient='split'),
orient='split'))
assert_frame_equal(df, read_json(df.to_json(orient='columns'),
orient='columns'))
assert_frame_equal(df, read_json(df.to_json(orient='index'),
orient='index'))
df_unser = read_json(df.to_json(orient='records'), orient='records')
assert_index_equal(df.columns, df_unser.columns)
tm.assert_numpy_array_equal(df.values, df_unser.values)
def test_frame_non_unique_index(self):
df = DataFrame([['a', 'b'], ['c', 'd']], index=[1, 1],
columns=['x', 'y'])
pytest.raises(ValueError, df.to_json, orient='index')
pytest.raises(ValueError, df.to_json, orient='columns')
assert_frame_equal(df, read_json(df.to_json(orient='split'),
orient='split'))
unser = read_json(df.to_json(orient='records'), orient='records')
tm.assert_index_equal(df.columns, unser.columns)
tm.assert_almost_equal(df.values, unser.values)
unser = read_json(df.to_json(orient='values'), orient='values')
tm.assert_numpy_array_equal(df.values, unser.values)
def test_frame_non_unique_columns(self):
df = DataFrame([['a', 'b'], ['c', 'd']], index=[1, 2],
columns=['x', 'x'])
pytest.raises(ValueError, df.to_json, orient='index')
pytest.raises(ValueError, df.to_json, orient='columns')
pytest.raises(ValueError, df.to_json, orient='records')
assert_frame_equal(df, read_json(df.to_json(orient='split'),
orient='split', dtype=False))
unser = read_json(df.to_json(orient='values'), orient='values')
tm.assert_numpy_array_equal(df.values, unser.values)
# GH4377; duplicate columns not processing correctly
df = DataFrame([['a', 'b'], ['c', 'd']], index=[
1, 2], columns=['x', 'y'])
result = read_json(df.to_json(orient='split'), orient='split')
assert_frame_equal(result, df)
def _check(df):
result = read_json(df.to_json(orient='split'), orient='split',
convert_dates=['x'])
assert_frame_equal(result, df)
for o in [[['a', 'b'], ['c', 'd']],
[[1.5, 2.5], [3.5, 4.5]],
[[1, 2.5], [3, 4.5]],
[[Timestamp('20130101'), 3.5],
[Timestamp('20130102'), 4.5]]]:
_check(DataFrame(o, index=[1, 2], columns=['x', 'x']))
def test_frame_from_json_to_json(self):
def _check_orient(df, orient, dtype=None, numpy=False,
convert_axes=True, check_dtype=True, raise_ok=None,
sort=None, check_index_type=True,
check_column_type=True, check_numpy_dtype=False):
if sort is not None:
df = df.sort_values(sort)
else:
df = df.sort_index()
# if we are not unique, then check that we are raising ValueError
# for the appropriate orients
if not df.index.is_unique and orient in ['index', 'columns']:
pytest.raises(
ValueError, lambda: df.to_json(orient=orient))
return
if (not df.columns.is_unique and
orient in ['index', 'columns', 'records']):
pytest.raises(
ValueError, lambda: df.to_json(orient=orient))
return
dfjson = df.to_json(orient=orient)
try:
unser = read_json(dfjson, orient=orient, dtype=dtype,
numpy=numpy, convert_axes=convert_axes)
except Exception as detail:
if raise_ok is not None:
if isinstance(detail, raise_ok):
return
raise
if sort is not None and sort in unser.columns:
unser = unser.sort_values(sort)
else:
unser = unser.sort_index()
if dtype is False:
check_dtype = False
if not convert_axes and df.index.dtype.type == np.datetime64:
unser.index = DatetimeIndex(
unser.index.values.astype('i8') * 1e6)
if orient == "records":
# index is not captured in this orientation
tm.assert_almost_equal(df.values, unser.values,
check_dtype=check_numpy_dtype)
tm.assert_index_equal(df.columns, unser.columns,
exact=check_column_type)
elif orient == "values":
# index and cols are not captured in this orientation
if numpy is True and df.shape == (0, 0):
assert unser.shape[0] == 0
else:
tm.assert_almost_equal(df.values, unser.values,
check_dtype=check_numpy_dtype)
elif orient == "split":
# index and col labels might not be strings
unser.index = [str(i) for i in unser.index]
unser.columns = [str(i) for i in unser.columns]
if sort is None:
unser = unser.sort_index()
tm.assert_almost_equal(df.values, unser.values,
check_dtype=check_numpy_dtype)
else:
if convert_axes:
tm.assert_frame_equal(df, unser, check_dtype=check_dtype,
check_index_type=check_index_type,
check_column_type=check_column_type)
else:
tm.assert_frame_equal(df, unser, check_less_precise=False,
check_dtype=check_dtype)
def _check_all_orients(df, dtype=None, convert_axes=True,
raise_ok=None, sort=None, check_index_type=True,
check_column_type=True):
# numpy=False
if convert_axes:
_check_orient(df, "columns", dtype=dtype, sort=sort,
check_index_type=False, check_column_type=False)
_check_orient(df, "records", dtype=dtype, sort=sort,
check_index_type=False, check_column_type=False)
_check_orient(df, "split", dtype=dtype, sort=sort,
check_index_type=False, check_column_type=False)
_check_orient(df, "index", dtype=dtype, sort=sort,
check_index_type=False, check_column_type=False)
_check_orient(df, "values", dtype=dtype, sort=sort,
check_index_type=False, check_column_type=False)
_check_orient(df, "columns", dtype=dtype,
convert_axes=False, sort=sort)
_check_orient(df, "records", dtype=dtype,
convert_axes=False, sort=sort)
_check_orient(df, "split", dtype=dtype,
convert_axes=False, sort=sort)
_check_orient(df, "index", dtype=dtype,
convert_axes=False, sort=sort)
_check_orient(df, "values", dtype=dtype,
convert_axes=False, sort=sort)
# numpy=True and raise_ok might be not None, so ignore the error
if convert_axes:
_check_orient(df, "columns", dtype=dtype, numpy=True,
raise_ok=raise_ok, sort=sort,
check_index_type=False, check_column_type=False)
_check_orient(df, "records", dtype=dtype, numpy=True,
raise_ok=raise_ok, sort=sort,
check_index_type=False, check_column_type=False)
_check_orient(df, "split", dtype=dtype, numpy=True,
raise_ok=raise_ok, sort=sort,
check_index_type=False, check_column_type=False)
_check_orient(df, "index", dtype=dtype, numpy=True,
raise_ok=raise_ok, sort=sort,
check_index_type=False, check_column_type=False)
_check_orient(df, "values", dtype=dtype, numpy=True,
raise_ok=raise_ok, sort=sort,
check_index_type=False, check_column_type=False)
_check_orient(df, "columns", dtype=dtype, numpy=True,
convert_axes=False, raise_ok=raise_ok, sort=sort)
_check_orient(df, "records", dtype=dtype, numpy=True,
convert_axes=False, raise_ok=raise_ok, sort=sort)
_check_orient(df, "split", dtype=dtype, numpy=True,
convert_axes=False, raise_ok=raise_ok, sort=sort)
_check_orient(df, "index", dtype=dtype, numpy=True,
convert_axes=False, raise_ok=raise_ok, sort=sort)
_check_orient(df, "values", dtype=dtype, numpy=True,
convert_axes=False, raise_ok=raise_ok, sort=sort)
# basic
_check_all_orients(self.frame)
assert self.frame.to_json() == self.frame.to_json(orient="columns")
_check_all_orients(self.intframe, dtype=self.intframe.values.dtype)
_check_all_orients(self.intframe, dtype=False)
# big one
# index and columns are strings as all unserialised JSON object keys
# are assumed to be strings
biggie = DataFrame(np.zeros((200, 4)),
columns=[str(i) for i in range(4)],
index=[str(i) for i in range(200)])
_check_all_orients(biggie, dtype=False, convert_axes=False)
# dtypes
_check_all_orients(DataFrame(biggie, dtype=np.float64),
dtype=np.float64, convert_axes=False)
_check_all_orients(DataFrame(biggie, dtype=np.int), dtype=np.int,
convert_axes=False)
_check_all_orients(DataFrame(biggie, dtype='U3'), dtype='U3',
convert_axes=False, raise_ok=ValueError)
# categorical
_check_all_orients(self.categorical, sort='sort', raise_ok=ValueError)
# empty
_check_all_orients(self.empty_frame, check_index_type=False,
check_column_type=False)
# time series data
_check_all_orients(self.tsframe)
# mixed data
index = pd.Index(['a', 'b', 'c', 'd', 'e'])
data = {'A': [0., 1., 2., 3., 4.],
'B': [0., 1., 0., 1., 0.],
'C': ['foo1', 'foo2', 'foo3', 'foo4', 'foo5'],
'D': [True, False, True, False, True]}
df = DataFrame(data=data, index=index)
_check_orient(df, "split", check_dtype=False)
_check_orient(df, "records", check_dtype=False)
_check_orient(df, "values", check_dtype=False)
_check_orient(df, "columns", check_dtype=False)
# index oriented is problematic as it is read back in in a transposed
# state, so the columns are interpreted as having mixed data and
# given object dtypes.
# force everything to have object dtype beforehand
_check_orient(df.transpose().transpose(), "index", dtype=False)
def test_frame_from_json_bad_data(self):
pytest.raises(ValueError, read_json, StringIO('{"key":b:a:d}'))
# too few indices
json = StringIO('{"columns":["A","B"],'
'"index":["2","3"],'
'"data":[[1.0,"1"],[2.0,"2"],[null,"3"]]}')
pytest.raises(ValueError, read_json, json,
orient="split")
# too many columns
json = StringIO('{"columns":["A","B","C"],'
'"index":["1","2","3"],'
'"data":[[1.0,"1"],[2.0,"2"],[null,"3"]]}')
pytest.raises(AssertionError, read_json, json,
orient="split")
# bad key
json = StringIO('{"badkey":["A","B"],'
'"index":["2","3"],'
'"data":[[1.0,"1"],[2.0,"2"],[null,"3"]]}')
with tm.assert_raises_regex(ValueError,
r"unexpected key\(s\): badkey"):
read_json(json, orient="split")
def test_frame_from_json_nones(self):
df = DataFrame([[1, 2], [4, 5, 6]])
unser = read_json(df.to_json())
assert np.isnan(unser[2][0])
df = DataFrame([['1', '2'], ['4', '5', '6']])
unser = read_json(df.to_json())
assert np.isnan(unser[2][0])
unser = read_json(df.to_json(), dtype=False)
assert unser[2][0] is None
unser = read_json(df.to_json(), convert_axes=False, dtype=False)
assert unser['2']['0'] is None
unser = read_json(df.to_json(), numpy=False)
assert np.isnan(unser[2][0])
unser = read_json(df.to_json(), numpy=False, dtype=False)
assert unser[2][0] is None
unser = read_json(df.to_json(), numpy=False,
convert_axes=False, dtype=False)
assert unser['2']['0'] is None
# infinities get mapped to nulls which get mapped to NaNs during
# deserialisation
df = DataFrame([[1, 2], [4, 5, 6]])
df.loc[0, 2] = np.inf
unser = read_json(df.to_json())
assert np.isnan(unser[2][0])
unser = read_json(df.to_json(), dtype=False)
assert np.isnan(unser[2][0])
df.loc[0, 2] = np.NINF
unser = read_json(df.to_json())
assert np.isnan(unser[2][0])
unser = read_json(df.to_json(), dtype=False)
assert np.isnan(unser[2][0])
@pytest.mark.skipif(is_platform_32bit(),
reason="not compliant on 32-bit, xref #15865")
def test_frame_to_json_float_precision(self):
df = pd.DataFrame([dict(a_float=0.95)])
encoded = df.to_json(double_precision=1)
assert encoded == '{"a_float":{"0":1.0}}'
df = pd.DataFrame([dict(a_float=1.95)])
encoded = df.to_json(double_precision=1)
assert encoded == '{"a_float":{"0":2.0}}'
df = pd.DataFrame([dict(a_float=-1.95)])
encoded = df.to_json(double_precision=1)
assert encoded == '{"a_float":{"0":-2.0}}'
df = pd.DataFrame([dict(a_float=0.995)])
encoded = df.to_json(double_precision=2)
assert encoded == '{"a_float":{"0":1.0}}'
df = pd.DataFrame([dict(a_float=0.9995)])
encoded = df.to_json(double_precision=3)
assert encoded == '{"a_float":{"0":1.0}}'
df = pd.DataFrame([dict(a_float=0.99999999999999944)])
encoded = df.to_json(double_precision=15)
assert encoded == '{"a_float":{"0":1.0}}'
def test_frame_to_json_except(self):
df = DataFrame([1, 2, 3])
pytest.raises(ValueError, df.to_json, orient="garbage")
def test_frame_empty(self):
df = DataFrame(columns=['jim', 'joe'])
assert not df._is_mixed_type
assert_frame_equal(read_json(df.to_json(), dtype=dict(df.dtypes)), df,
check_index_type=False)
# GH 7445
result = pd.DataFrame({'test': []}, index=[]).to_json(orient='columns')
expected = '{"test":{}}'
assert result == expected
def test_frame_empty_mixedtype(self):
# mixed type
df = DataFrame(columns=['jim', 'joe'])
df['joe'] = df['joe'].astype('i8')
assert df._is_mixed_type
assert_frame_equal(read_json(df.to_json(), dtype=dict(df.dtypes)), df,
check_index_type=False)
def test_frame_mixedtype_orient(self): # GH10289
vals = [[10, 1, 'foo', .1, .01],
[20, 2, 'bar', .2, .02],
[30, 3, 'baz', .3, .03],
[40, 4, 'qux', .4, .04]]
df = DataFrame(vals, index=list('abcd'),
columns=['1st', '2nd', '3rd', '4th', '5th'])
assert df._is_mixed_type
right = df.copy()
for orient in ['split', 'index', 'columns']:
inp = df.to_json(orient=orient)
left = read_json(inp, orient=orient, convert_axes=False)
assert_frame_equal(left, right)
right.index = np.arange(len(df))
inp = df.to_json(orient='records')
left = read_json(inp, orient='records', convert_axes=False)
assert_frame_equal(left, right)
right.columns = np.arange(df.shape[1])
inp = df.to_json(orient='values')
left = read_json(inp, orient='values', convert_axes=False)
assert_frame_equal(left, right)
def test_v12_compat(self):
df = DataFrame(
[[1.56808523, 0.65727391, 1.81021139, -0.17251653],
[-0.2550111, -0.08072427, -0.03202878, -0.17581665],
[1.51493992, 0.11805825, 1.629455, -1.31506612],
[-0.02765498, 0.44679743, 0.33192641, -0.27885413],
[0.05951614, -2.69652057, 1.28163262, 0.34703478]],
columns=['A', 'B', 'C', 'D'],
index=pd.date_range('2000-01-03', '2000-01-07'))
df['date'] = pd.Timestamp('19920106 18:21:32.12')
df.iloc[3, df.columns.get_loc('date')] = pd.Timestamp('20130101')
df['modified'] = df['date']
df.iloc[1, df.columns.get_loc('modified')] = pd.NaT
v12_json = os.path.join(self.dirpath, 'tsframe_v012.json')
df_unser = pd.read_json(v12_json)
assert_frame_equal(df, df_unser)
df_iso = df.drop(['modified'], axis=1)
v12_iso_json = os.path.join(self.dirpath, 'tsframe_iso_v012.json')
df_unser_iso = pd.read_json(v12_iso_json)
assert_frame_equal(df_iso, df_unser_iso)
def test_blocks_compat_GH9037(self):
index = pd.date_range('20000101', periods=10, freq='H')
df_mixed = DataFrame(OrderedDict(
float_1=[-0.92077639, 0.77434435, 1.25234727, 0.61485564,
-0.60316077, 0.24653374, 0.28668979, -2.51969012,
0.95748401, -1.02970536],
int_1=[19680418, 75337055, 99973684, 65103179, 79373900,
40314334, 21290235, 4991321, 41903419, 16008365],
str_1=['78c608f1', '64a99743', '13d2ff52', 'ca7f4af2', '97236474',
'bde7e214', '1a6bde47', 'b1190be5', '7a669144', '8d64d068'],
float_2=[-0.0428278, -1.80872357, 3.36042349, -0.7573685,
-0.48217572, 0.86229683, 1.08935819, 0.93898739,
-0.03030452, 1.43366348],
str_2=['14f04af9', 'd085da90', '4bcfac83', '81504caf', '2ffef4a9',
'08e2f5c4', '07e1af03', 'addbd4a7', '1f6a09ba', '4bfc4d87'],
int_2=[86967717, 98098830, 51927505, 20372254, 12601730, 20884027,
34193846, 10561746, 24867120, 76131025]
), index=index)
# JSON deserialisation always creates unicode strings
df_mixed.columns = df_mixed.columns.astype('unicode')
df_roundtrip = pd.read_json(df_mixed.to_json(orient='split'),
orient='split')
assert_frame_equal(df_mixed, df_roundtrip,
check_index_type=True,
check_column_type=True,
check_frame_type=True,
by_blocks=True,
check_exact=True)
def test_series_non_unique_index(self):
s = Series(['a', 'b'], index=[1, 1])
pytest.raises(ValueError, s.to_json, orient='index')
assert_series_equal(s, read_json(s.to_json(orient='split'),
orient='split', typ='series'))
unser = read_json(s.to_json(orient='records'),
orient='records', typ='series')
tm.assert_numpy_array_equal(s.values, unser.values)
def test_series_from_json_to_json(self):
def _check_orient(series, orient, dtype=None, numpy=False,
check_index_type=True):
series = series.sort_index()
unser = read_json(series.to_json(orient=orient),
typ='series', orient=orient, numpy=numpy,
dtype=dtype)
unser = unser.sort_index()
if orient == "records" or orient == "values":
assert_almost_equal(series.values, unser.values)
else:
if orient == "split":
assert_series_equal(series, unser,
check_index_type=check_index_type)
else:
assert_series_equal(series, unser, check_names=False,
check_index_type=check_index_type)
def _check_all_orients(series, dtype=None, check_index_type=True):
_check_orient(series, "columns", dtype=dtype,
check_index_type=check_index_type)
_check_orient(series, "records", dtype=dtype,
check_index_type=check_index_type)
_check_orient(series, "split", dtype=dtype,
check_index_type=check_index_type)
_check_orient(series, "index", dtype=dtype,
check_index_type=check_index_type)
_check_orient(series, "values", dtype=dtype)
_check_orient(series, "columns", dtype=dtype, numpy=True,
check_index_type=check_index_type)
_check_orient(series, "records", dtype=dtype, numpy=True,
check_index_type=check_index_type)
_check_orient(series, "split", dtype=dtype, numpy=True,
check_index_type=check_index_type)
_check_orient(series, "index", dtype=dtype, numpy=True,
check_index_type=check_index_type)
_check_orient(series, "values", dtype=dtype, numpy=True,
check_index_type=check_index_type)
# basic
_check_all_orients(self.series)
assert self.series.to_json() == self.series.to_json(orient="index")
objSeries = Series([str(d) for d in self.objSeries],
index=self.objSeries.index,
name=self.objSeries.name)
_check_all_orients(objSeries, dtype=False)
# empty_series has empty index with object dtype
# which cannot be revert
assert self.empty_series.index.dtype == np.object_
_check_all_orients(self.empty_series, check_index_type=False)
_check_all_orients(self.ts)
# dtype
s = Series(lrange(6), index=['a', 'b', 'c', 'd', 'e', 'f'])
_check_all_orients(Series(s, dtype=np.float64), dtype=np.float64)
_check_all_orients(Series(s, dtype=np.int), dtype=np.int)
def test_series_to_json_except(self):
s = Series([1, 2, 3])
pytest.raises(ValueError, s.to_json, orient="garbage")
def test_series_from_json_precise_float(self):
s = Series([4.56, 4.56, 4.56])
result = read_json(s.to_json(), typ='series', precise_float=True)
assert_series_equal(result, s, check_index_type=False)
def test_frame_from_json_precise_float(self):
df = DataFrame([[4.56, 4.56, 4.56], [4.56, 4.56, 4.56]])
result = read_json(df.to_json(), precise_float=True)
assert_frame_equal(result, df, check_index_type=False,
check_column_type=False)
def test_typ(self):
s = Series(lrange(6), index=['a', 'b', 'c',
'd', 'e', 'f'], dtype='int64')
result = read_json(s.to_json(), typ=None)
assert_series_equal(result, s)
def test_reconstruction_index(self):
df = DataFrame([[1, 2, 3], [4, 5, 6]])
result = read_json(df.to_json())
assert_frame_equal(result, df)
df = DataFrame({'a': [1, 2, 3], 'b': [4, 5, 6]}, index=['A', 'B', 'C'])
result = read_json(df.to_json())
assert_frame_equal(result, df)
def test_path(self):
with ensure_clean('test.json') as path:
for df in [self.frame, self.frame2, self.intframe, self.tsframe,
self.mixed_frame]:
df.to_json(path)
read_json(path)
def test_axis_dates(self):
# frame
json = self.tsframe.to_json()
result = read_json(json)
assert_frame_equal(result, self.tsframe)
# series
json = self.ts.to_json()
result = read_json(json, typ='series')
assert_series_equal(result, self.ts, check_names=False)
assert result.name is None
def test_convert_dates(self):
# frame
df = self.tsframe.copy()
df['date'] = Timestamp('20130101')
json = df.to_json()
result = read_json(json)
assert_frame_equal(result, df)
df['foo'] = 1.
json = df.to_json(date_unit='ns')
result = read_json(json, convert_dates=False)
expected = df.copy()
expected['date'] = expected['date'].values.view('i8')
expected['foo'] = expected['foo'].astype('int64')
assert_frame_equal(result, expected)
# series
ts = Series(Timestamp('20130101'), index=self.ts.index)
json = ts.to_json()
result = read_json(json, typ='series')
assert_series_equal(result, ts)
def test_convert_dates_infer(self):
# GH10747
from pandas.io.json import dumps
infer_words = ['trade_time', 'date', 'datetime', 'sold_at',
'modified', 'timestamp', 'timestamps']
for infer_word in infer_words:
data = [{'id': 1, infer_word: 1036713600000}, {'id': 2}]
expected = DataFrame([[1, Timestamp('2002-11-08')], [2, pd.NaT]],
columns=['id', infer_word])
result = read_json(dumps(data))[['id', infer_word]]
assert_frame_equal(result, expected)
def test_date_format_frame(self):
df = self.tsframe.copy()
def test_w_date(date, date_unit=None):
df['date'] = Timestamp(date)
df.iloc[1, df.columns.get_loc('date')] = pd.NaT
df.iloc[5, df.columns.get_loc('date')] = pd.NaT
if date_unit:
json = df.to_json(date_format='iso', date_unit=date_unit)
else:
json = df.to_json(date_format='iso')
result = read_json(json)
assert_frame_equal(result, df)
test_w_date('20130101 20:43:42.123')
test_w_date('20130101 20:43:42', date_unit='s')
test_w_date('20130101 20:43:42.123', date_unit='ms')
test_w_date('20130101 20:43:42.123456', date_unit='us')
test_w_date('20130101 20:43:42.123456789', date_unit='ns')
pytest.raises(ValueError, df.to_json, date_format='iso',
date_unit='foo')
def test_date_format_series(self):
def test_w_date(date, date_unit=None):
ts = Series(Timestamp(date), index=self.ts.index)
ts.iloc[1] = pd.NaT
ts.iloc[5] = pd.NaT
if date_unit:
json = ts.to_json(date_format='iso', date_unit=date_unit)
else:
json = ts.to_json(date_format='iso')
result = read_json(json, typ='series')
assert_series_equal(result, ts)
test_w_date('20130101 20:43:42.123')
test_w_date('20130101 20:43:42', date_unit='s')
test_w_date('20130101 20:43:42.123', date_unit='ms')
test_w_date('20130101 20:43:42.123456', date_unit='us')
test_w_date('20130101 20:43:42.123456789', date_unit='ns')
ts = Series(Timestamp('20130101 20:43:42.123'), index=self.ts.index)
pytest.raises(ValueError, ts.to_json, date_format='iso',
date_unit='foo')
def test_date_unit(self):
df = self.tsframe.copy()
df['date'] = Timestamp('20130101 20:43:42')
dl = df.columns.get_loc('date')
df.iloc[1, dl] = Timestamp('19710101 20:43:42')
df.iloc[2, dl] = Timestamp('21460101 20:43:42')
df.iloc[4, dl] = pd.NaT
for unit in ('s', 'ms', 'us', 'ns'):
json = df.to_json(date_format='epoch', date_unit=unit)
# force date unit
result = read_json(json, date_unit=unit)
assert_frame_equal(result, df)
# detect date unit
result = read_json(json, date_unit=None)
assert_frame_equal(result, df)
def test_weird_nested_json(self):
# this used to core dump the parser
s = r'''{
"status": "success",
"data": {
"posts": [
{
"id": 1,
"title": "A blog post",
"body": "Some useful content"
},
{
"id": 2,
"title": "Another blog post",
"body": "More content"
}
]
}
}'''
read_json(s)
def test_doc_example(self):
dfj2 = DataFrame(np.random.randn(5, 2), columns=list('AB'))
dfj2['date'] = Timestamp('20130101')
dfj2['ints'] = lrange(5)
dfj2['bools'] = True
dfj2.index = pd.date_range('20130101', periods=5)
json = dfj2.to_json()
result = read_json(json, dtype={'ints': np.int64, 'bools': np.bool_})
assert_frame_equal(result, result)
def test_misc_example(self):
# parsing unordered input fails
result = read_json('[{"a": 1, "b": 2}, {"b":2, "a" :1}]', numpy=True)
expected = DataFrame([[1, 2], [1, 2]], columns=['a', 'b'])
error_msg = """DataFrame\\.index are different
DataFrame\\.index values are different \\(100\\.0 %\\)
\\[left\\]: Index\\(\\[u?'a', u?'b'\\], dtype='object'\\)
\\[right\\]: RangeIndex\\(start=0, stop=2, step=1\\)"""
with tm.assert_raises_regex(AssertionError, error_msg):
assert_frame_equal(result, expected, check_index_type=False)
result = read_json('[{"a": 1, "b": 2}, {"b":2, "a" :1}]')
expected = DataFrame([[1, 2], [1, 2]], columns=['a', 'b'])
assert_frame_equal(result, expected)
@network
def test_round_trip_exception_(self):
# GH 3867
csv = 'https://raw.github.com/hayd/lahman2012/master/csvs/Teams.csv'
df = pd.read_csv(csv)
s = df.to_json()
result = pd.read_json(s)
assert_frame_equal(result.reindex(
index=df.index, columns=df.columns), df)
@network
def test_url(self):
url = 'https://api.github.com/repos/pandas-dev/pandas/issues?per_page=5' # noqa
result = read_json(url, convert_dates=True)
for c in ['created_at', 'closed_at', 'updated_at']:
assert result[c].dtype == 'datetime64[ns]'
def test_timedelta(self):
converter = lambda x: pd.to_timedelta(x, unit='ms')
s = Series([timedelta(23), timedelta(seconds=5)])
assert s.dtype == 'timedelta64[ns]'
result = pd.read_json(s.to_json(), typ='series').apply(converter)
assert_series_equal(result, s)
s = Series([timedelta(23), timedelta(seconds=5)],
index=pd.Index([0, 1]))
assert s.dtype == 'timedelta64[ns]'
result = pd.read_json(s.to_json(), typ='series').apply(converter)
assert_series_equal(result, s)
frame = DataFrame([timedelta(23), timedelta(seconds=5)])
assert frame[0].dtype == 'timedelta64[ns]'
assert_frame_equal(frame, pd.read_json(frame.to_json())
.apply(converter))
frame = DataFrame({'a': [timedelta(days=23), timedelta(seconds=5)],
'b': [1, 2],
'c': pd.date_range(start='20130101', periods=2)})
result = pd.read_json(frame.to_json(date_unit='ns'))
result['a'] = pd.to_timedelta(result.a, unit='ns')
result['c'] = pd.to_datetime(result.c)
assert_frame_equal(frame, result)
def test_mixed_timedelta_datetime(self):
frame = DataFrame({'a': [timedelta(23), pd.Timestamp('20130101')]},
dtype=object)
expected = DataFrame({'a': [pd.Timedelta(frame.a[0]).value,
pd.Timestamp(frame.a[1]).value]})
result = pd.read_json(frame.to_json(date_unit='ns'),
dtype={'a': 'int64'})
assert_frame_equal(result, expected, check_index_type=False)
def test_default_handler(self):
value = object()
frame = DataFrame({'a': [7, value]})
expected = DataFrame({'a': [7, str(value)]})
result = pd.read_json(frame.to_json(default_handler=str))
assert_frame_equal(expected, result, check_index_type=False)
def test_default_handler_indirect(self):
from pandas.io.json import dumps
def default(obj):
if isinstance(obj, complex):
return [('mathjs', 'Complex'),
('re', obj.real),
('im', obj.imag)]
return str(obj)
df_list = [9, DataFrame({'a': [1, 'STR', complex(4, -5)],
'b': [float('nan'), None, 'N/A']},
columns=['a', 'b'])]
expected = ('[9,[[1,null],["STR",null],[[["mathjs","Complex"],'
'["re",4.0],["im",-5.0]],"N\\/A"]]]')
assert dumps(df_list, default_handler=default,
orient="values") == expected
def test_default_handler_numpy_unsupported_dtype(self):
# GH12554 to_json raises 'Unhandled numpy dtype 15'
df = DataFrame({'a': [1, 2.3, complex(4, -5)],
'b': [float('nan'), None, complex(1.2, 0)]},
columns=['a', 'b'])
expected = ('[["(1+0j)","(nan+0j)"],'
'["(2.3+0j)","(nan+0j)"],'
'["(4-5j)","(1.2+0j)"]]')
assert df.to_json(default_handler=str, orient="values") == expected
def test_default_handler_raises(self):
def my_handler_raises(obj):
raise TypeError("raisin")
pytest.raises(TypeError,
DataFrame({'a': [1, 2, object()]}).to_json,
default_handler=my_handler_raises)
pytest.raises(TypeError,
DataFrame({'a': [1, 2, complex(4, -5)]}).to_json,
default_handler=my_handler_raises)
def test_categorical(self):
# GH4377 df.to_json segfaults with non-ndarray blocks
df = DataFrame({"A": ["a", "b", "c", "a", "b", "b", "a"]})
df["B"] = df["A"]
expected = df.to_json()
df["B"] = df["A"].astype('category')
assert expected == df.to_json()
s = df["A"]
sc = df["B"]
assert s.to_json() == sc.to_json()
def test_datetime_tz(self):
# GH4377 df.to_json segfaults with non-ndarray blocks
tz_range = pd.date_range('20130101', periods=3, tz='US/Eastern')
tz_naive = tz_range.tz_convert('utc').tz_localize(None)
df = DataFrame({
'A': tz_range,
'B': pd.date_range('20130101', periods=3)})
df_naive = df.copy()
df_naive['A'] = tz_naive
expected = df_naive.to_json()
assert expected == df.to_json()
stz = Series(tz_range)
s_naive = Series(tz_naive)
assert stz.to_json() == s_naive.to_json()
def test_sparse(self):
# GH4377 df.to_json segfaults with non-ndarray blocks
df = pd.DataFrame(np.random.randn(10, 4))
df.loc[:8] = np.nan
sdf = df.to_sparse()
expected = df.to_json()
assert expected == sdf.to_json()
s = pd.Series(np.random.randn(10))
s.loc[:8] = np.nan
ss = s.to_sparse()
expected = s.to_json()
assert expected == ss.to_json()
def test_tz_is_utc(self):
from pandas.io.json import dumps
exp = '"2013-01-10T05:00:00.000Z"'
ts = Timestamp('2013-01-10 05:00:00Z')
assert dumps(ts, iso_dates=True) == exp
dt = ts.to_pydatetime()
assert dumps(dt, iso_dates=True) == exp
ts = Timestamp('2013-01-10 00:00:00', tz='US/Eastern')
assert dumps(ts, iso_dates=True) == exp
dt = ts.to_pydatetime()
assert dumps(dt, iso_dates=True) == exp
ts = Timestamp('2013-01-10 00:00:00-0500')
assert dumps(ts, iso_dates=True) == exp
dt = ts.to_pydatetime()
assert dumps(dt, iso_dates=True) == exp
def test_tz_range_is_utc(self):
from pandas.io.json import dumps
exp = '["2013-01-01T05:00:00.000Z","2013-01-02T05:00:00.000Z"]'
dfexp = ('{"DT":{'
'"0":"2013-01-01T05:00:00.000Z",'
'"1":"2013-01-02T05:00:00.000Z"}}')
tz_range = pd.date_range('2013-01-01 05:00:00Z', periods=2)
assert dumps(tz_range, iso_dates=True) == exp
dti = pd.DatetimeIndex(tz_range)
assert dumps(dti, iso_dates=True) == exp
df = DataFrame({'DT': dti})
assert dumps(df, iso_dates=True) == dfexp
tz_range = pd.date_range('2013-01-01 00:00:00', periods=2,
tz='US/Eastern')
assert dumps(tz_range, iso_dates=True) == exp
dti = pd.DatetimeIndex(tz_range)
assert dumps(dti, iso_dates=True) == exp
df = DataFrame({'DT': dti})
assert dumps(df, iso_dates=True) == dfexp
tz_range = pd.date_range('2013-01-01 00:00:00-0500', periods=2)
assert dumps(tz_range, iso_dates=True) == exp
dti = pd.DatetimeIndex(tz_range)
assert dumps(dti, iso_dates=True) == exp
df = DataFrame({'DT': dti})
assert dumps(df, iso_dates=True) == dfexp
def test_latin_encoding(self):
if compat.PY2:
tm.assert_raises_regex(
TypeError, r'\[unicode\] is not implemented as a table column')
return
# GH 13774
pytest.skip("encoding not implemented in .to_json(), "
"xref #13774")
values = [[b'E\xc9, 17', b'', b'a', b'b', b'c'],
[b'E\xc9, 17', b'a', b'b', b'c'],
[b'EE, 17', b'', b'a', b'b', b'c'],
[b'E\xc9, 17', b'\xf8\xfc', b'a', b'b', b'c'],
[b'', b'a', b'b', b'c'],
[b'\xf8\xfc', b'a', b'b', b'c'],
[b'A\xf8\xfc', b'', b'a', b'b', b'c'],
[np.nan, b'', b'b', b'c'],
[b'A\xf8\xfc', np.nan, b'', b'b', b'c']]
def _try_decode(x, encoding='latin-1'):
try:
return x.decode(encoding)
except AttributeError:
return x
# not sure how to remove latin-1 from code in python 2 and 3
values = [[_try_decode(x) for x in y] for y in values]
examples = []
for dtype in ['category', object]:
for val in values:
examples.append(Series(val, dtype=dtype))
def roundtrip(s, encoding='latin-1'):
with ensure_clean('test.json') as path:
s.to_json(path, encoding=encoding)
retr = read_json(path, encoding=encoding)
assert_series_equal(s, retr, check_categorical=False)
for s in examples:
roundtrip(s)
def test_data_frame_size_after_to_json(self):
# GH15344
df = DataFrame({'a': [str(1)]})
size_before = df.memory_usage(index=True, deep=True).sum()
df.to_json()
size_after = df.memory_usage(index=True, deep=True).sum()
assert size_before == size_after
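    # Hedged illustration (not part of the original suite): a minimal
    # standalone roundtrip showing the orient='split' pattern that many of
    # the tests above rely on; values and labels are arbitrary assumptions.
    def test_roundtrip_split_orient_sketch(self):
        df = DataFrame({'a': [1, 2], 'b': [3.5, 4.5]}, index=['x', 'y'])
        result = read_json(df.to_json(orient='split'), orient='split')
        assert_frame_equal(result, df)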
| bsd-3-clause |
joshfuchs/ZZCeti_pipeline | spectools.py | 1 | 35547 | """
This program contains various definitions and commonly used routines
for handling spectra in the ZZ Ceti pipeline.
Written primarily by JT Fuchs
Based on pySALT
"""
#import pyfits as fits
import astropy.io.fits as fits
import ReduceSpec_tools as rt
import numpy as np
import scipy
from scipy.interpolate import InterpolatedUnivariateSpline as interpo
from scipy.interpolate import UnivariateSpline
import os
class spectrum(object):
def __init__(self,opfarr,farr,sky,sigma,warr):
self.opfarr = opfarr
self.farr = farr
self.sky = sky
self.sigma = sigma
self.warr = warr
# ===========================================================================
class standard(object):
def __init__(self,warr,magarr,wbin):
self.warr = warr
self.magarr = magarr
self.wbin = wbin
# ===========================================================================
def readspectrum(specfile):
""" Given a specfile, read in the spectra and return
a spectrum object consisting of
opfar(optimally extracted spectrum),farr(raw extracted spectrum),sky(background),sigma(sigma spectrum)
"""
spec = fits.open(specfile)
opfarr = spec[0].data[0,0,:]
farr = spec[0].data[1,0,:]
sky = spec[0].data[2,0,:]
sigma = spec[0].data[3,0,:]
#Read in header info
airmass = spec[0].header['airmass']
exptime = spec[0].header['exptime']
'''
#Set up wavelengths using linear dispersion
specwav0 = spec[0].header['crval1'] #Grab the leftmost wavelength coordinate
specdeltawav = spec[0].header['cd1_1'] #Grab the delta coordinate
warr = np.zeros(len(farr)) #Fill an array with appropriate wavelength values
warr[0] = specwav0
ival = np.arange(1,len(farr))
for i in ival:
warr[i] = warr[i-1] + specdeltawav
'''
#Set up wavelengths using grating equation
alpha = float(spec[0].header['GRT_TARG'])
theta = float(spec[0].header['CAM_TARG'])
fr = float(spec[0].header['LINDEN'])
fd = float(spec[0].header['CAMFUD'])
fl = float(spec[0].header['FOCLEN'])
zPnt = float(spec[0].header['ZPOINT'])
trim_sec= spec[0].header["CCDSEC"]
trim_offset= float( trim_sec[1:len(trim_sec)-1].split(':')[0] )-1
try:
bining= float( spec[0].header["PARAM18"] )
except:
bining= float( spec[0].header["PG3_2"] )
nx= np.size(opfarr)#spec_data[0]
Pixels= bining*(np.arange(0,nx,1)+trim_offset)
WDwave = DispCalc(Pixels, alpha, theta, fr, fd, fl, zPnt)
warr = np.asarray(WDwave)
specdeltawav = np.zeros(len(warr))
specdeltawav[0] = warr[1] - warr[0]
for i in range(1,len(warr)):
specdeltawav[i] = warr[i] - warr[i-1]
result = spectrum(opfarr,farr,sky,sigma,warr)
return result,airmass,exptime,specdeltawav
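#Illustrative usage sketch (not from the original pipeline): the filename below
#is hypothetical; any 1-D Goodman spectrum written by the extraction step with
#the usual four bands (optimal, raw, sky, sigma) should work.
# spec, airmass, exptime, disp = readspectrum('wtfb.wd1234_930_blue.ms.fits')
# print spec.warr[0], spec.opfarr[0], airmass, exptime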
# ===========================================================================
def DispCalc(Pixels, alpha, theta, fr, fd, fl, zPnt):
    # This is the Grating Equation used to calculate the wavelength of a pixel
    # based on the fitted parameters and angle set up.
    # Inputs:
    # Pixels= Vector of Pixel Numbers
    # alpha= Grating Angle
    # theta= Camera Angle
    # fr= fringe density of grating
    # fd= Camera Angle Correction Factor
    # fl= Focal Length
    # zPnt= Zero point pixel
Wavelengths= [] # Vector to store calculated wavelengths
for pix in Pixels:
beta = np.arctan( (pix-zPnt)*15./fl ) + (fd*theta*np.pi/180.) - (alpha*np.pi/180.)
wave = (10**6.)*( np.sin(beta) + np.sin(alpha*np.pi/180.) )/fr
Wavelengths.append(wave)
return Wavelengths
# ===========================================================================
def readheader(specfile):
spec = fits.open(specfile)
#Delete the parts of the header that are not uniform in Goodman. These are primarily the parts that contain degree symbols.
header = spec[0].header
del header['param0']
del header['param61']
del header['param62']
del header['param63']
return header
# ===========================================================================
def readstandard(stdfile):
warr,magarr,wbin = np.genfromtxt(stdfile,unpack=True)
result = standard(warr,magarr,wbin)
return result
# ===========================================================================
def applywavelengths(wavefile,applyfile,newname):
#Read in file with wavelength solution and get header info
wave = fits.open(wavefile)
n_fr = float(wave[0].header['LINDEN'])
n_fd = float(wave[0].header['CAMFUD'])
fl = float(wave[0].header['FOCLEN'])
zPnt = float(wave[0].header['ZPOINT'])
#Read in file to apply wavelength solution and update header
spec_data= fits.getdata(applyfile)
spec_header= fits.getheader(applyfile)
rt.Fix_Header(spec_header)
spec_header.append( ('LINDEN', n_fr,'Line Desity for Grating Eq.'),
useblanks= True, bottom= True )
spec_header.append( ('CAMFUD', n_fd,'Camera Angle Correction Factor for Grat. Eq.'),
useblanks= True, bottom= True )
spec_header.append( ('FOCLEN', fl,'Focal Length for Grat Eq.'),
useblanks= True, bottom= True )
spec_header.append( ('ZPOINT', zPnt,'Zero Point Pixel for Grat Eq.'),
useblanks= True, bottom= True )
NewspecHdu = fits.PrimaryHDU(data= spec_data, header= spec_header)
#See if new file already exists
mylist = [True for f in os.listdir('.') if f == newname]
exists = bool(mylist)
clob = False
if exists:
print 'File %s already exists.' % newname
nextstep = raw_input('Do you want to overwrite or designate a new name (overwrite/new)? ')
if nextstep == 'overwrite':
clob = True
exists = False
elif nextstep == 'new':
newname = raw_input('New file name: ')
exists = False
else:
exists = False
NewspecHdu.writeto(newname, output_verify='warn', clobber= clob)
# ===========================================================================
def magtoflux(marr, fzero):
"""Convert from magnitude to flux
marr - input array in mags
    fzero - zero point for conversion
"""
return fzero * 10. ** (-0.4 * marr)
# ===========================================================================
def fnutofwave(warr,farr):
"""Converts farr in ergs/s/cm2/Hz to ergs/s/cm2/A"""
c = 2.99792458e18 #speed of light in Angstroms/s
return farr * c / warr**2.
# ===========================================================================
def sum_std(std_warr,wbin,spec_warr,spec_farr):
#Sum the standard star spectrum into the same bins as the flux
#calibration file.
n = 0
for lambdas in std_warr:
low = lambdas - wbin[n]/2.
high = lambdas + wbin[n]/2.
#print low,high
c = np.where(spec_warr >= low)
d = np.where(spec_warr <= high)
lowflux = np.asarray(c)
highflux = np.asarray(d)
index = np.intersect1d(lowflux,highflux)
fluxnew = spec_farr[index]
wavenew = spec_warr[index]
#print wavenew[0],wavenew[-1]
total = np.sum(fluxnew)
if n == 0:
result = [total]
if n > 0:
result.append(total)
#blah = np.asarray(result)
#print blah[n]
        n += 1  # keep n an integer so it remains a valid index into wbin
return np.asarray(result)
# ===========================================================================
def sensfunc(obs_counts,std_flux,exptime,bins,airmass):
    #This function calculates the sensitivity curve for the spectrum.
    #It is calculated by:
    #C = 2.5 * log10(obs_counts / (exptime * bin * std_flux))
    #Note: the airmass * extinction term is not applied here; airmass is
    #currently unused and extinction is handled separately
    #(see extinction_correction).
n = 0
for counts in obs_counts:
cal = 2.5 * np.log10(counts/ (exptime * bins[n] * std_flux[n]))
if n == 0:
sens = [cal]
if n > 0:
sens.append(cal)
n += 1
sensitivity = np.asarray(sens)
return sensitivity
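#Illustrative worked example (hypothetical numbers): for a single 50 A-wide bin
#with 10,000 observed counts, a 60 s exposure, and a tabulated standard-star
#flux of 1e-13 erg/s/cm^2/A, the sensitivity value would be
# 2.5*np.log10(10000./(60.*50.*1e-13)) ~ 33.8
#A typical call over whole arrays looks like
# sens = sensfunc(obs_counts, std_flux, exptime, wbin, airmass)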
# ===========================================================================
def cal_spec(counts,sens,exptime,disp):
#Calibrates a observed star using a sensitivity function
flux = (counts) / (exptime * disp * 10.**(sens/2.5))
return flux
# ===========================================================================
def extinction_correction(lams, flux, airmass):
'''
    Extinction correction based on Stritzinger et al. (2005) values for CTIO
'''
# Function inputs are wavelengths and flux values for the spectrum as well
# as the airmass the spectrum was measured at
# wavelength-dependent extinction coefficients from CTIO
# Strizinger et. al. 2005
ctio_lams = [3050.0, 3084.6500000000001, 3119.3099999999999, 3153.96, 3188.6100000000001, 3223.27, 3257.9200000000001, 3292.5700000000002, 3327.23, 3361.8800000000001, 3396.54, 3431.1900000000001, 3465.8400000000001, 3500.5, 3535.1500000000001, 3569.8000000000002, 3604.46, 3639.1100000000001, 3673.7600000000002, 3708.4200000000001, 3743.0700000000002, 3777.7199999999998, 3812.3800000000001, 3847.0300000000002, 3881.6900000000001, 3916.3400000000001, 3950.9899999999998, 3985.6500000000001, 4020.3000000000002, 4054.9499999999998, 4089.6100000000001, 4124.2600000000002, 4158.9099999999999, 4193.5699999999997, 4228.2200000000003, 4262.8699999999999, 4297.5299999999997, 4332.1800000000003, 4366.8299999999999, 4401.4899999999998, 4436.1400000000003, 4470.79, 4505.4499999999998, 4540.1000000000004, 4574.7600000000002, 4609.4099999999999, 4644.0600000000004, 4678.7200000000003, 4713.3699999999999, 4748.0200000000004, 4782.6800000000003, 4817.3299999999999, 4851.9799999999996, 4886.6400000000003, 4921.29, 4955.9399999999996, 4990.6000000000004, 5025.25, 5059.9099999999999, 5094.5600000000004, 5129.21, 5163.8699999999999, 5198.5200000000004, 5233.1700000000001, 5267.8299999999999, 5302.4799999999996, 5337.1300000000001, 5371.79, 5406.4399999999996, 5441.0900000000001, 5475.75, 5510.3999999999996, 5545.0500000000002, 5579.71, 5614.3599999999997, 5649.0200000000004, 5683.6700000000001, 5718.3199999999997, 5752.9799999999996, 5787.6300000000001, 5822.2799999999997, 5856.9399999999996, 5891.5900000000001, 5926.2399999999998, 5960.8999999999996, 5995.5500000000002, 6030.1999999999998, 6064.8599999999997, 6099.5100000000002, 6134.1700000000001, 6168.8199999999997, 6203.4700000000003, 6238.1300000000001, 6272.7799999999997, 6307.4300000000003, 6342.0900000000001, 6376.7399999999998, 6411.3900000000003, 6446.0500000000002, 6480.6999999999998, 6482.8500000000004, 6535.3800000000001, 6587.9099999999999, 6640.4399999999996, 6692.96, 6745.4899999999998, 6798.0200000000004, 6850.5500000000002, 6903.0699999999997, 6955.6000000000004, 7008.1300000000001, 7060.6499999999996, 7113.1800000000003, 7165.71, 7218.2399999999998, 7270.7600000000002, 7323.29, 7375.8199999999997, 7428.3500000000004, 7480.8699999999999, 7533.3999999999996, 7585.9300000000003, 7638.4499999999998, 7690.9799999999996, 7743.5100000000002, 7796.04, 7848.5600000000004, 7901.0900000000001, 7953.6199999999999, 8006.1499999999996, 8058.6700000000001, 8111.1999999999998, 8163.7299999999996, 8216.25, 8268.7800000000007, 8321.3099999999995, 8373.8400000000001, 8426.3600000000006, 8478.8899999999994, 8531.4200000000001, 8583.9500000000007, 8636.4699999999993, 8689.0, 8741.5300000000007, 8794.0499999999993, 8846.5799999999999, 8899.1100000000006, 8951.6399999999994, 9004.1599999999999, 9056.6900000000005, 9109.2199999999993, 9161.75, 9214.2700000000004, 9266.7999999999993, 9319.3299999999999, 9371.8500000000004, 9424.3799999999992, 9476.9099999999999, 9529.4400000000005, 9581.9599999999991, 9634.4899999999998, 9687.0200000000004, 9739.5499999999993, 9792.0699999999997, 9844.6000000000004, 9897.1299999999992, 9949.6499999999996, 10002.200000000001, 10054.700000000001, 10107.200000000001, 10159.799999999999, 10212.299999999999, 10264.799999999999, 10317.299999999999, 10369.9, 10422.4, 10474.9, 10527.5, 10580.0, 10632.5, 10685.0, 10737.6, 10790.1, 10842.6, 10895.1, 10947.700000000001, 11000.200000000001]
ctio_ext = [1.395, 1.2830000000000001, 1.181, 1.0880000000000001, 1.004, 0.92900000000000005, 0.86099999999999999, 0.80099999999999993, 0.748, 0.69999999999999996, 0.65900000000000003, 0.623, 0.59099999999999997, 0.56399999999999995, 0.54000000000000004, 0.52000000000000002, 0.502, 0.48700000000000004, 0.47299999999999998, 0.46000000000000002, 0.44799999999999995, 0.436, 0.42499999999999999, 0.41399999999999998, 0.40200000000000002, 0.39100000000000001, 0.38100000000000001, 0.37, 0.35999999999999999, 0.34899999999999998, 0.33899999999999997, 0.33000000000000002, 0.32100000000000001, 0.313, 0.30399999999999999, 0.29600000000000004, 0.28899999999999998, 0.28100000000000003, 0.27399999999999997, 0.26700000000000002, 0.26000000000000001, 0.254, 0.247, 0.24100000000000002, 0.23600000000000002, 0.23000000000000001, 0.22500000000000001, 0.22, 0.215, 0.20999999999999999, 0.20600000000000002, 0.20199999999999999, 0.19800000000000001, 0.19399999999999998, 0.19, 0.187, 0.184, 0.18100000000000002, 0.17800000000000002, 0.17600000000000002, 0.17300000000000001, 0.17100000000000001, 0.16899999999999998, 0.16699999999999998, 0.16600000000000001, 0.16399999999999998, 0.16300000000000001, 0.16200000000000001, 0.16, 0.159, 0.158, 0.158, 0.157, 0.156, 0.155, 0.155, 0.154, 0.153, 0.153, 0.152, 0.151, 0.151, 0.14999999999999999, 0.14899999999999999, 0.14899999999999999, 0.14800000000000002, 0.14699999999999999, 0.14599999999999999, 0.14400000000000002, 0.14300000000000002, 0.14199999999999999, 0.14000000000000001, 0.13800000000000001, 0.13600000000000001, 0.13400000000000001, 0.13200000000000001, 0.129, 0.126, 0.12300000000000001, 0.12, 0.12, 0.115, 0.111, 0.107, 0.10300000000000001, 0.099000000000000005, 0.096000000000000002, 0.091999999999999998, 0.088000000000000009, 0.085000000000000006, 0.08199999999999999, 0.078, 0.074999999999999997, 0.072000000000000008, 0.069000000000000006, 0.066000000000000003, 0.064000000000000001, 0.060999999999999999, 0.057999999999999996, 0.055999999999999994, 0.052999999999999999, 0.050999999999999997, 0.049000000000000002, 0.047, 0.044999999999999998, 0.042999999999999997, 0.040999999999999995, 0.039, 0.037000000000000005, 0.035000000000000003, 0.034000000000000002, 0.032000000000000001, 0.029999999999999999, 0.028999999999999998, 0.027999999999999997, 0.026000000000000002, 0.025000000000000001, 0.024, 0.023, 0.022000000000000002, 0.02, 0.019, 0.019, 0.018000000000000002, 0.017000000000000001, 0.016, 0.014999999999999999, 0.014999999999999999, 0.013999999999999999, 0.013000000000000001, 0.013000000000000001, 0.012, 0.011000000000000001, 0.011000000000000001, 0.011000000000000001, 0.01, 0.01, 0.0090000000000000011, 0.0090000000000000011, 0.0090000000000000011, 0.0080000000000000002, 0.0080000000000000002, 0.0080000000000000002, 0.0069999999999999993, 0.0069999999999999993, 0.0069999999999999993, 0.0069999999999999993, 0.0069999999999999993, 0.0060000000000000001, 0.0060000000000000001, 0.0060000000000000001, 0.0060000000000000001, 0.0060000000000000001, 0.0060000000000000001, 0.0050000000000000001, 0.0050000000000000001, 0.0050000000000000001, 0.0050000000000000001, 0.0050000000000000001, 0.0050000000000000001, 0.0040000000000000001, 0.0040000000000000001, 0.0040000000000000001, 0.0040000000000000001, 0.0030000000000000001, 0.0030000000000000001, 0.0030000000000000001]
smooth_param = 0.001
spline_fit = UnivariateSpline(ctio_lams, ctio_ext, s=smooth_param, k=3)
a_lambda = spline_fit(lams)
corrected_flux = flux*(10.0**(.4*a_lambda*(airmass)))
xx = np.linspace(np.min(ctio_lams), np.max(ctio_lams), 1000)
yy = spline_fit(xx)
'''
plt.figure()
plt.scatter(ctio_lams, ctio_ext, label=smooth_param)
plt.axvline(np.min(lams), color='g')
plt.axvline(np.max(lams), color='g')
plt.plot(xx, yy)
plt.xlabel('Wavelength')
plt.ylabel('Extinction Coefficient')
plt.title('Gemini Extinction Coefficient Fit')
'''
'''
plt.figure()
plt.plot(lams,flux)
plt.plot(lams,corrected_flux)
plt.show()
'''
return corrected_flux
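#Illustrative sketch (hypothetical values): correcting a flat spectrum observed
#at airmass 1.3 back to zero airmass.
# lams = np.linspace(3600., 7200., 2000)
# flux = np.ones(len(lams)) * 1e-15
# flux0 = extinction_correction(lams, flux, 1.3)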
# ===========================================================================
def resamplespec(w1, w0, spec0, oversamp=100):
"""
Resample a spectrum while conserving flux density.
Written by Ian Crossfield: www.lpl.arizona.edu/~ianc/python/index.html
:INPUTS:
w1 : sequence
new wavelength grid (i.e., center wavelength of each pixel)
w0 : sequence
old wavelength grid (i.e., center wavelength of each pixel)
spec0 : sequence
old spectrum (e.g., flux density or photon counts)
oversamp : int
factor by which to oversample input spectrum prior to
rebinning. The worst fractional precision you achieve is
roughly 1./oversamp.
:NOTE:
Format is the same as :func:`numpy.interp`
:REQUIREMENTS:
:doc:`tools`
"""
#from tools import errxy
# 2012-04-25 18:40 IJMC: Created
nlam = len(w0)
x0 = np.arange(nlam, dtype=float)
x0int = np.arange((nlam-1.)*oversamp + 1., dtype=float)/oversamp
w0int = np.interp(x0int, x0, w0)
spec0int = np.interp(w0int, w0, spec0)/oversamp
# Set up the bin edges for down-binning
maxdiffw1 = np.diff(w1).max()
w1bins = np.concatenate(([w1[0] - maxdiffw1],
.5*(w1[1::] + w1[0:-1]), \
[w1[-1] + maxdiffw1]))
# Bin down the interpolated spectrum:
junk, spec1, junk2, junk3 = errxy(w0int, spec0int, w1bins, xmode=None, ymode='sum', xerr=None, yerr=None)
return spec1
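# Illustrative sketch (not part of the original file): a hypothetical call to
# resamplespec with made-up wavelength grids, kept as comments so module
# behaviour is unchanged.
#   w0 = np.arange(4000., 7000., 1.)                  # old, fine grid
#   spec0 = np.exp(-0.5 * ((w0 - 5500.) / 200.)**2)   # fake Gaussian spectrum
#   w1 = np.linspace(4100., 6900., 500)               # new, coarser grid
#   spec1 = resamplespec(w1, w0, spec0, oversamp=100)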
# ===========================================================================
def errxy(x,y,xbins, xmode='mean', ymode='mean', xerr='minmax', yerr='sdom', clean=None, binfactor=None, verbose=False,returnstats=False, timing=False):
"""Bin down datasets in X and Y for errorbar plotting
Written by Ian Crossfield: www.lpl.arizona.edu/~ianc/python/index.html
:INPUTS:
x -- (array) independent variable data
y -- (array) dependent variable data
xbins -- (array) edges of bins, in x-space. Only x-data
between two bin edges will be used. Thus if M bin
edges are entered, (M-1) datapoints will be returned.
If xbins==None, then no binning is done.
:OPTIONAL INPUT:
xmode/ymode -- (str) method to aggregate x/y data into datapoints:
'mean' -- use numpy.mean
'median' -- use numpy.median
'sum' -- use numpy.sum
None -- don't compute; return the empty list []
xerr/yerr -- (str) method to aggregate x/y data into errorbars
'std' -- sample standard deviation (numpy.std)
'sdom' -- standard deviation on the mean; i.e., std/sqrt(N)
'minmax' -- use full range of data in the bin
None -- don't compute; return the empty list []
binfactor -- (int) If not None, average over this many
consecutive values instead of binning explicitly by
time-based bins. Can also be a sequence, telling the
number of values over which to average. E.g.,
binfactor=[10,10,20] will bin over the first 10 points,
the second 10 points, and the next 20 points.
clean -- (dict) keyword options to clean y-data ONLY, via
analysis.removeoutliers, with an additional "nsigma"
keyword. See removeoutliers for more information.
E.g.: clean=dict(nsigma=5,remove='both',niter=1)
:OUTPUTS: a tuple of four arrays to be passed to matplotlib.pyplot.errorbar:
xx -- locations of the aggregated x-datapoint in each bin
yy -- locations of the aggregated y-datapoint in each bin
xerr -- x-errorbars
yerr -- y-errorbars
:EXAMPLE:
::
x = hstack((arange(10), arange(20)+40))
y = randn(len(x))
xbins = [-1,15,70]
xx,yy,xerr,yerr = errxy(x,y,xbins)
plot(x,y, '.b')
errorbar(xx,yy,xerr=xerr,yerr=yerr, fmt='or')
:NOTES:
To just bin down uncleaned data (i.e., no 'error' terms
returned), set clean, xerr, yerr to None. However, when
computing all values (xerr and yerr not None) it is faster
to set clean to some ridiculous value, i.e.,
clean=dict(niter=0, nsigma=9e99). This probably means more
optimization could be done.
Be sure you call the errorbar function using the keywords xerr
and yerr, since otherwise the default order of inputs to the
function is (x,y,yerr,xerr).
Data 'x' are determined to be in a bin with sides (L, R) when
satisfying the condition (x>L) and (x<=R)
:SEE ALSO: matplotlib.pyplot.errorbar, :func:`analysis.removeoutliers`
:REQUIREMENTS: :doc:`numpy`, :doc:`analysis`
"""
# 2009-09-29 20:07 IJC: Created w/mean-median and std-sdom-minmax.
# 2009-12-14 16:01 IJC: xbins can be 'None' for no binning.
# 2009-12-15 10:09 IJC: Added "binfactor" option.
# 2009-12-22 09:56 IJC: "binfactor" can now be a sequence.
# 2009-12-29 01:16 IJC: Fixed a bug with binfactor sequences.
# 2010-04-29 09:59 IJC: Added 'returnstats' feature
# 2010-10-19 16:25 IJC: Added 'sum' option for x-data
# 2011-03-22 12:57 IJC: Added 'none' option for data and errors
# 2012-03-20 16:33 IJMC: Fixed bug; xmode=='none' now works.
# 2012-03-27 14:00 IJMC: Now using np.digitize -- speed boost.
# Rewrote code to optimize (somewhat),
# cleaned up 'import' statements.
# 2012-04-08 15:57 IJMC: New speed boost from adopting
# numpy.histogram-like implementation:
# numpy.searchsorted, etc.
#from analysis import removeoutliers
import numpy as np
if timing:
import time
tic = time.time()
def sdom(data):
"""Return standard deviation of the mean."""
return np.std(data)/np.sqrt(data.size)
def getcenter(data, cmode):
"""Get data center based on mode. Helper function."""
if cmode is None:
ret = 0
elif cmode=='mean':
ret = np.mean(data)
elif cmode=='median':
ret = np.median(data)
elif cmode=='sum':
ret = np.sum(data)
return ret
def geterr(data, emode, cmode):
"""Get errorbar. Helper function."""
if emode is None:
ret = []
elif emode=='std':
ret = np.std(data)
elif emode=='sdom':
ret = sdom(data)
elif emode=='minmax':
if len(data)==0:
ret = [np.nan, np.nan]
else:
center = getcenter(data,cmode)
ret = [center-min(data), max(data)-center]
return ret
def cleandata(data, clean, returnstats=False):
"""Clean data using removeoutliers. Helper function."""
init_count = np.array(data).size
if clean==None: # Don't clean at all!
#clean = dict(nsigma=1000, niter=0)
if returnstats:
ret = data, (init_count, init_count)
else:
ret = data
else: # Clean the data somehow ('clean' must be a dict)
if 'nsigma' not in clean:
clean.update(dict(nsigma=99999))
data = removeoutliers(data, **clean)
if returnstats:
ret = data, (init_count, np.array(data).size)
else:
ret = data
return ret
if timing:
print "%1.3f sec since starting function; helpers defined" % (time.time() - tic)
####### Begin main function ##########
sorted_index = np.argsort(x)
x = np.array(x, copy=False)[sorted_index]
y = np.array(y, copy=False)[sorted_index]
#x = np.array(x,copy=True).ravel()
#y = np.array(y,copy=True).ravel()
xbins = np.array(xbins,copy=True).ravel()
if xbins[0]==None and binfactor==None:
if returnstats ==False:
ret = x, y, np.ones(x.shape)*np.nan, np.ones(y.shape)*np.nan
else:
ret = x, y, np.ones(x.shape)*np.nan, np.ones(y.shape)*np.nan, (x.size, x.size)
return ret
if binfactor==None: # used passed-in 'xbins'
xbins = np.sort(xbins)
elif hasattr(binfactor,'__iter__'): # use variable-sized bins
binfactor = np.array(binfactor).copy()
sortedx = np.sort(x)
betweens = np.hstack((x.min()-1, 0.5*(sortedx[1::]+sortedx[0:len(x)-1]), x.max()+1))
xbins = []
counter = 0
for ii in range(len(binfactor)):
thisbin = betweens[counter]
xbins.append(thisbin)
counter += binfactor[ii]
xbins.append(x.max() + 1)
else: # bin down by the same factor throughout
binfactor = int(binfactor)
sortedx = np.sort(x)
betweens = np.hstack((x.min()-1, 0.5*(sortedx[1::]+sortedx[0:len(x)-1]), x.max()+1))
xbins = betweens[::binfactor]
if timing:
print "%1.3f sec since starting function; bins defined" % (time.time() - tic)
nbins = len(xbins)-1
arraynan = np.array([np.nan])
exx = []
eyy = []
xx = np.zeros(nbins)
yy = np.zeros(nbins)
yy2 = np.zeros(nbins)
init_count, final_count = y.size, 0
if timing:
setuptime = 0
xdatatime = 0
ydatatime = 0
statstime = 0
#import pylab as py
#xxx = np.sort(x)
if timing: tic1 = time.time()
#inds = np.digitize(x, xbins)
inds2 = [[x.searchsorted(xbins[ii], side='left'), \
x.searchsorted(xbins[ii+1], side='left')] for ii in range(nbins)]
if timing: setuptime += (time.time() - tic1)
#pdb.set_trace()
#bin_means = [data[digitized == i].mean() for i in range(1, len(bins))]
dox = xmode is not None
doy = ymode is not None
doex = xerr is not None
doey = yerr is not None
if clean is None:
if timing: tic3 = time.time()
if dox: exec ('xfunc = np.%s' % xmode) in locals()
if doy: exec ('yfunc = np.%s' % ymode) in locals()
for ii in range(nbins):
#index = inds==(ii+1)
if dox:
#xx[ii] = xfunc(x[index])
xx[ii] = xfunc(x[inds2[ii][0]:inds2[ii][1]])
if doy:
#yy[ii] = yfunc(y[index])
yy[ii] = yfunc(y[inds2[ii][0]:inds2[ii][1]])
if doex:
#exx.append(geterr(x[index], xerr, xmode))
exx.append(geterr(x[inds2[ii][0]:inds2[ii][1]], xerr, xmode))
if doey:
#eyy.append(geterr(y[index], yerr, ymode))
eyy.append(geterr(y[inds2[ii][0]:inds2[ii][1]], yerr, ymode))
if timing: statstime += (time.time() - tic3)
#pdb.set_trace()
else:
for ii in range(nbins):
if timing: tic1 = time.time()
#index = inds==(ii+1)
if timing: setuptime += (time.time() - tic1)
if timing: tic2 = time.time()
xdata = x[inds2[ii][0]:inds2[ii][1]]
if timing: xdatatime += (time.time() - tic2)
if timing: tic25 = time.time()
if ymode is None and yerr is None: # We're free to ignore the y-data:
ydata = arraynan
else: # We have to compute something with the y-data:
if clean is not None:
ydata, retstats = cleandata(y[inds2[ii][0]:inds2[ii][1]], clean, returnstats=True)
if returnstats:
final_count += retstats[1]
else: # We don't have to clean the data
ydata = y[inds2[ii][0]:inds2[ii][1]]
if returnstats:
final_count += ydata.size
if timing: ydatatime += (time.time() - tic25)
if timing: tic3 = time.time()
xx[ii] = getcenter(xdata,xmode)
if timing: tic4 = time.time()
yy[ii] = getcenter(ydata,ymode)
if timing: tic5 = time.time()
exx.append(geterr( xdata,xerr,xmode))
if timing: tic6 = time.time()
eyy.append(geterr( ydata,yerr,ymode))
if timing: tic7 = time.time()
if timing: statstime += (time.time() - tic3)
#exx[ii] = geterr( xdata,xerr,xmode)
#eyy[ii] = geterr( ydata,yerr,ymode)
if timing:
print "%1.3f sec for setting up bins & indices..." % setuptime
print "%1.3f sec for getting x data clean and ready." % xdatatime
print "%1.3f sec for getting y data clean and ready." % ydatatime
#print "%1.3f sec for computing x-data statistics." % (tic4-tic3)
#print "%1.3f sec for computing y-data statistics." % (tic5-tic4)
#print "%1.3f sec for computing x-error statistics." % (tic6-tic5)
#print "%1.3f sec for computing y-error statistics." % (tic7-tic6)
print "%1.3f sec for computing statistics........." % statstime
if timing:
print "%1.3f sec since starting function; uncertainties defined" % (time.time() - tic)
#xx = array(xx)
#yy = array(yy)
exx = np.array(exx).transpose() # b/c 2D if minmax option used
eyy = np.array(eyy).transpose() # b/c 2D if minmax option used
#pdb.set_trace()
if returnstats:
ret= xx,yy,exx,eyy,(init_count, final_count)
else:
ret = xx,yy,exx,eyy
#print 'tools: returnstats, len(ret)>>', returnstats, len(ret)
if timing:
print "%1.3f sec since starting function; returning" % (time.time() - tic)
return ret
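# Illustrative sketch (not part of the original file): besides explicit bin
# edges, errxy accepts the 'binfactor' option documented above; a hypothetical
# call binning every 10 consecutive points might be
#   xx, yy, exx, eyy = errxy(x, y, None, binfactor=10)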
# ===========================================================================
def removeoutliers(data, nsigma, remove='both', center='mean', niter=500, retind=False, verbose=False):
"""Strip outliers from a dataset, iterating until converged.
Written by Ian Crossfield: www.lpl.arizona.edu/~ianc/python/index.html
:INPUT:
data -- 1D numpy array. data from which to remove outliers.
nsigma -- positive number. limit defining outliers: number of
standard deviations from center of data.
:OPTIONAL INPUTS:
remove -- ('min'|'max'|'both') respectively removes outliers
below, above, or on both sides of the limits set by
nsigma.
center -- ('mean'|'median'|value) -- set central value, or
method to compute it.
niter -- maximum number of iterations before exit; defaults to 500.
Setting it very high can occasionally result in empty arrays returned.
retind -- (bool) whether to return index of good values as
second part of a 2-tuple.
:EXAMPLE:
::
from numpy import linspace
from numpy.random import randn
from matplotlib.pyplot import hist
from analysis import removeoutliers
data = randn(1000)
hbins = linspace(-5,5,50)
d2 = removeoutliers(data, 1.5, niter=1)
hist(data, hbins)
hist(d2, hbins)
"""
# 2009-09-04 13:24 IJC: Created
# 2009-09-24 17:34 IJC: Added 'retind' feature. Tricky, but nice!
# 2009-10-01 10:40 IJC: Added check for stdev==0
# 2009-12-08 15:42 IJC: Added check for isfinite
from numpy import median, ones, isfinite, nonzero
def getcen(data, method):
"Get central value of a 1D array (helper function)"
if method.__class__==str:
if method=='median':
cen = median(data)
else:
cen = data.mean()
else:
cen = method
return cen
def getgoodindex(data, nsigma, center, stdev, remove):
"Get number of outliers (helper function!)"
if stdev==0:
distance = data*0.0
else:
distance = (data-center)/stdev
if remove=='min':
goodind = distance>-nsigma
elif remove=='max':
goodind = distance<nsigma
else:
goodind = abs(distance)<=nsigma
return goodind
data = data.ravel().copy()
ndat0 = len(data)
ndat = len(data)
iter=0
goodind = ones(data.shape,bool)
goodind *= isfinite(data)
while ((ndat0 != ndat) or (iter==0)) and (iter<niter) and (ndat>0) :
ndat0 = len(data[goodind])
cen = getcen(data[goodind], center)
stdev = data[goodind].std()
thisgoodind = getgoodindex(data[goodind], nsigma, cen, stdev, remove)
goodind[nonzero(goodind)[0]] = thisgoodind
if verbose:
print "cen>>",cen
print "std>>",stdev
ndat = len(data[goodind])
iter +=1
if verbose:
print ndat0, ndat
if retind:
ret = data[goodind], goodind
else:
ret = data[goodind]
return ret
# ===========================================================================
def resample(old_dispersion, new_dispersion):
"""
Written by Andy Casey: https://github.com/andycasey
This program is found under sick/specutils.py
The output is a compressed sparse column matrix that tells the ratio between the old and new binning. To get the fluxes of the new binning: newflux = np.dot(oldflux,output.toarray())
Resample a spectrum to a new dispersion map while conserving total flux.
:param old_dispersion:
The original dispersion array.
:type old_dispersion:
:class:`numpy.array`
:param new_dispersion:
The new dispersion array to resample onto.
:type new_dispersion:
:class:`numpy.array`
"""
data = []
old_px_indices = []
new_px_indices = []
for i, new_wl_i in enumerate(new_dispersion):
# These indices should span just over the new wavelength pixel.
indices = np.unique(np.clip(
old_dispersion.searchsorted(new_dispersion[i:i + 2], side="left") \
+ [-1, +1], 0, old_dispersion.size - 1))
N = np.ptp(indices)
if N == 0:
# 'Fake' pixel.
data.append(np.nan)
new_px_indices.append(i)
old_px_indices.extend(indices)
continue
# Sanity checks.
assert (old_dispersion[indices[0]] <= new_wl_i \
or indices[0] == 0)
assert (new_wl_i <= old_dispersion[indices[1]] \
or indices[1] == old_dispersion.size - 1)
fractions = np.ones(N)
# Edges are handled as fractions between rebinned pixels.
_ = np.clip(i + 1, 0, new_dispersion.size - 1)
lhs = old_dispersion[indices[0]:indices[0] + 2]
rhs = old_dispersion[indices[-1] - 1:indices[-1] + 1]
fractions[0] = (lhs[1] - new_dispersion[i])/np.ptp(lhs)
fractions[-1] = (new_dispersion[_] - rhs[0])/np.ptp(rhs)
# Being binned to a single pixel. Prevent overflow from fringe cases.
fractions = np.clip(fractions, 0, 1)
fractions /= fractions.sum()
data.extend(fractions)
new_px_indices.extend([i] * N) # Mark the new pixel indices affected.
old_px_indices.extend(np.arange(*indices)) # And the old pixel indices.
return scipy.sparse.csc_matrix((data, (old_px_indices, new_px_indices)),
shape=(old_dispersion.size, new_dispersion.size))
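# Illustrative sketch (not part of the original file): applying the sparse
# resampling matrix as the docstring above suggests; the dispersion grids here
# are hypothetical.
#   old_wl = np.linspace(400., 700., 1000)
#   new_wl = np.linspace(410., 690., 300)
#   R = resample(old_wl, new_wl)               # csc matrix, shape (1000, 300)
#   old_flux = np.ones_like(old_wl)
#   new_flux = np.dot(old_flux, R.toarray())   # flux on the new grid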
| mit |
karoraw1/xMetaPipeline | bin/regress_against_procs.py | 1 | 6979 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Fri Aug 18 18:49:55 2017
@author: login
"""
from scipy.cluster.hierarchy import linkage, dendrogram
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
from otu_ts_support import parseBiosample, lasso_ridge_ftest
from calibration_functions import importratesandconcs_mod
from scipy.spatial.distance import pdist
from scipy.spatial.distance import squareform
import numpy as np
from calibration_functions import standard_scale_df
expected_seqs = ["seq188", "seq12", "seq172", "seq135", "seq228", "seq3",
"seq229", "seq106", "seq29", "seq3397", "seq258"]
pairs =[ ("Bin_4_1", "seq3397"),#
("Bin_33_1", "seq214"),#
("Bin_6_1", "seq106"), #
("Bin_1_1", "seq228"), #
("Bin_23_1", "seq3"), #
("Bin_46_1", "seq39"), #
("Bin_46_2", "seq29"), #
("Bin_2", "seq450"), # not 258
("Bin_48", "seq6"), # not 105
("Bin_7", "seq229"), #
("Bin_45", "seq188"), #
("Bin_11", "seq12"), #
("Bin_40", "seq172"), #
("Bin_32", "seq552")] #
_, key_seqs = zip(*pairs)
notable_seqs = set(expected_seqs + list(key_seqs))
model_mat_f = "../data/new_model.mat"
proc_conc_df = importratesandconcs_mod(model_mat_f, 'full df')
otu_matrix_file = '../data/unique.dbOTU.nonchimera.mat.rdp'
otu_table = pd.read_csv(otu_matrix_file, sep="\t", index_col=0)
taxa_series = otu_table.ix[:, -1]
otu_time_table = otu_table.drop([otu_table.columns[-1]], axis=1)
do_not_use = ['SB100912TAWMD14VV4TMR1', 'SB061713TAWMD22VV4TMR1',
'SB011413TAWMD22VV4TMR1', 'SB011413TAWMDSBVV4TMR1',
'SB011413TAWMDEBVV4TMR1', 'SB011413TAWMD22VV4TMR2',
'SB011413TAWMDSBVV4TMR2']
otu_time_table.drop(do_not_use, axis=1, inplace=True)
samps_by_feats = parseBiosample(otu_time_table.T)
undated = samps_by_feats[samps_by_feats.date == "NA"].index
samps_by_feats.drop(undated, inplace=True)
samps_by_feats['date'] = pd.to_datetime(samps_by_feats['date'])
squishy_depths = ['bottom', 'SB', 'control1', 'control3', 'control6', 'mid1',
'mid2', 'mid3', 'river', 'upper', 'River', 'Neg', 'NA', 'EB',
'FB', 'CR', 'Dock', 'Bridge']
# Drop samples of unknown provenance & sort by depth and date
off_target = samps_by_feats[samps_by_feats.depth.isin(squishy_depths)].index
only_depths = samps_by_feats.drop(off_target, inplace=False)
only_depths.sort_values(['date', 'depth'], ascending=[True, True],
inplace=True)
metadata = ['date', 'primers', 'kit', 'depth', 'replicates']
only_depth_otus = only_depths.drop(metadata, axis=1)
sample_totals = only_depth_otus.sum(axis=1)
sample_normed_otus = only_depth_otus.divide(sample_totals, axis='rows')
abund_thresh = 150
otu_totals = sample_normed_otus.sum().sort_values(ascending=False)
abundant_otus = otu_totals.index[:abund_thresh]
#common_otus = sample_normed_otus.ix[:, abundant_otus]
common_otus = sample_normed_otus.ix[:, notable_seqs]
common_metadata = only_depths.ix[:, metadata]
common_otus['Depth'] = common_metadata.depth.apply(pd.to_numeric)
common_otus['Date'] = common_metadata.date
common_otus.set_index(['Date', 'Depth'], inplace=True)
proc_conc_df.index.names = ['Date', 'Depth']
proc_in_otus = proc_conc_df.index.isin(common_otus.index)
otus_in_proc = common_otus.index.isin(proc_conc_df.index)
sub_otus = common_otus.ix[otus_in_proc, :]
sub_procs = proc_conc_df.ix[proc_in_otus, :]
non_rednt_samples = pd.DataFrame(index=sub_procs.index,
columns=sub_otus.columns)
for idx in sub_procs.index:
sub_sub_otus = sub_otus.ix[idx, :]
if sub_sub_otus.shape[0] != 1:
sub_sub_otus['newidx'] = np.arange(sub_sub_otus.shape[0])
non_rednt_samples.ix[idx, :] = sub_sub_otus.ix[0, :]
else:
non_rednt_samples.ix[idx, :] = sub_sub_otus.values.flatten()
shared_x_std = standard_scale_df(non_rednt_samples)
model_results = lasso_ridge_ftest(shared_x_std, sub_procs.drop(['Null'], 1))
ridge_coeff_df, parsi_coeff_df, score_df, select_features = model_results
score_df.sort_values(['r2-parsi'], ascending=False, inplace=True)
table_s5 = score_df.drop(['df','df-parsi'], 1)
table_s5['% DF reduction'] = 1 - (score_df['df-parsi']/score_df.df)
table_s5.to_csv("../data/otu_model_scoring.tsv", sep="\t", float_format='%.3f')
good_models = table_s5[table_s5['r2-ridge'] > 0.0].index
good_order = ['O', 'C', 'N+', 'N-', 'S+', 'S-', 'CH4','Fe+','Fe-',
'Aerobic Heterotrophy', 'Sulfate Reduction',
'Methane Oxidation (sulfate)', 'Methanogenesis',
'Iron Oxidation (oxygen)', 'Sulfur Oxidation (nitrate)',
'Methane Oxidation (nitrate)']
seq_ordered = ['seq3', 'seq6', 'seq12', 'seq29', 'seq39', 'seq106', 'seq135',
'seq172', 'seq188', 'seq214', 'seq228', 'seq229', 'seq258',
'seq450', 'seq552', 'seq3397']
coeff_df = pd.DataFrame(index=seq_ordered, columns=good_order,
data=np.zeros((len(notable_seqs), len(good_order))))
back_layer_df = pd.DataFrame(index=seq_ordered, columns=good_order,
data=np.zeros((len(notable_seqs), len(good_order))))
for cp in good_order:
good_features = select_features[cp]
for seq in seq_ordered:
if seq in good_features:
this_coeff = parsi_coeff_df.ix[cp, seq]
this_bl = 1
else:
this_coeff = ridge_coeff_df.ix[cp, seq]
this_bl = 0
back_layer_df.ix[seq, cp] = this_bl
coeff_df.ix[seq, cp] = this_coeff
def cluster_df(coeff_df):
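# Reorders both rows and columns of coeff_df by hierarchical clustering
# (complete linkage on euclidean distances) so related sequences/processes
# end up adjacent in the heatmap plotted below.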
labels1, labels2 = coeff_df.index, coeff_df.columns
row_clusters = linkage(pdist(coeff_df, metric='euclidean'), method='complete')
row_dendr = dendrogram(row_clusters, labels=labels1, no_plot=True)
coeff_df_rc = coeff_df.ix[row_dendr['leaves']]
col_dists = pdist(coeff_df.T, metric='euclidean')
col_clusters = linkage(col_dists, method='complete')
col_dendr = dendrogram(col_clusters, labels=labels2, no_plot=True)
coeff_df_rc.columns = [coeff_df_rc.columns[col_dendr['leaves']]]
return coeff_df_rc
coeff_df_rc = cluster_df(coeff_df)
plt.figure(1, figsize=(12,9))
cmap = sns.diverging_palette(145, 280, s=85, l=25, n=7, sep=1, as_cmap=True)
ax = sns.heatmap(coeff_df_rc, cmap=cmap)
ylab = ax.get_ylabel(); xlab = ax.get_xlabel();
ax.set_xlabel(xlab, fontsize=16); ax.set_ylabel(ylab, fontsize=16)
for item in ax.get_yticklabels():
item.set_rotation(0)
item.set_fontsize(14)
for item in ax.get_xticklabels():
item.set_rotation(90)
item.set_fontsize(14)
plt.tight_layout()
plt.savefig('regression_coefficients.png', dpi=100)
back_layer_df_cc = cluster_df(back_layer_df)
back_layer_df_cc.to_csv("../data/significance_of_key_seqs.tsv", sep="\t",
float_format='%.0f')
| bsd-2-clause |
cpcloud/bokeh | bokeh/charts/scatter.py | 1 | 3462 | """This is the Bokeh charts interface. It gives you a high level API to build
complex plots in a simple way.
This is the Scatter class, which lets you build your scatter plots by just
passing the arguments to the Chart class and calling the proper functions.
It also adds detection of the incoming input to see if it is a pandas dataframe
or a pandas groupby object.
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2014, Continuum Analytics, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENCE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import numpy as np
import pandas as pd
from ._charts import Chart
from ._chartobject import ChartObject
#-----------------------------------------------------------------------------
# Classes and functions
#-----------------------------------------------------------------------------
class Scatter(ChartObject):
def __init__(self, pairs,
title=None, xlabel=None, ylabel=None, legend=False,
xscale="linear", yscale="linear", width=800, height=600,
tools=True, filename=False, server=False, notebook=False):
self.pairs = pairs
super(Scatter, self).__init__(title, xlabel, ylabel, legend,
xscale, yscale, width, height,
tools, filename, server, notebook)
def check_attr(self):
super(Scatter, self).check_attr()
def show(self):
"This is the main Scatter show function."
# assuming we get a hierarchical pandas object
if isinstance(self.pairs, pd.DataFrame):
self.labels = self.pairs.columns.levels[1].values
from collections import OrderedDict
pdict = OrderedDict()
for i in self.pairs.columns.levels[0].values:
pdict[i] = self.pairs[i].dropna().values
self.pairs = pdict
# assuming we get a groupby object
if isinstance(self.pairs, pd.core.groupby.DataFrameGroupBy):
from collections import OrderedDict
pdict = OrderedDict()
for i in self.pairs.groups.keys():
self.labels = self.pairs.get_group(i).columns
xname = self.pairs.get_group(i).columns[0]
yname = self.pairs.get_group(i).columns[1]
x = getattr(self.pairs.get_group(i), xname)
y = getattr(self.pairs.get_group(i), yname)
pdict[i] = np.array([x.values, y.values]).T
self.pairs = pdict
self.check_attr()
if self._xlabel is None:
self._xlabel = self.labels[0]
if self._ylabel is None:
self._ylabel = self.labels[1]
chart = Chart(self._title, self._xlabel, self._ylabel, self._legend,
self.xscale, self.yscale, self._width, self._height,
self._tools, self._filename, self._server, self._notebook)
chart.get_data_scatter(**self.pairs)
chart.get_source_scatter()
chart.start_plot()
chart.scatter()
chart.end_plot()
chart.show()
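# Illustrative sketch (not part of the upstream module): per the module
# docstring, Scatter accepts a hierarchical-column DataFrame or a pandas
# groupby object; a hypothetical call could look like
#   df = pd.DataFrame(dict(x=[1., 2., 3.], y=[2., 1., 4.], g=list("aab")))
#   Scatter(df.groupby("g"), title="demo", filename="scatter.html").show()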
| bsd-3-clause |
lukas/ml-class | examples/scikit/cross-validation-embedding.py | 2 | 1331 | import pandas as pd
import numpy as np
from sklearn.naive_bayes import MultinomialNB
from sklearn.linear_model import SGDClassifier
from sklearn.pipeline import Pipeline
from embedding import MeanEmbeddingVectorizer
from tokenizer import Tokenizer
from sklearn.model_selection import cross_val_score, cross_val_predict
from sklearn.ensemble import ExtraTreesClassifier
from wandblog import log
import wandb
run = wandb.init(job_type='eval')
config = run.config
df = pd.read_csv('tweets.csv')
target = df['is_there_an_emotion_directed_at_a_brand_or_product']
text = df['tweet_text']
fixed_text = text[pd.notnull(text)]
fixed_target = target[pd.notnull(text)]
w2v = {}
with open("glove/glove.6B.50d.txt", "r") as lines:
for line in lines:
word, numbers = line.split(" ", 1)
number_array = np.array(numbers.split()).astype(np.float)
w2v[word] = number_array
text_clf = Pipeline([('token', Tokenizer()),
('vect', MeanEmbeddingVectorizer(w2v)),
("extra trees", ExtraTreesClassifier(n_estimators=200)),])
text_clf.fit(fixed_text, fixed_target)
scores = cross_val_score(text_clf, fixed_text, fixed_target)
print(scores)
print(scores.mean())
predictions = cross_val_predict(text_clf, fixed_text, fixed_target)
log(run, fixed_text, fixed_target, predictions)
| gpl-2.0 |
nelson-liu/scikit-learn | examples/exercises/plot_cv_diabetes.py | 53 | 2861 | """
===============================================
Cross-validation on diabetes Dataset Exercise
===============================================
A tutorial exercise which uses cross-validation with linear models.
This exercise is used in the :ref:`cv_estimators_tut` part of the
:ref:`model_selection_tut` section of the :ref:`stat_learn_tut_index`.
"""
from __future__ import print_function
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.linear_model import LassoCV
from sklearn.linear_model import Lasso
from sklearn.model_selection import KFold
from sklearn.model_selection import cross_val_score
diabetes = datasets.load_diabetes()
X = diabetes.data[:150]
y = diabetes.target[:150]
lasso = Lasso(random_state=0)
alphas = np.logspace(-4, -0.5, 30)
scores = list()
scores_std = list()
n_folds = 3
for alpha in alphas:
lasso.alpha = alpha
this_scores = cross_val_score(lasso, X, y, cv=n_folds, n_jobs=1)
scores.append(np.mean(this_scores))
scores_std.append(np.std(this_scores))
scores, scores_std = np.array(scores), np.array(scores_std)
plt.figure().set_size_inches(8, 6)
plt.semilogx(alphas, scores)
# plot error lines showing +/- std. errors of the scores
std_error = scores_std / np.sqrt(n_folds)
plt.semilogx(alphas, scores + std_error, 'b--')
plt.semilogx(alphas, scores - std_error, 'b--')
# alpha=0.2 controls the translucency of the fill color
plt.fill_between(alphas, scores + std_error, scores - std_error, alpha=0.2)
plt.ylabel('CV score +/- std error')
plt.xlabel('alpha')
plt.axhline(np.max(scores), linestyle='--', color='.5')
plt.xlim([alphas[0], alphas[-1]])
##############################################################################
# Bonus: how much can you trust the selection of alpha?
# To answer this question we use the LassoCV object that sets its alpha
# parameter automatically from the data by internal cross-validation (i.e. it
# performs cross-validation on the training data it receives).
# We use external cross-validation to see how much the automatically obtained
# alphas differ across different cross-validation folds.
lasso_cv = LassoCV(alphas=alphas, random_state=0)
k_fold = KFold(3)
print("Answer to the bonus question:",
"how much can you trust the selection of alpha?")
print()
print("Alpha parameters maximising the generalization score on different")
print("subsets of the data:")
for k, (train, test) in enumerate(k_fold.split(X, y)):
lasso_cv.fit(X[train], y[train])
print("[fold {0}] alpha: {1:.5f}, score: {2:.5f}".
format(k, lasso_cv.alpha_, lasso_cv.score(X[test], y[test])))
print()
print("Answer: Not very much since we obtained different alphas for different")
print("subsets of the data and moreover, the scores for these alphas differ")
print("quite substantially.")
plt.show()
| bsd-3-clause |
bjlittle/iris | lib/iris/tests/unit/plot/test_contourf.py | 1 | 3221 | # Copyright Iris contributors
#
# This file is part of Iris and is released under the LGPL license.
# See COPYING and COPYING.LESSER in the root of the repository for full
# licensing details.
"""Unit tests for the `iris.plot.contourf` function."""
# Import iris.tests first so that some things can be initialised before
# importing anything else.
import iris.tests as tests # isort:skip
from unittest import mock
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
from iris.tests.stock import simple_2d
from iris.tests.unit.plot import MixinCoords, TestGraphicStringCoord
if tests.MPL_AVAILABLE:
import iris.plot as iplt
@tests.skip_plot
class TestStringCoordPlot(TestGraphicStringCoord):
def test_yaxis_labels(self):
iplt.contourf(self.cube, coords=("bar", "str_coord"))
self.assertPointsTickLabels("yaxis")
def test_xaxis_labels(self):
iplt.contourf(self.cube, coords=("str_coord", "bar"))
self.assertPointsTickLabels("xaxis")
def test_yaxis_labels_with_axes(self):
import matplotlib.pyplot as plt
fig = plt.figure()
ax = fig.add_subplot(111)
iplt.contourf(self.cube, axes=ax, coords=("bar", "str_coord"))
plt.close(fig)
self.assertPointsTickLabels("yaxis", ax)
def test_xaxis_labels_with_axes(self):
import matplotlib.pyplot as plt
fig = plt.figure()
ax = fig.add_subplot(111)
iplt.contourf(self.cube, axes=ax, coords=("str_coord", "bar"))
plt.close(fig)
self.assertPointsTickLabels("xaxis", ax)
def test_geoaxes_exception(self):
import matplotlib.pyplot as plt
fig = plt.figure()
ax = fig.add_subplot(111)
self.assertRaises(TypeError, iplt.contourf, self.lat_lon_cube, axes=ax)
plt.close(fig)
@tests.skip_plot
class TestCoords(tests.IrisTest, MixinCoords):
def setUp(self):
# We have a 2d cube with dimensionality (bar: 3; foo: 4)
self.cube = simple_2d(with_bounds=False)
self.foo = self.cube.coord("foo").points
self.foo_index = np.arange(self.foo.size)
self.bar = self.cube.coord("bar").points
self.bar_index = np.arange(self.bar.size)
self.data = self.cube.data
self.dataT = self.data.T
mocker = mock.Mock(alpha=0, antialiased=False)
self.mpl_patch = self.patch(
"matplotlib.pyplot.contourf", return_value=mocker
)
self.draw_func = iplt.contourf
@tests.skip_plot
class TestAntialias(tests.IrisTest):
def test_skip_contour(self):
# Contours should not be added if data is all below second level. See #4086.
cube = simple_2d()
levels = [5, 15, 20, 200]
colors = ["b", "r", "y"]
iplt.contourf(cube, levels=levels, colors=colors, antialiased=True)
ax = plt.gca()
# Expect 3 PathCollection objects (one for each colour) and no LineCollection
# objects.
for collection in ax.collections:
self.assertIsInstance(
collection, matplotlib.collections.PathCollection
)
self.assertEqual(len(ax.collections), 3)
if __name__ == "__main__":
tests.main()
| lgpl-3.0 |
carrillo/scikit-learn | sklearn/manifold/t_sne.py | 48 | 20644 | # Author: Alexander Fabisch -- <[email protected]>
# License: BSD 3 clause (C) 2014
# This is the standard t-SNE implementation. There are faster modifications of
# the algorithm:
# * Barnes-Hut-SNE: reduces the complexity of the gradient computation from
# N^2 to N log N (http://arxiv.org/abs/1301.3342)
# * Fast Optimization for t-SNE:
# http://cseweb.ucsd.edu/~lvdmaaten/workshops/nips2010/papers/vandermaaten.pdf
import numpy as np
from scipy import linalg
from scipy.spatial.distance import pdist
from scipy.spatial.distance import squareform
from ..base import BaseEstimator
from ..utils import check_array
from ..utils import check_random_state
from ..utils.extmath import _ravel
from ..decomposition import RandomizedPCA
from ..metrics.pairwise import pairwise_distances
from . import _utils
MACHINE_EPSILON = np.finfo(np.double).eps
def _joint_probabilities(distances, desired_perplexity, verbose):
"""Compute joint probabilities p_ij from distances.
Parameters
----------
distances : array, shape (n_samples * (n_samples-1) / 2,)
Distances of samples are stored as condensed matrices, i.e.
we omit the diagonal and duplicate entries and store everything
in a one-dimensional array.
desired_perplexity : float
Desired perplexity of the joint probability distributions.
verbose : int
Verbosity level.
Returns
-------
P : array, shape (n_samples * (n_samples-1) / 2,)
Condensed joint probability matrix.
"""
# Compute conditional probabilities such that they approximately match
# the desired perplexity
conditional_P = _utils._binary_search_perplexity(
distances, desired_perplexity, verbose)
P = conditional_P + conditional_P.T
sum_P = np.maximum(np.sum(P), MACHINE_EPSILON)
P = np.maximum(squareform(P) / sum_P, MACHINE_EPSILON)
return P
def _kl_divergence(params, P, alpha, n_samples, n_components):
"""t-SNE objective function: KL divergence of p_ijs and q_ijs.
Parameters
----------
params : array, shape (n_params,)
Unraveled embedding.
P : array, shape (n_samples * (n_samples-1) / 2,)
Condensed joint probability matrix.
alpha : float
Degrees of freedom of the Student's-t distribution.
n_samples : int
Number of samples.
n_components : int
Dimension of the embedded space.
Returns
-------
kl_divergence : float
Kullback-Leibler divergence of p_ij and q_ij.
grad : array, shape (n_params,)
Unraveled gradient of the Kullback-Leibler divergence with respect to
the embedding.
"""
X_embedded = params.reshape(n_samples, n_components)
# Q is a heavy-tailed distribution: Student's t-distribution
n = pdist(X_embedded, "sqeuclidean")
n += 1.
n /= alpha
n **= (alpha + 1.0) / -2.0
Q = np.maximum(n / (2.0 * np.sum(n)), MACHINE_EPSILON)
# Optimization trick below: np.dot(x, y) is faster than
# np.sum(x * y) because it calls BLAS
# Objective: C (Kullback-Leibler divergence of P and Q)
kl_divergence = 2.0 * np.dot(P, np.log(P / Q))
# Gradient: dC/dY
grad = np.ndarray((n_samples, n_components))
PQd = squareform((P - Q) * n)
for i in range(n_samples):
np.dot(_ravel(PQd[i]), X_embedded[i] - X_embedded, out=grad[i])
grad = grad.ravel()
c = 2.0 * (alpha + 1.0) / alpha
grad *= c
return kl_divergence, grad
def _gradient_descent(objective, p0, it, n_iter, n_iter_without_progress=30,
momentum=0.5, learning_rate=1000.0, min_gain=0.01,
min_grad_norm=1e-7, min_error_diff=1e-7, verbose=0,
args=None):
"""Batch gradient descent with momentum and individual gains.
Parameters
----------
objective : function or callable
Should return a tuple of cost and gradient for a given parameter
vector.
p0 : array-like, shape (n_params,)
Initial parameter vector.
it : int
Current number of iterations (this function will be called more than
once during the optimization).
n_iter : int
Maximum number of gradient descent iterations.
n_iter_without_progress : int, optional (default: 30)
Maximum number of iterations without progress before we abort the
optimization.
momentum : float, within (0.0, 1.0), optional (default: 0.5)
The momentum generates a weight for previous gradients that decays
exponentially.
learning_rate : float, optional (default: 1000.0)
The learning rate should be extremely high for t-SNE! Values in the
range [100.0, 1000.0] are common.
min_gain : float, optional (default: 0.01)
Minimum individual gain for each parameter.
min_grad_norm : float, optional (default: 1e-7)
If the gradient norm is below this threshold, the optimization will
be aborted.
min_error_diff : float, optional (default: 1e-7)
If the absolute difference of two successive cost function values
is below this threshold, the optimization will be aborted.
verbose : int, optional (default: 0)
Verbosity level.
args : sequence
Arguments to pass to objective function.
Returns
-------
p : array, shape (n_params,)
Optimum parameters.
error : float
Optimum.
i : int
Last iteration.
"""
if args is None:
args = []
p = p0.copy().ravel()
update = np.zeros_like(p)
gains = np.ones_like(p)
error = np.finfo(np.float).max
best_error = np.finfo(np.float).max
best_iter = 0
for i in range(it, n_iter):
new_error, grad = objective(p, *args)
error_diff = np.abs(new_error - error)
error = new_error
grad_norm = linalg.norm(grad)
if error < best_error:
best_error = error
best_iter = i
elif i - best_iter > n_iter_without_progress:
if verbose >= 2:
print("[t-SNE] Iteration %d: did not make any progress "
"during the last %d episodes. Finished."
% (i + 1, n_iter_without_progress))
break
if min_grad_norm >= grad_norm:
if verbose >= 2:
print("[t-SNE] Iteration %d: gradient norm %f. Finished."
% (i + 1, grad_norm))
break
if min_error_diff >= error_diff:
if verbose >= 2:
print("[t-SNE] Iteration %d: error difference %f. Finished."
% (i + 1, error_diff))
break
inc = update * grad >= 0.0
dec = np.invert(inc)
gains[inc] += 0.05
gains[dec] *= 0.95
np.clip(gains, min_gain, np.inf, out=gains)  # enforce the minimum gain in place
grad *= gains
update = momentum * update - learning_rate * grad
p += update
if verbose >= 2 and (i + 1) % 10 == 0:
print("[t-SNE] Iteration %d: error = %.7f, gradient norm = %.7f"
% (i + 1, error, grad_norm))
return p, error, i
def trustworthiness(X, X_embedded, n_neighbors=5, precomputed=False):
"""Expresses to what extent the local structure is retained.
The trustworthiness is within [0, 1]. It is defined as
.. math::
T(k) = 1 - \frac{2}{nk (2n - 3k - 1)} \sum^n_{i=1}
\sum_{j \in U^{(k)}_i} (r(i, j) - k)
where :math:`r(i, j)` is the rank of the embedded datapoint j
according to the pairwise distances between the embedded datapoints,
:math:`U^{(k)}_i` is the set of points that are in the k nearest
neighbors in the embedded space but not in the original space.
* "Neighborhood Preservation in Nonlinear Projection Methods: An
Experimental Study"
J. Venna, S. Kaski
* "Learning a Parametric Embedding by Preserving Local Structure"
L.J.P. van der Maaten
Parameters
----------
X : array, shape (n_samples, n_features) or (n_samples, n_samples)
If the metric is 'precomputed' X must be a square distance
matrix. Otherwise it contains a sample per row.
X_embedded : array, shape (n_samples, n_components)
Embedding of the training data in low-dimensional space.
n_neighbors : int, optional (default: 5)
Number of neighbors k that will be considered.
precomputed : bool, optional (default: False)
Set this flag if X is a precomputed square distance matrix.
Returns
-------
trustworthiness : float
Trustworthiness of the low-dimensional embedding.
"""
if precomputed:
dist_X = X
else:
dist_X = pairwise_distances(X, squared=True)
dist_X_embedded = pairwise_distances(X_embedded, squared=True)
ind_X = np.argsort(dist_X, axis=1)
ind_X_embedded = np.argsort(dist_X_embedded, axis=1)[:, 1:n_neighbors + 1]
n_samples = X.shape[0]
t = 0.0
ranks = np.zeros(n_neighbors)
for i in range(n_samples):
for j in range(n_neighbors):
ranks[j] = np.where(ind_X[i] == ind_X_embedded[i, j])[0][0]
ranks -= n_neighbors
t += np.sum(ranks[ranks > 0])
t = 1.0 - t * (2.0 / (n_samples * n_neighbors *
(2.0 * n_samples - 3.0 * n_neighbors - 1.0)))
return t
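# Illustrative sketch (not part of the upstream module): trustworthiness can
# score an embedding produced by the TSNE class defined below; the data here
# are made up.
#   rng = np.random.RandomState(0)
#   X = rng.randn(100, 20)
#   X_2d = TSNE(n_components=2, random_state=0).fit_transform(X)
#   t = trustworthiness(X, X_2d, n_neighbors=5)   # value in [0, 1]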
class TSNE(BaseEstimator):
"""t-distributed Stochastic Neighbor Embedding.
t-SNE [1] is a tool to visualize high-dimensional data. It converts
similarities between data points to joint probabilities and tries
to minimize the Kullback-Leibler divergence between the joint
probabilities of the low-dimensional embedding and the
high-dimensional data. t-SNE has a cost function that is not convex,
i.e. with different initializations we can get different results.
It is highly recommended to use another dimensionality reduction
method (e.g. PCA for dense data or TruncatedSVD for sparse data)
to reduce the number of dimensions to a reasonable amount (e.g. 50)
if the number of features is very high. This will suppress some
noise and speed up the computation of pairwise distances between
samples. For more tips see Laurens van der Maaten's FAQ [2].
Read more in the :ref:`User Guide <t_sne>`.
Parameters
----------
n_components : int, optional (default: 2)
Dimension of the embedded space.
perplexity : float, optional (default: 30)
The perplexity is related to the number of nearest neighbors that
is used in other manifold learning algorithms. Larger datasets
usually require a larger perplexity. Consider selecting a value
between 5 and 50. The choice is not extremely critical since t-SNE
is quite insensitive to this parameter.
early_exaggeration : float, optional (default: 4.0)
Controls how tight natural clusters in the original space are in
the embedded space and how much space will be between them. For
larger values, the space between natural clusters will be larger
in the embedded space. Again, the choice of this parameter is not
very critical. If the cost function increases during initial
optimization, the early exaggeration factor or the learning rate
might be too high.
learning_rate : float, optional (default: 1000)
The learning rate can be a critical parameter. It should be
between 100 and 1000. If the cost function increases during initial
optimization, the early exaggeration factor or the learning rate
might be too high. If the cost function gets stuck in a bad local
minimum increasing the learning rate helps sometimes.
n_iter : int, optional (default: 1000)
Maximum number of iterations for the optimization. Should be at
least 200.
n_iter_without_progress : int, optional (default: 30)
Maximum number of iterations without progress before we abort the
optimization.
min_grad_norm : float, optional (default: 1E-7)
If the gradient norm is below this threshold, the optimization will
be aborted.
metric : string or callable, optional
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
allowed by scipy.spatial.distance.pdist for its metric parameter, or
a metric listed in pairwise.PAIRWISE_DISTANCE_FUNCTIONS.
If metric is "precomputed", X is assumed to be a distance matrix.
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from X as input and return a value indicating
the distance between them. The default is "euclidean" which is
interpreted as squared euclidean distance.
init : string, optional (default: "random")
Initialization of embedding. Possible options are 'random' and 'pca'.
PCA initialization cannot be used with precomputed distances and is
usually more globally stable than random initialization.
verbose : int, optional (default: 0)
Verbosity level.
random_state : int or RandomState instance or None (default)
Pseudo Random Number generator seed control. If None, use the
numpy.random singleton. Note that different initializations
might result in different local minima of the cost function.
Attributes
----------
embedding_ : array-like, shape (n_samples, n_components)
Stores the embedding vectors.
training_data_ : array-like, shape (n_samples, n_features)
Stores the training data.
Examples
--------
>>> import numpy as np
>>> from sklearn.manifold import TSNE
>>> X = np.array([[0, 0, 0], [0, 1, 1], [1, 0, 1], [1, 1, 1]])
>>> model = TSNE(n_components=2, random_state=0)
>>> model.fit_transform(X) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
array([[ 887.28..., 238.61...],
[ -714.79..., 3243.34...],
[ 957.30..., -2505.78...],
[-1130.28..., -974.78...]])
References
----------
[1] van der Maaten, L.J.P.; Hinton, G.E. Visualizing High-Dimensional Data
Using t-SNE. Journal of Machine Learning Research 9:2579-2605, 2008.
[2] van der Maaten, L.J.P. t-Distributed Stochastic Neighbor Embedding
http://homepage.tudelft.nl/19j49/t-SNE.html
"""
def __init__(self, n_components=2, perplexity=30.0,
early_exaggeration=4.0, learning_rate=1000.0, n_iter=1000,
n_iter_without_progress=30, min_grad_norm=1e-7,
metric="euclidean", init="random", verbose=0,
random_state=None):
if init not in ["pca", "random"]:
raise ValueError("'init' must be either 'pca' or 'random'")
self.n_components = n_components
self.perplexity = perplexity
self.early_exaggeration = early_exaggeration
self.learning_rate = learning_rate
self.n_iter = n_iter
self.n_iter_without_progress = n_iter_without_progress
self.min_grad_norm = min_grad_norm
self.metric = metric
self.init = init
self.verbose = verbose
self.random_state = random_state
def fit(self, X, y=None):
"""Fit the model using X as training data.
Parameters
----------
X : array, shape (n_samples, n_features) or (n_samples, n_samples)
If the metric is 'precomputed' X must be a square distance
matrix. Otherwise it contains a sample per row.
"""
X = check_array(X, accept_sparse=['csr', 'csc', 'coo'], dtype=np.float64)
random_state = check_random_state(self.random_state)
if self.early_exaggeration < 1.0:
raise ValueError("early_exaggeration must be at least 1, but is "
"%f" % self.early_exaggeration)
if self.n_iter < 200:
raise ValueError("n_iter should be at least 200")
if self.metric == "precomputed":
if self.init == 'pca':
raise ValueError("The parameter init=\"pca\" cannot be used "
"with metric=\"precomputed\".")
if X.shape[0] != X.shape[1]:
raise ValueError("X should be a square distance matrix")
distances = X
else:
if self.verbose:
print("[t-SNE] Computing pairwise distances...")
if self.metric == "euclidean":
distances = pairwise_distances(X, metric=self.metric, squared=True)
else:
distances = pairwise_distances(X, metric=self.metric)
# Degrees of freedom of the Student's t-distribution. The suggestion
# alpha = n_components - 1 comes from "Learning a Parametric Embedding
# by Preserving Local Structure" Laurens van der Maaten, 2009.
alpha = max(self.n_components - 1.0, 1)
n_samples = X.shape[0]
self.training_data_ = X
P = _joint_probabilities(distances, self.perplexity, self.verbose)
if self.init == 'pca':
pca = RandomizedPCA(n_components=self.n_components,
random_state=random_state)
X_embedded = pca.fit_transform(X)
elif self.init == 'random':
X_embedded = None
else:
raise ValueError("Unsupported initialization scheme: %s"
% self.init)
self.embedding_ = self._tsne(P, alpha, n_samples, random_state,
X_embedded=X_embedded)
return self
def _tsne(self, P, alpha, n_samples, random_state, X_embedded=None):
"""Runs t-SNE."""
# t-SNE minimizes the Kullback-Leibler divergence of the Gaussians P
# and the Student's t-distributions Q. The optimization algorithm that
# we use is batch gradient descent with three stages:
# * early exaggeration with momentum 0.5
# * early exaggeration with momentum 0.8
# * final optimization with momentum 0.8
# The embedding is initialized with iid samples from Gaussians with
# standard deviation 1e-4.
if X_embedded is None:
# Initialize embedding randomly
X_embedded = 1e-4 * random_state.randn(n_samples,
self.n_components)
params = X_embedded.ravel()
# Early exaggeration
P *= self.early_exaggeration
params, error, it = _gradient_descent(
_kl_divergence, params, it=0, n_iter=50, momentum=0.5,
min_grad_norm=0.0, min_error_diff=0.0,
learning_rate=self.learning_rate, verbose=self.verbose,
args=[P, alpha, n_samples, self.n_components])
params, error, it = _gradient_descent(
_kl_divergence, params, it=it + 1, n_iter=100, momentum=0.8,
min_grad_norm=0.0, min_error_diff=0.0,
learning_rate=self.learning_rate, verbose=self.verbose,
args=[P, alpha, n_samples, self.n_components])
if self.verbose:
print("[t-SNE] Error after %d iterations with early "
"exaggeration: %f" % (it + 1, error))
# Final optimization
P /= self.early_exaggeration
params, error, it = _gradient_descent(
_kl_divergence, params, it=it + 1, n_iter=self.n_iter,
min_grad_norm=self.min_grad_norm,
n_iter_without_progress=self.n_iter_without_progress,
momentum=0.8, learning_rate=self.learning_rate,
verbose=self.verbose, args=[P, alpha, n_samples,
self.n_components])
if self.verbose:
print("[t-SNE] Error after %d iterations: %f" % (it + 1, error))
X_embedded = params.reshape(n_samples, self.n_components)
return X_embedded
def fit_transform(self, X, y=None):
"""Transform X to the embedded space.
Parameters
----------
X : array, shape (n_samples, n_features) or (n_samples, n_samples)
If the metric is 'precomputed' X must be a square distance
matrix. Otherwise it contains a sample per row.
Returns
-------
X_new : array, shape (n_samples, n_components)
Embedding of the training data in low-dimensional space.
"""
self.fit(X)
return self.embedding_
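# Illustrative sketch (not part of the upstream module): the class docstring
# recommends reducing very high-dimensional dense data (e.g. with PCA) before
# running t-SNE; a hypothetical pipeline, where X_high_dim is user data, is
#   from sklearn.decomposition import PCA
#   X50 = PCA(n_components=50).fit_transform(X_high_dim)
#   X_embedded = TSNE(n_components=2, perplexity=30.0).fit_transform(X50)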
| bsd-3-clause |
rinze/kaggle-public | instacart/most_common_items.py | 1 | 2365 | import sqlite3
import pandas as pd
import csv
import gzip
from collections import defaultdict
if __name__ == '__main__':
conn = sqlite3.connect('data/instacart.db')
c = conn.cursor()
# Obtain the average number of sales per customer
q = """
SELECT user_id, AVG(n_items) as avg_items
FROM (
SELECT o.user_id AS user_id, o.order_id, COUNT(*) as n_items
FROM order_products__prior opp
JOIN orders AS o ON o.order_id = opp.order_id
GROUP by o.user_id, o.order_id
) b
GROUP BY user_id
"""
# This does the same (?; re-check) and is much faster
q = """
SELECT user_id,
MIN(n_items) AS min_items,
AVG(n_items) AS avg_items,
MAX(n_items) AS max_items
FROM orders o
INNER JOIN (SELECT order_id, COUNT(*) AS n_items
FROM order_products__prior
GROUP BY order_id) avg ON avg.order_id = o.order_id
GROUP BY user_id
"""
print "Getting order stats..."
c.execute(q)
order_stats = dict()
print "Assigning to dictionary..."
for row in c:
order_stats[row[0]] = (row[1], row[2], row[3])
# For every customer, sort the bought items in descending popularity
q = """
SELECT o.user_id AS user_id,
opp.product_id AS product_id,
COUNT(*) AS n -- improve here, probably
FROM order_products__prior opp
JOIN orders o ON o.order_id = opp.order_id
GROUP BY o.user_id, opp.product_id
ORDER BY o.user_id, n DESC
"""
print "Getting product frequency..."
c.execute(q)
print "Assigning next order per user..."
next_order = defaultdict(list)
for row in c:
if len(next_order[row[0]]) < order_stats[row[0]][1]: # more than the average
next_order[row[0]].append(row[1])
# Now just let's assign orders
print "Generating CSV file..."
q = "SELECT order_id, user_id FROM orders WHERE eval_set = 'test'"
c.execute(q)
result = []
result.append(['order_id', 'products'])
for row in c:
result.append([row[0], " ".join([str(x) for x in next_order[row[1]]])])
# Write compressed CSV file
with gzip.open('/tmp/submission.csv.gz', 'wb') as f:
csvwriter = csv.writer(f, delimiter = ',', quotechar = '"')
for row in result:
csvwriter.writerow(row)
| gpl-2.0 |
evanl/perc | perc_objects.py | 1 | 34291 | import numpy as np
import matplotlib.pyplot as plt
import matplotlib
from mpl_toolkits.mplot3d import Axes3D
import random
import sys
import bisect
import read_eclipse as re
import eclipse_cells as ec
from time import time, clock
import csv
class Perc(object):
def __init__(self, nx, ny, nz, r_max = 10, volume_fraction = 1.0):
if nx >=3 and ny >=3:
self.nx = nx
self.ny = ny
else:
fail('expected nx >=3 and ny >=3, \n got \
nx = %d, ny = %d' % (nx, ny))
if nz == 1:
self.nz = nz
elif nz >=3:
self.nz = nz
else:
fail('expected nz = 1 for 2d simulation or\
nz >=3 for 3d simulation \n \
got nz = %d' % nz)
self.r_max = r_max
self.x = {}
self.y = {}
self.z = {}
self.thres_z = {}
self.corners = {}
self.perm = {}
self.poro = {}
self.volume = {}
self.grid_values = {}
self.fill_steps = []
self.fill_times = []
self.candidates = [] #keep sorted
self.sbres = 0.2
self.scmax = 1 - self.sbres
self.vfrac = volume_fraction
def add_injection(self, mass_inflow, end_time_days, \
density):
self.inj = self.Injection(mass_inflow, end_time_days, \
density)
def add_volume(self, choice):
vol = self.vfrac * self.poro[choice] *\
self.scmax * self.volume[choice]
return vol
class Injection(object):
def __init__(self, mass_inflow, end_time_days, density):
self.t_elapsed = 1998 * 365.25 # days
self.q_index = 0
self.t_end = end_time_days + self.t_elapsed
self.rho = density
self.massflow = mass_inflow
#conversion factor
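# (assuming mass_inflow is given in Mt/yr: 1 Mt/yr = 1e9 kg / (365*24*3600 s)
#  ~ 31.71 kg/s, which is the 31.71 factor used below)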
self.mr_kg_sec = []
self.q = []
self.end_days = []
for i in range(len(self.massflow)):
self.mr_kg_sec.append(self.massflow[i] * 31.71)
self.q.append(self.massflow[i] * 31.71 / self.rho) # -> m^3/s
self.end_days.append((1999 + i) * 365.25)
self.injected_mass = 0.
self.injected_volume = 0.
msum = 0.
for i in range(len(self.massflow)):
msum += self.massflow[i]
massflow_avg = msum / float(len(self.massflow))
self.max_mass = end_time_days * massflow_avg* 31.71 * 24 * 3600.
self.max_volume = self.max_mass / self.rho
def add_time(self, t_add):
self.t_elapsed += t_add
return 0
def add_mass(self, vol_add):
self.injected_volume += vol_add
mass_add = self.rho * vol_add
self.injected_mass += mass_add
time_taken = vol_add / (self.q[self.q_index] * 24 * 3600)
# add time in days ^^^
time_taken_1 = mass_add / (self.mr_kg_sec[self.q_index] * 24 * 3600)
self.add_time(time_taken)
if self.get_elapsed_time() > self.end_days[self.q_index] and \
self.q_index <= len(self.end_days) -1:
self.increment_q_index()
return 0
def increment_q_index(self):
self.q_index += 1
return 0
def get_elapsed_time(self):
return self.t_elapsed
def get_max_mass(self):
return self.max_mass
def get_injected_mass(self):
return self.injected_mass
def get_injected_volume(self):
return self.injected_volume
def get_density(self):
return self.rho
def get_mass_inflow(self):
return self.massflow
def get_end_time(self):
return self.t_end
def end_reached(self):
if self.t_elapsed > self.t_end:
return True
else:
return False
class Corner(object):
def __init__(self, x, y, z):
self.x = x
self.y = y
self.z = z
def get_x(self):
return self.x
def get_y(self):
return self.y
def get_z(self):
return self.z
def get_grid_value(self, key):
""" returns the value for a given cell
creates this value if it doesn't exist.
"""
#if key not in self.grid_values:
#self.set_grid_value(key)
return self.grid_values[key]
def set_grid_value(self, key, val = 'random'):
""" sets grid value, sets value as not filled
"""
if val == 'random':
self.grid_values[key] = random.randint(1, self.r_max)
else:
self.grid_values[key] = val
def mark_filled(self, key, time = '1'):
""" marks grid values as filled if they are
within the bounds of the grid size
"""
assert 0 <= key[0] < self.nx, \
'i coordinate out of range(%d vs %d)' % \
(key[0], self.nx)
assert 0 <= key[1] < self.ny, \
'j coordinate out of range(%d vs %d)' % \
(key[1], self.ny)
if self.nz == 1:
assert key[2] == 0, 'k must equal zero'
else:
assert 0 <= key[2] < self.nz,\
'k coordinate out of range (%d vs %d)' % \
(key[2], self.nz)
self.fill_steps.append(key)
self.fill_times.append(time)
def find_new_candidates(self):
""" grabs neighbor cell values, inserts them into sorted list
"""
key = self.fill_steps[-1]
new_can = self.get_neighbor_candidates(key)
for can in new_can:
bisect.insort_left(self.candidates, (self.grid_values[can], can))
return self.candidates
def get_neighbor_candidates(self, key):
""" checks neighbor candidates, ignores if already in list
"""
neighbors = self.get_neighbor_keys(key)
candidates = []
for key in neighbors:
if key not in self.fill_steps:
candidates.append(key)
return candidates
def get_neighbor_keys(self, key):
""" Checks six sides of neighbors for 3d case
Checks four sides of neighbors for the 2d case
"""
keys = []
keys.append((key[0] - 1, key[1], key[2]))
keys.append((key[0] + 1, key[1], key[2]))
keys.append((key[0], key[1] - 1, key[2]))
keys.append((key[0], key[1] + 1, key[2]))
if self.nz != 1:
keys.append((key[0], key[1], key[2] - 1))
keys.append((key[0], key[1], key[2] + 1))
return keys
def end_criterion(self, end_type = 'boundary'):
if end_type == 'boundary':
if self.choice[0] in (0, self.nx-1) \
or self.choice[1] in (0, self.ny-1):
print "x-y Boundary hit "
return True
elif self.nz != 1 and self.choice[2] in (0, self.nz-1):
return True
else:
return False
elif end_type == 'injection':
end_time = self.inj.get_end_time()
elapsed = self.inj.get_elapsed_time()
if elapsed > end_time:
print "end criterion"
print "time elapsed: " + str(elapsed)
print " end time: " + str(end_time)
return True
elif self.end_criterion(end_type = 'boundary'):
return True
else:
return False
def run_simulation(self, injection = False):
""" fills grid. If no initial value is specified, picks
i, j, k == nx/2, ny/2, nz/2
"""
if injection == True:
end_type = 'injection'
else:
end_type = 'boundary'
print "PERCOLATING........"
step_count = 0
while True:
step_count +=1
self.candidates = self.find_new_candidates()
assert self.candidates, 'no fillable cells found'
self.choice = self.percolate()
time = step_count
if injection == True:
volume_filled = self.add_volume(self.choice)
self.inj.add_mass(volume_filled)
time = self.inj.get_elapsed_time()
self.mark_filled(self.choice, time = time)
if self.end_criterion(end_type = end_type):
print "Number of Cells filled: " + \
str(len(self.fill_steps))
print "mass in system : " + \
str(self.inj.get_injected_mass())
print "maximum mass : " + \
str(self.inj.get_max_mass())
break
return 0
def percolate(self):
choice = self.candidates[0][1]
#print choice, '{:.3e}'.format(self.grid_values[choice]),\
#" runner up -> ", self.candidates[1][1], \
#'{:.3e}'.format(self.grid_values[self.candidates[1][1]]),\
#" end", '{:.3e}'.format(self.grid_values[self.candidates[-1][1]])
self.candidates.remove(self.candidates[0])
return choice
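# --- illustrative sketch (not part of the original class) ---
# The candidate bookkeeping above is invasion percolation: (threshold, key)
# tuples are kept sorted with bisect.insort_left and the lowest-threshold
# cell is always invaded next. Toy example with made-up thresholds:
import bisect
_pool = []
for _threshold, _cell in [(3.0, (1, 0, 0)), (1.5, (0, 1, 0)), (2.2, (1, 2, 0))]:
    bisect.insort_left(_pool, (_threshold, _cell))
_lowest = _pool.pop(0)  # -> (1.5, (0, 1, 0)) is invaded first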
def make_uniform_grid(self):
print "making uniform grid"
for i in range(self.nx):
for j in range(self.ny):
for k in range(self.nz):
key = (i, j, k)
self.set_grid_value(key)
self.x[key] = i
self.y[key] = j
self.z[key] = k
if len(self.fill_steps) == 0:
init_key = (self.nx/2, self.ny/2, self.nz/2)
self.mark_filled(init_key)
print "grid with: (nx, ny, nz) = ", \
(self.nx, self.ny, self.nz), " made!"
return 0
def make_sleipner_grid(self, vol_dict, xyz_dict, poroperm_dict):
""" sets :
self.x
self.y
self.z
self.poro
self.perm
"""
t0 = clock()
print "making Sleipner grid"
self.nx = 65
self.ny = 119
self.nz = 43
base_elev = xyz_dict[(32, 77, 34)][2]
for i in range(self.nx):
for j in range(self.ny):
for k in range(self.nz):
key = (i, j, k)
vol = vol_dict[key]
x = xyz_dict[key][0]
y = xyz_dict[key][1]
z = xyz_dict[key][2]
poro = poroperm_dict[key][0]
perm = poroperm_dict[key][1]
self.x[key] = x
self.y[key] = y
self.z[key] = z
self.thres_z[key] = base_elev - z
if j <=49:
boost = 0.0
self.z[key] += boost
self.thres_z[key] += boost
self.volume[key] = vol
self.poro[key] = poro
self.perm[key] = perm
#if perm > 0.1:
#self.perm[key] = 2000.
val = self.perc_threshold(key) + 1. * pow(10,5.)
#if i == 32 and j == 77:
#print '{:d}, {:.3e}, {:.3e}'.format(k, perm, val)
self.set_grid_value(key, val = val)
if len(self.fill_steps) == 0:
init_key = (32, 77, 34)
self.mark_filled(init_key, time = 1998. * 365.25 )
print "grid with: (nx, ny, nz) = ", \
(self.nx, self.ny, self.nz), " made in "
print clock() - t0, " seconds"
return 0
def contour_topo(self):
fig = plt.figure(figsize = (9., 12.))
ax = fig.add_subplot(111)
x = []
y = []
elev = []
for i in range(65):
b2 = []
b3 = []
blank = []
#if i >= 35 and i < 50:
for j in range(119):
#if j >= 45 and j < 75:
b2.append(self.x[(i, j, 2)])
b3.append(self.y[(i, j, 2)])
blank.append(self.z[(i, j, 2)])
elev.append(blank)
x.append(b2)
y.append(b3)
xp = np.asarray(x)
yp = np.asarray(y)
elp = np.asarray(elev)
N = 10
c = ax.contourf(xp, yp, elp, N)
cb = plt.colorbar(c, format='%.2f')
cb.set_ticks(np.linspace(np.amin(elp), np.amax(elp), N))
cb.set_label('elev [m]')
ax.set_xlabel('x [m]')
ax.set_ylabel('y [m]')
#plt.savefig('topo.png')
def perc_threshold(self, key):
# TODO
# ioannidis et al 1996.
c = 0.186
c = pow(10.,8.)
sigma = 0.045
#c = 1.
#sigma = 1.
pcd = c * sigma * \
pow(self.perm[key] / self.poro[key], -1/2.)
rho_b = 1019.
g = 9.81
delta_rho = rho_b - self.inj.get_density()
pgrav = delta_rho * g * (self.thres_z[key])
if key[0] == 32 and key[1] == 77:
print "k, pcd, pgrav, total"
print '{:d}, {:.3e}, {:.3e}, {:.3e}'.format(key[2], pcd, \
pgrav, pcd + pgrav)
return pcd + pgrav
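# --- illustrative sketch (not part of the original class) ---
# Worked example of the threshold above: capillary entry pressure
# c * sigma * sqrt(poro / perm) plus the buoyancy term delta_rho * g * dz.
# All numbers (including the injected-fluid density of 700) are made up.
def entry_plus_buoyancy(perm, poro, dz, rho_brine=1019., rho_inj=700.,
                        c=pow(10., 8.), sigma=0.045, g=9.81):
    pcd = c * sigma * pow(perm / poro, -1. / 2.)  # capillary entry term
    pgrav = (rho_brine - rho_inj) * g * dz        # buoyancy term
    return pcd + pgrav
# entry_plus_buoyancy(2000., 0.35, 5.) ~ 5.95e4 + 1.56e4 ~ 7.5e4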
def get_time_index_gravseg(self):
time_days = 0.
n = 0
time_indices = []
for i in range(1, len(self.fill_steps)):
key = self.fill_steps[i]
key0 = self.fill_steps[i-1]
if key[2] == 2 and key0[2] != 2:
time_indices.append(i)
n = time_indices[0]
time_days = self.fill_times[n]
return n, time_days
def get_plan_year_indices(self, years):
yr_indices = []
for year in years:
yr_days = (year) * 365.25
for n in range(0, len(self.fill_times)):
yr_ind = 0
if n > 0 and \
self.fill_times[n] > yr_days and \
self.fill_times[n-1] < yr_days:
yr_ind = n
yr_indices.append(yr_ind)
return yr_indices
def plot_sleipner_thick_contact(self, years, gwc = False, sim_title = ''):
if gwc == True:
tc_str = 'contact'
else:
tc_str = 'thickness'
yr_indices = self.get_plan_year_indices(years)
size = 14
font = {'size' : size}
matplotlib.rc('font', **font)
fig = plt.figure(figsize=(10.0, 2.5), dpi = 960)
middle = len(years) * 10
pos = 100 + middle
for n in range(len(yr_indices)):
pos +=1
ax = fig.add_subplot(pos)
xf = []
yf = []
kf = []
for i in range(self.nx):
tempx = []
tempy = []
tempk = []
for j in range(self.ny):
x = self.x[(i, j, 0)]
y = self.y[(i, j, 0)]
tn = yr_indices[n]
thick, contact = self.get_thick_contact(i, j, tn)
tempx.append(x)
tempy.append(y)
if gwc == True:
tempk.append(contact)
else:
tempk.append(thick)
xf.append(tempx)
yf.append(tempy)
kf.append(tempk)
xp = np.asarray(xf)
yp = np.asarray(yf)
kp = np.asarray(kf)
N = 10
contour_label = False
ax_label = False
c = ax.contourf(xp, yp, kp, N)
plt.tick_params(which='major', length=3, color = 'w')
if n == len(years) - 1:
fig.subplots_adjust(right=0.84)
cb_axes = fig.add_axes([0.85, 0.15, 0.05, 0.7])
plt.tick_params(which='major', length=3, color = 'k')
cb = fig.colorbar(c, cax = cb_axes, format = '%.2f')
cb.set_ticks(np.linspace(np.amin(kp), np.amax(kp), N))
cb.set_label(tc_str + ': [m]')
if n != 0:
ax.set_yticklabels([])
ax.set_xticklabels([])
ax.set_title(str(years[n]))
ax.axis([0, 3000, 0, 6000])
ax.xaxis.set_ticks(np.arange(0,3500,1000))
plt.savefig(sim_title + '_' + tc_str + '.pdf', fmt = 'pdf')
plt.clf()
return 0
def plot_sleipner_plume(self, years, sim_title = 'sleipner_perc'):
yr_indices = self.get_plan_year_indices(years)
size = 14
font = {'size' : size}
matplotlib.rc('font', **font)
fig = plt.figure(figsize=(16.0, 5), dpi=960)
middle = len(years) * 10
pos = 100 + middle
for i in range(len(yr_indices)):
pos +=1
ax = fig.add_subplot(pos)
xf = []
yf = []
kf = []
for n in range(yr_indices[i]):
key = self.fill_steps[n]
#if key[0] >= 35 and key[0] < 50:
#if key[1] >= 45 and key[1] < 75:
xf.append(self.x[key])
yf.append(self.y[key])
kf.append(key[2])
if 50 == key[1]:
key1 = (key[0], key[1]-1, key[2])
xp = np.asarray(xf)
yp = np.asarray(yf)
sc = ax.scatter(xp, yp, s=20, c=kf)
ax.set_title(str(years[i]))
ax.axis([0, 3000, 0, 6000])
ax.xaxis.set_ticks(np.arange(0, 3000, 1500))
if i != 0:
ax.set_yticklabels([])
#elif i == 5:
#cb_axes = self.fig.add_axes([0.85, 0.15, 0.05, 0.7])
#fig.colorbar(sc, cax = cb_axes)
plt.savefig(sim_title + '_plume.pdf', fmt = 'pdf')
plt.clf()
return 0
def plot_sleipner_cross_section(self, years, sec_index = 32):
yr_indices = self.get_plan_year_indices(years)
size = 14
font = {'size' : size}
matplotlib.rc('font', **font)
fig = plt.figure(figsize=(16.0, 5))
pos = 150
top = []
bot = []
ybound = []
for key in self.x.keys():
if key[0] == sec_index:
t, b = self.get_boundary_zs(key[0], key[1])
top.append(t)
bot.append(b)
ybound.append(self.y[key])
for i in range(len(yr_indices)):
pos +=1
ax = fig.add_subplot(pos)
yf = []
zf = []
for n in range(yr_indices[i]):
key = self.fill_steps[n]
if key[0] == sec_index:
yf.append(self.y[key])
zf.append(self.z[key])
yp = np.asarray(yf)
zp = np.asarray(zf)
tp = np.asarray(top)
bp = np.asarray(bot)
yb = np.asarray(ybound)
tl = ax.scatter(yb, tp, s=5, c='r')
bl = ax.scatter(yb, bp, s=5, c='g')
sc = ax.scatter(yp, zp, s=10)
ax.set_title(str(years[i]))
ax.axis([0, 6000, -815, -800])
ax.xaxis.set_ticks(np.arange(0, 6000, 1500))
if i != 0:
ax.set_yticklabels([])
plt.savefig('sleipner_cross_section.png')
plt.clf()
return 0
def contour_top_boundary(self):
x = []
y = []
top = []
for i in range(self.nx):
xinter = []
yinter = []
tinter = []
for j in range(self.ny):
key = (i, j, 2)
xinter.append(self.x[key])
yinter.append(self.y[key])
tinter.append(self.z[key])
x.append(xinter)
y.append(yinter)
top.append(tinter)
xp = np.asarray(x)
yp = np.asarray(y)
tp = np.asarray(top)
fig = plt.figure(figsize=(8.5,11))
ax = fig.add_subplot(111)
N = 50
cs_val = ax.contour(xp, yp, tp, N)
cb_val = plt.colorbar(cs_val, shrink = 0.8,\
extend='both')
cb_val.set_label('Top Boundary [z]')
fig.savefig('top_boundary.png', bbox_inches='tight', format='png')
return 0
def make_scatter_plan_t0_tn(self, t0, tn):
n = tn-t0
x = np.zeros(n)
y = np.zeros(n)
for n in range(t0, tn):
key = self.fill_steps[n]
x[n] = self.x[key]
y[n] = self.y[key]
return x, y
def plot_2d(self, uniform_grid = True):
print "PLOTTING..........."
f = plt.figure()
ax = f.add_subplot(111)
# make base grid of cells
if uniform_grid == True:
pts = []
xs = []
ys = []
for i in [0, self.nx-1]:
for j in [0, self.ny-1]:
key = (i, j, 0)
xs.append(self.x[key])
ys.append(self.y[key])
xp = np.asarray(xs)
yp = np.asarray(ys)
ax.scatter(xp, yp, s=30, c='w', marker='s')
# go through steps and figure out times
xf = []
yf = []
tf = []
tmin = self.fill_times[0]
tmax = self.fill_times[-1]
for i in range(0, len(self.fill_steps)):
key = self.fill_steps[i]
xf.append(self.x[key])
yf.append(self.y[key])
tf.append(self.fill_times[i])
ax.set_xlabel('x')
ax.set_ylabel('y')
xfp = np.asarray(xf)
yfp = np.asarray(yf)
cm = plt.get_cmap('bone_r')
sc = ax.scatter(xfp, yfp, c = tf, vmin=tmin, vmax=tmax, s = 300, cmap=cm)
plt.colorbar(sc)
plt.savefig('sleipner_2d.png')
#plt.show()
def get_boundary_zs(self, i, j):
for k in range(1, self.nz):
key0 = (i, j, k-1)
key1 = (i, j, k)
if self.perm[key0] < 1. and self.perm[key1] > 1.:
ztop = self.z[key1]
elif self.perm[key0] > 1. and self.perm[key1] < 1.:
zbot = self.z[key0]
return ztop, zbot
def get_thick_contact(self, i, j, time_index):
column = []
for key in self.fill_steps[:time_index]:
if key[0] == i and key[1] == j:
column.append(self.z[key])
column.sort()
if len(column) == 0:
thick = 0.
contact = -812.
else:
thick = column[-1] - column[0] + 0.52
contact = column[0]
if contact < -812.:
contact = -812.
return thick, contact
def plot_3d(self, uniform_grid = True):
fig = plt.figure()
ax = fig.add_subplot(111, projection = '3d')
if uniform_grid == True:
pts = []
xs = []
ys = []
zs = []
for i in [0, self.nx-1]:
for j in [0, self.ny-1]:
for k in [0, self.nz-1]:
key = (i, j, k)
xs.append(self.x[key])
ys.append(self.y[key])
zs.append(self.z[key])
xp = np.asarray(xs)
yp = np.asarray(ys)
zp = np.asarray(zs)
ax.scatter(xp, yp, zp, s=30, c='w', marker='s')
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.set_zlabel('z')
xf = []
yf = []
zf = []
tf = []
tmin = self.fill_times[0]
tmax = self.fill_times[-1]
for i in range(0, len(self.fill_steps)):
key = self.fill_steps[i]
xf.append(self.x[key])
yf.append(self.y[key])
zf.append(self.z[key])
tf.append(self.fill_times[i])
xfp = np.asarray(xf)
yfp = np.asarray(yf)
zfp = np.asarray(zf)
cm = plt.get_cmap('bone_r')
sc = ax.scatter(xfp, yfp, zfp, \
c = tf, vmin=tmin, vmax=tmax, s = 300, cmap=cm)
plt.colorbar(sc)
#plt.show()
return 0
def make_sleipner_csv(self):
e_cells, nx, ny, nz = re.read_eclipse()
f = open('sl_data.csv', 'w')
for i in range(nx):
for j in range(ny):
for k in range(nz):
key = (i, j, k)
ind = self.e_cell_index(i, j, k)
oc = e_cells[ind].getCorners()
corners = []
for c in oc:
x, y = c.getXY()
# FLIPPING ALL ZS IN THIS.
z = - c.getZ()
nc = self.Corner(x, y, z)
corners.append(nc)
self.corners[key] = corners
x = self.get_x_centroid(corners)
y = self.get_y_centroid(corners)
z = self.get_z_centroid(corners)
poro = e_cells[ind].getPorosity()
perm = e_cells[ind].getXPermeability()
volume = self.get_volume(x, y, z, corners)
vol_s = str(volume)
x_s = str(x)
y_s = str(y)
z_s = str(z)
poro_s = str(poro)
perm_s = str(perm)
f.write(', '.join([str(i), str(j), str(k), \
vol_s, x_s, y_s, z_s, poro_s, perm_s]))
f.write('\n')
f.close()
return 0
def read_sleipner_csv(self):
with open('sl_data.csv', 'rb') as csvfile:
vol_dict = {}
xyz_dict = {}
poroperm_dict = {}
rd = csv.reader(csvfile, delimiter = ',')
for row in rd:
key = (int(row[0]), int(row[1]), int(row[2]))
vol_dict[key] = float(row[3])
xyz_dict[key] = (float(row[4]), float(row[5]), float(row[6]))
poroperm_dict[key] = (float(row[7]), float(row[8]))
csvfile.close()
return vol_dict, xyz_dict, poroperm_dict
def e_cell_index(self, i, j, k):
nx = 65
ny = 119
return i + nx * j + nx * ny * k
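# --- illustrative sanity check (not part of the original class) ---
# The flattening above is index = i + nx * j + nx * ny * k; for nx=65, ny=119
# the cell (i, j, k) = (1, 2, 3) maps to 1 + 130 + 23205 = 23336, which numpy
# reproduces with the axis order reversed:
assert 1 + 65 * 2 + 65 * 119 * 3 == 23336
assert np.ravel_multi_index((3, 2, 1), (43, 119, 65)) == 23336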
def get_x_centroid(self, corners):
count = 0.
sum_c = 0.
for c in corners:
count += 1.
sum_c += c.get_x()
return sum_c / count
def get_y_centroid(self, corners):
count = 0.
sum_c = 0.
for c in corners:
count += 1.
sum_c += c.get_y()
return sum_c / count
def get_z_centroid(self, corners):
count = 0.
sum_c = 0.
for c in corners:
count += 1.
sum_c += c.get_z()
return sum_c / count
def get_dx(self, eleme, direc):
""" returns the length of a grid cell in a particular direction.
dir is either 1, 2 or 3 for x, y and z directions.
i, j and k are the indices
"""
if direc == 1 :
corners = self.corners[eleme]
dx = corners[0].get_x() - corners[1].get_x()
return dx
elif direc == 2 :
corners = self.corners[eleme]
dy = corners[0].get_y() - corners[2].get_y()
return dy
elif direc == 3 :
# fall back to the stored corner geometry (top corner 0 vs bottom corner 4);
# the original referenced undefined e_cells/i/j/k here
corners = self.corners[eleme]
dz = abs(corners[0].get_z() - corners[4].get_z())
return dz
else:
raise Exception("Invalid direction, \n" + \
" Please specify 1, 2 or 3.\n")
def get_volume(self, x, y, z, corners):
""" uses the equation for volume of an orientable polyhedron
V = 1/3 \sum_i x_i \dot n^hat_i A_i
"""
face_map = ['west', 'south', 'east', 'north', 'bot', 'top']
v_sum = 0.0
for face in face_map:
a = self.get_area(corners, face)
centroid = self.get_face_center(x, y, z, corners, face)
cent = np.asarray(centroid)
vec = self.get_normal_vector(x, y, z, corners, face)
v_sum += np.dot(cent, vec) * a
vol = 1./3. * v_sum
return vol
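# --- illustrative check (not part of the original class) ---
# Quick check of the formula above on a unit cube centred at the origin:
# every face has area 1 and r . n = 0.5, so V = (1/3) * 6 * 0.5 = 1.
_normals = np.array([[1, 0, 0], [-1, 0, 0], [0, 1, 0],
                     [0, -1, 0], [0, 0, 1], [0, 0, -1]], dtype=float)
_centres = 0.5 * _normals                 # face centres relative to the centroid
_areas = np.ones(6)
_vol = np.sum(np.einsum('ij,ij->i', _centres, _normals) * _areas) / 3.
assert abs(_vol - 1.0) < 1e-12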
def get_area(self, corners, face):
""" returns the area of a cell face, east, west, etc
"""
if face == 'west':
x1 = corners[2].get_y()
x2 = corners[0].get_y()
y1 = corners[2].get_z()
y2 = corners[0].get_z()
y3 = corners[6].get_z()
y4 = corners[4].get_z()
area = -self.get_area_side(x1, x2, y1, y2, y3, y4)
elif face == 'south':
x1 = corners[2].get_x()
x2 = corners[3].get_x()
y1 = corners[2].get_z()
y2 = corners[3].get_z()
y3 = corners[6].get_z()
y4 = corners[7].get_z()
area = -self.get_area_side(x1, x2, y1, y2, y3, y4)
elif face == 'east':
x1 = corners[3].get_y()
x2 = corners[1].get_y()
y1 = corners[3].get_z()
y2 = corners[1].get_z()
y3 = corners[7].get_z()
y4 = corners[5].get_z()
area = -self.get_area_side(x1, x2, y1, y2, y3, y4)
elif face == 'north':
x1 = corners[0].get_x()
x2 = corners[1].get_x()
y1 = corners[0].get_z()
y2 = corners[1].get_z()
y3 = corners[4].get_z()
y4 = corners[5].get_z()
area = -self.get_area_side(x1, x2, y1, y2, y3, y4)
elif face == 'bot':
nc = [corners[6], corners[7], corners[4], corners[5]]
c, resid, rank, sigma = self.fit_plane(nc)
mag = np.sqrt(pow(c[0],2.) + pow(c[1],2.) + 1)
x1 = corners[2].get_x()
x2 = corners[3].get_x()
y1 = corners[2].get_y()
y2 = corners[0].get_y()
area = mag * ((x2 * y2 - x1 * y2) - (x2 * y1 - x1 * y1))
elif face == 'top':
nc = [corners[2], corners[3], corners[0], corners[1]]
c, resid, rank, sigma = self.fit_plane(nc)
mag = np.sqrt(pow(c[0],2.) + pow(c[1],2.) + 1)
x1 = corners[6].get_x()
x2 = corners[7].get_x()
y1 = corners[6].get_y()
y2 = corners[4].get_y()
area = mag * ((x2 * y2 - x1 * y2) - (x2 * y1 - x1 * y1))
else:
raise Exception("Invalid Face, please specify" + \
"one of the six faces in face_map \n\n")
return area
def get_face_center(self, xc, yc, zc, corners, face):
""" center vector location relative to polyhedron center
"""
if face == 'west':
nc = [corners[0], corners[2], corners[4], corners[6]]
xf = self.get_x_centroid(nc)
yf = self.get_y_centroid(nc)
zf = self.get_z_centroid(nc)
elif face == 'south':
nc = [corners[2], corners[3], corners[6], corners[7]]
xf = self.get_x_centroid(nc)
yf = self.get_y_centroid(nc)
zf = self.get_z_centroid(nc)
a = 2
elif face == 'east':
nc = [corners[3], corners[1], corners[7], corners[5]]
xf = self.get_x_centroid(nc)
yf = self.get_y_centroid(nc)
zf = self.get_z_centroid(nc)
elif face == 'north':
nc = [corners[0], corners[1], corners[4], corners[5]]
xf = self.get_x_centroid(nc)
yf = self.get_y_centroid(nc)
zf = self.get_z_centroid(nc)
elif face == 'bot':
nc = [corners[6], corners[7], corners[4], corners[5]]
xf = self.get_x_centroid(nc)
yf = self.get_y_centroid(nc)
zf = self.get_z_centroid(nc)
elif face == 'top':
nc = [corners[2], corners[3], corners[0], corners[1]]
xf = self.get_x_centroid(nc)
yf = self.get_y_centroid(nc)
zf = self.get_z_centroid(nc)
else:
raise Exception("Invalid Face, please specify" + \
"one of the six faces in face_map \n\n")
vec = [xf - xc, yf - yc, zf - zc]
return vec
def get_normal_vector(self, x, y, z, corners, face):
""" gets normal vector of face
"""
if face == 'west':
vec = [-1., 0., 0.]
elif face == 'south':
vec = [0., -1., 0.]
elif face == 'east':
vec = [1., 0., 0.]
elif face == 'north':
vec = [0., 1., 0.]
elif face == 'bot':
nc = [corners[6], corners[7], corners[4], corners[5]]
c, resid, rank, sigma = self.fit_plane(nc)
mag = np.sqrt(pow(c[0], 2.) + pow(c[1],2.) + 1)
vec = [c[0]/mag, c[1]/mag, -1./mag]
elif face == 'top':
nc = [corners[2], corners[3], corners[0], corners[1]]
c, resid, rank, sigma = self.fit_plane(nc)
mag = np.sqrt(pow(c[0], 2.) + pow(c[1],2.) + 1)
vec = [-c[0]/mag, -c[1]/mag, 1./mag]
else:
raise Exception("Invalid Face, please specify" + \
"one of the six faces in face_map \n\n")
return vec
def fit_plane(self, corners):
""" takes four corner points and fits a plane least squares to them
returns in form z = c[0] x + c[1] y + c[2]
"""
x = []
y = []
z = []
for c in corners:
x.append(c.get_x())
y.append(c.get_y())
z.append(c.get_z())
x = np.asarray(x)
y = np.asarray(y)
z = np.asarray(z)
A = np.column_stack((x, y, np.ones(x.size)))
c, resid, rank, sigma = np.linalg.lstsq(A, z)
return c, resid, rank, sigma
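# --- illustrative sketch (not part of the original class) ---
# The same least-squares plane fit on four synthetic corners lying exactly on
# z = 2x - y + 3; the recovered coefficients are ~[2., -1., 3.].
_xy = np.array([[0., 0.], [1., 0.], [0., 1.], [1., 1.]])
_zz = 2. * _xy[:, 0] - _xy[:, 1] + 3.
_A = np.column_stack((_xy, np.ones(len(_xy))))
_c, _resid, _rank, _sigma = np.linalg.lstsq(_A, _zz)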
def get_area_side(self, x1, x2, y1, y2, y3, y4):
h = x2 - x1
b1 = y4 - y2
b2 = y3 - y1
return 0.5 * h * (b1 + b2)
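# --- note (not part of the original class) ---
# get_area_side is just the trapezoid rule: e.g. h = x2 - x1 = 2 with parallel
# sides b1 = 3 and b2 = 5 gives 0.5 * 2 * (3 + 5) = 8.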
def fail(msg):
'''print error and quit'''
print >> sys.stderr, msg
sys.exit(1)
| mit |
roshchupkin/hase | hdgwas/converter.py | 1 | 12670 | import os
import tables
import h5py
from numpy import genfromtxt
import bitarray as ba
import numpy as np
import gc
import subprocess
from tools import Timer
import pandas as pd
from data import MINIMACHDF5Folder
import shutil
import glob
import signal
class Genotype(object):
def __init__(self):
self.file_name = None
self.reader=None
self.probes = None
self.individuals = None
self.genotype = None
self.out=None
class GenotypeHDF5(Genotype):
def __init__(self, name,force=True):
super(GenotypeHDF5, self).__init__()
self.h5_name = '%s.h5' % name
self.file_name = name
self.pytable_filters = tables.Filters(complevel=9, complib='zlib')
self.h5_gen_file = None
self.h5_ind_file = None
self.h5_pr_file = None
self.gen_iter=0
def write_data(self, type, overwrite=True):
type_dic={'gen':['genotype',self.h5_gen_file],
'ind':['individuals',self.h5_ind_file],
'pr':['probes',self.h5_pr_file]}
if (not overwrite) and os.path.isfile( os.path.join(self.out,type_dic[type][0],self.h5_name) ):
print('File %s found. Please remove manually.' % self.h5_name)
return
else:
if type=='pr':
self.h5_pr_file = tables.open_file(os.path.join(self.out,type_dic[type][0],self.h5_name), 'w',
title=self.file_name)
self.h5_pr_file.close() #need to close file before join data
elif type=='ind':
self.h5_ind_file = tables.open_file(os.path.join(self.out,type_dic[type][0],self.h5_name), 'w',
title=self.file_name)
elif type=='gen':
self.h5_gen_file = tables.open_file(os.path.join(self.out,type_dic[type][0],str(self.gen_iter)+'_'+self.h5_name),
'w', title=self.file_name)
self.gen_iter+=1
def close(self):
self.h5_gen_file.close()
self.h5_ind_file.close()
self.h5_pr_file.close()
def summary(self):
raise (NotImplementedError)
class GenotypePLINK(GenotypeHDF5):
def __init__(self, name, reader=None):
super(GenotypePLINK, self).__init__(name)
self.reader=reader
self.split_size=None
def convert_individuals(self):
individuals=self.reader.folder.get_fam()
self.h5_ind_file.create_table(self.h5_ind_file.root, 'individuals', individuals,
title='Individuals', filters=self.pytable_filters)
self.h5_ind_file.root.individuals[:] = individuals
self.individuals = self.h5_ind_file.root.individuals[:]
self.n_ind=len(individuals)
#@profile
def convert_probes(self, chunk_size=100000):
if os.path.isfile(os.path.join(self.out,'probes',self.h5_name)):
os.remove(os.path.join(self.out,'probes',self.h5_name))
hash_table={'keys':np.array([],dtype=np.int),'allele':np.array([])}
i=0
chunk=np.array([])
while True:
chunk=self.reader.folder.get_bim(chunk_size)
if isinstance(chunk,type(None)):
break
print 'probes chunk:', i
chunk.columns=['CHR', 'ID', 'distance', 'bp', 'allele1', 'allele2']
hash_1=chunk.allele1.apply(hash)
hash_2=chunk.allele2.apply(hash)
k,indices=np.unique(np.append(hash_1,hash_2),return_index=True)
s=np.append(chunk.allele1,chunk.allele2)[indices]
ind=np.invert(np.in1d(k,hash_table['keys']))
hash_table['keys']=np.append(hash_table['keys'],k[ind])
hash_table['allele']=np.append(hash_table['allele'],s[ind])
chunk.allele1=hash_1
chunk.allele2=hash_2
# WARNING!!! doesn't work on windows
chunk.to_hdf(os.path.join(self.out,'probes',self.h5_name), key='probes',format='table',data_columns=True, append=True,
complib='zlib',complevel=9, min_itemsize = 45)
gc.collect()
i+=1
pd.DataFrame.from_dict(hash_table).to_csv(os.path.join(self.out,'probes',self.file_name+'_hash_table.csv.gz'),index=False,compression='gzip', sep='\t')
print('Number of probes converted: {}'.format(self.reader.folder.N_probes))
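# --- illustrative sketch (not part of the original class) ---
# Toy version of the hash-table trick used above: allele strings are replaced
# by hash() values so the probes table stores integers, and a hash -> allele
# lookup is written out (the *_hash_table.csv.gz above) to invert the mapping.
# On Python 3, str hashes are salted per process unless PYTHONHASHSEED is set,
# which is another reason the lookup must be stored alongside the data.
_alleles = pd.Series(['A', 'TTG', 'C', 'A'])
_hashes = _alleles.apply(hash)
_lookup = dict(zip(_hashes, _alleles))   # hash -> original allele string
_recovered = _hashes.map(_lookup)        # round-trips back to the strings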
#@profile
def convert_genotypes(self):
chunk_size=self.split_size
if chunk_size is None:
raise ValueError('CONVERTER_SPLIT_SIZE is not defined in the config file!')
G=np.array([])
#self.reader.folder.processed=0
while True:
with Timer() as t:
G=self.reader.folder.get_bed(chunk_size)
if isinstance(G,type(None)):
break
print ('Time to read {} SNPs is {} s'.format(G.shape[0], t.secs))
self.write_data('gen')
atom = tables.Int8Atom()
self.genotype = self.h5_gen_file.create_carray(self.h5_gen_file.root, 'genotype', atom,
(G.shape),
title='Genotype', filters=self.pytable_filters)
with Timer() as t:
self.genotype[:] = G
print ('Time to write {} SNPs is {} s'.format(G.shape[0], t.secs))
self.h5_gen_file.close()
G=None
gc.collect()
def plink2hdf5(self,out, force=True):
if force:
try:
os.mkdir(os.path.join(out,'genotype') )
os.mkdir(os.path.join(out,'individuals') )
os.mkdir(os.path.join(out,'probes') )
except:
print('Directories "genotype","probes","individuals" are already exist in {}...'.format(self.out))
self.out=out
self.write_data('ind')
self.convert_individuals()
self.h5_ind_file.close()
self.reader.folder.processed=0
self.write_data('pr')
self.convert_probes()
self.reader.folder.processed=0
self.convert_genotypes()
def _summary(self, head=10):
# TODO (low) rewrite to gather statistics without loading the data into memory
# placeholder report; the counts still need to be wired in:
# print('Number of Probes: %d' % None)
# print('Number of Individuals: %d' % None)
# print('The genotype matrix is of size %d by %d' % None)
raise NotImplementedError
class GenotypeMINIMAC(object):
def __init__(self, name, reader=None):
self.reader=reader
self.study_name=name
self.split_size=None
self.hdf5_iter=0
self.pytable_filter=tables.Filters(complevel=9, complib='zlib')
self.cluster=False
def save_hdf5_chunk(self,data,out,name):
print 'Saving chunk...{}'.format(os.path.join(out,'genotype',str(self.hdf5_iter)+'_'+name+'.h5'))
h5_gen_file = tables.open_file(
os.path.join(out,'genotype',str(self.hdf5_iter)+'_'+name+'.h5'), 'w', title=name)
atom = tables.Float16Atom() # TODO (low) check data format
genotype = h5_gen_file.create_carray(h5_gen_file.root, 'genotype', atom,
(data.shape),
title='Genotype',
filters=self.pytable_filter)
genotype[:] = data
h5_gen_file.close()
genotype=None
data=None
gc.collect()
self.hdf5_iter+=1
def MACH2hdf5(self, out, id=False,remove_id=False):
FNULL = open(os.devnull, 'w')
subprocess.call(['bash',os.path.join(os.environ['HASEDIR'],'tools','minimac2hdf5.sh'),
self.reader.folder.path, out , os.environ['HASEDIR'], self.study_name ], shell=False,stderr=subprocess.STDOUT)
if id:
if self.cluster:
ind=pd.read_hdf(os.path.join(out,'individuals',self.study_name+'.h5'),'individuals').individual
N=ind.shape[0]
print 'Submit to cluster!'
cmd="qsub -sync y -t 1-{} {} {}".format(N,os.path.join(os.environ['HASEDIR'],'tools','qsub_helper.sh'),os.path.join( out,'id_convert.sh' ))
print cmd
proc=subprocess.Popen(cmd, shell=True,stderr=subprocess.STDOUT,stdout=subprocess.PIPE).communicate()
else:
proc=subprocess.Popen(['bash',os.path.join( out,'id_convert.sh' ) ], shell=False,stderr=subprocess.STDOUT)
print proc.communicate()
self.folder=MINIMACHDF5Folder(out,self.study_name)
self.folder.pool.split_size=self.split_size
self.folder.pool.chunk_size=self.split_size
print('Start to convert id files to chunk files...')
while True:
data=self.folder.get_next()
if data is None:
break
self.save_hdf5_chunk(data,out,self.study_name)
gc.collect()
print('Finished')
if remove_id:
self.folder.pool.remove(type='all')
else:
try:
os.mkdir(os.path.join(out,'id_genotype') )
except:
print 'id_genotype folder already exists'
self.folder.pool.move(os.path.join(out,'id_genotype'),type='all')
shutil.move(os.path.join(out,'id_convert.sh'), os.path.join(out,'tmp_files','id_convert.sh') )
shutil.move(os.path.join(out,'SUB_FAM.txt'), os.path.join(out,'tmp_files','SUB_FAM.txt') )
shutil.move(os.path.join(out,'SUB_ID.txt'), os.path.join(out,'tmp_files','SUB_ID.txt') )
else:
f=open(os.path.join( out,'minimac_convert.sh' ), 'w')
probes=pd.HDFStore(os.path.join(out,'probes', self.study_name +'.h5'),'r')
N_probes=probes.get_storer('probes').nrows
print 'There are {} probes'.format(N_probes)
chunk=np.vstack(((np.arange(0,N_probes,self.split_size)+1)[:-1],np.arange(0,N_probes,self.split_size)[1:]))
N_jobs=chunk.shape[1]
ch=[0,0]
i_ch = -1
for i_ch in range(chunk.shape[1]):
ch=chunk[:,i_ch]
#print ch
l='bash {} {} {} {} {} {} {} {} \n'.format(
os.path.join(os.environ['HASEDIR'],'tools','minimacGenotype2hdf5.sh'),
self.reader.folder.path,
out,
os.environ['HASEDIR'],
self.study_name,
ch[0],
ch[1],
i_ch
)
f.write(l)
if ch[1]!=N_probes:
l='bash {} {} {} {} {} {} {} {} \n'.format(
os.path.join(os.environ['HASEDIR'],'tools','minimacGenotype2hdf5.sh'),
self.reader.folder.path,
out,
os.environ['HASEDIR'],
self.study_name,
ch[1]+1,
N_probes,
i_ch+1
)
f.write(l)
N_jobs+=1
f.close()
if self.cluster:
print 'Submit to cluster!'
cmd="qsub -sync y -t 1-{} {} {}".format(N_jobs,os.path.join(os.environ['HASEDIR'],'tools','qsub_helper.sh'),os.path.join( out,'minimac_convert.sh' ))
print cmd
proc=subprocess.Popen(cmd, shell=True,stderr=subprocess.STDOUT,stdout=subprocess.PIPE).communicate()
else:
proc=subprocess.Popen(['bash',os.path.join( out,'minimac_convert.sh' ) ], shell=False,stderr=FNULL)
print proc.communicate()
shutil.move(os.path.join(out,'minimac_convert.sh'), os.path.join(out,'tmp_files','minimac_convert.sh') )
shutil.move(os.path.join(out,'id_convert.sh'), os.path.join(out,'tmp_files','id_convert.sh') )
shutil.move(os.path.join(out,'SUB_FAM.txt'), os.path.join(out,'tmp_files','SUB_FAM.txt') )
shutil.move(os.path.join(out,'SUB_ID.txt'), os.path.join(out,'tmp_files','SUB_ID.txt') )
shutil.move(os.path.join(out,'files_order.txt'),os.path.join(out,'tmp_files','files_order.txt'))
shutil.move(os.path.join(out, 'info.txt'), os.path.join(out, 'tmp_files', 'info.txt'))
def summary(self):
pass
class GenotypeVCF(object):
def __init__(self, name, reader=None):
self.reader=reader
self.study_name=name
self.split_size=None
self.hdf5_iter=0
self.pytable_filter=tables.Filters(complevel=9, complib='zlib')
self.cluster=False
def VCF2hdf5(self, out):
FNULL = open(os.devnull, 'w')
subprocess.call(['bash',os.path.join(os.environ['HASEDIR'],'tools','VCF2hdf5.sh'),
self.reader.folder.path, out , os.environ['HASEDIR'], self.study_name ], shell=False,stderr=subprocess.STDOUT,
preexec_fn=lambda:signal.signal(signal.SIGPIPE, signal.SIG_DFL)
)
f=open(os.path.join( out,'vcf_convert.sh' ), 'w')
probes=pd.HDFStore(os.path.join(out,'probes', self.study_name +'.h5'),'r')
N_probes=probes.get_storer('probes').nrows
print 'There are {} probes'.format(N_probes)
chunk=np.vstack(((np.arange(0,N_probes,self.split_size)+1)[:-1],np.arange(0,N_probes,self.split_size)[1:]))
N_jobs=chunk.shape[1]
ch=[0,0]
i_ch=-1
for i_ch in range(chunk.shape[1]):
ch=chunk[:,i_ch]
#print ch
l='bash {} {} {} {} {} {} {} {} \n'.format(
os.path.join(os.environ['HASEDIR'],'tools','VCFGenotype2hdf5.sh'),
self.reader.folder.path,
out,
os.environ['HASEDIR'],
self.study_name,
ch[0],
ch[1],
i_ch
)
f.write(l)
if ch[1]!=N_probes:
l='bash {} {} {} {} {} {} {} {} \n'.format(
os.path.join(os.environ['HASEDIR'],'tools','VCFGenotype2hdf5.sh'),
self.reader.folder.path,
out,
os.environ['HASEDIR'],
self.study_name,
ch[1]+1,
N_probes,
i_ch+1
)
f.write(l)
N_jobs+=1
f.close()
if self.cluster:
print 'Submit to cluster!'
cmd="qsub -sync y -t 1-{} {} {}".format(N_jobs,os.path.join(os.environ['HASEDIR'],'tools','qsub_helper.sh'),os.path.join( out,'vcf_convert.sh' ))
print cmd
proc=subprocess.Popen(cmd, shell=True,stderr=subprocess.STDOUT,stdout=subprocess.PIPE).communicate()
else:
proc=subprocess.Popen(['bash',os.path.join( out,'vcf_convert.sh' ) ], shell=False,stderr=FNULL)
print proc.communicate()
shutil.move(os.path.join(out,'vcf_convert.sh'), os.path.join(out,'tmp_files','vcf_convert.sh') )
shutil.move(os.path.join(out,'SUB_ID.txt'), os.path.join(out,'tmp_files','SUB_ID.txt') )
shutil.move(os.path.join(out, 'snps_count.txt'), os.path.join(out, 'tmp_files', 'snps_count.txt'))
shutil.move(os.path.join(out, 'files_order.txt'), os.path.join(out, 'tmp_files', 'files_order.txt'))
shutil.move(os.path.join(out, 'info.txt'), os.path.join(out, 'tmp_files', 'info.txt'))
| gpl-3.0 |
andyh616/mne-python | mne/decoding/ems.py | 16 | 4347 | # Author: Denis Engemann <[email protected]>
# Alexandre Gramfort <[email protected]>
#
# License: BSD (3-clause)
import numpy as np
from ..utils import logger, verbose
from ..fixes import Counter
from ..parallel import parallel_func
from .. import pick_types, pick_info
@verbose
def compute_ems(epochs, conditions=None, picks=None, n_jobs=1, verbose=None):
"""Compute event-matched spatial filter on epochs
This version operates on the entire time course. No time window needs to
be specified. The result is a spatial filter at each time point and a
corresponding time course. Intuitively, the result gives the similarity
between the filter at each time point and the data vector (sensors) at
that time point.
References
----------
[1] Aaron Schurger, Sebastien Marti, and Stanislas Dehaene, "Reducing
multi-sensor data to a single time course that reveals experimental
effects", BMC Neuroscience 2013, 14:122
Parameters
----------
epochs : instance of mne.Epochs
The epochs.
conditions : list of str | None
If a list of strings, strings must match the
epochs.event_id's key as well as the number of conditions supported
by the objective_function. If None keys in epochs.event_id are used.
picks : array-like of int | None
Channels to be included. If None only good data channels are used.
Defaults to None
n_jobs : int
Number of jobs to run in parallel.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Defaults to self.verbose.
Returns
-------
surrogate_trials : ndarray, shape (trials, n_trials, n_time_points)
The trial surrogates.
mean_spatial_filter : ndarray, shape (n_channels, n_times)
The set of spatial filters.
conditions : ndarray, shape (n_epochs,)
The conditions used. Values correspond to original event ids.
"""
logger.info('...computing surrogate time series. This can take some time')
if picks is None:
picks = pick_types(epochs.info, meg=True, eeg=True)
if not len(set(Counter(epochs.events[:, 2]).values())) == 1:
raise ValueError('The same number of epochs is required by '
'this function. Please consider '
'`epochs.equalize_event_counts`')
if conditions is None:
conditions = epochs.event_id.keys()
epochs = epochs.copy()
else:
epochs = epochs[conditions]
epochs.drop_bad_epochs()
if len(conditions) != 2:
raise ValueError('Currently this function expects exactly 2 '
'conditions but you gave me %i' %
len(conditions))
ev = epochs.events[:, 2]
# special care to avoid path-dependent mappings and orders
conditions = list(sorted(conditions))
cond_idx = [np.where(ev == epochs.event_id[k])[0] for k in conditions]
info = pick_info(epochs.info, picks)
data = epochs.get_data()[:, picks]
# Scale (z-score) the data by channel type
for ch_type in ['mag', 'grad', 'eeg']:
if ch_type in epochs:
if ch_type == 'eeg':
this_picks = pick_types(info, meg=False, eeg=True)
else:
this_picks = pick_types(info, meg=ch_type, eeg=False)
data[:, this_picks] /= np.std(data[:, this_picks])
from sklearn.cross_validation import LeaveOneOut
parallel, p_func, _ = parallel_func(_run_ems, n_jobs=n_jobs)
out = parallel(p_func(_ems_diff, data, cond_idx, train, test)
for train, test in LeaveOneOut(len(data)))
surrogate_trials, spatial_filter = zip(*out)
surrogate_trials = np.array(surrogate_trials)
spatial_filter = np.mean(spatial_filter, axis=0)
return surrogate_trials, spatial_filter, epochs.events[:, 2]
def _ems_diff(data0, data1):
"""default diff objective function"""
return np.mean(data0, axis=0) - np.mean(data1, axis=0)
def _run_ems(objective_function, data, cond_idx, train, test):
d = objective_function(*(data[np.intersect1d(c, train)] for c in cond_idx))
d /= np.sqrt(np.sum(d ** 2, axis=0))[None, :]
# compute surrogates
return np.sum(data[test[0]] * d, axis=0), d
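# --- illustrative sketch (not part of the MNE API) ---
# Minimal synthetic demo of the EMS projection implemented above: the spatial
# filter is the unit-normalised difference of condition means, and each trial
# is projected onto it at every time point. All numbers are arbitrary.
if __name__ == '__main__':
    rng = np.random.RandomState(42)
    n_trials, n_channels, n_times = 20, 8, 5
    data = rng.randn(n_trials, n_channels, n_times)
    cond_idx = [np.arange(0, 10), np.arange(10, 20)]
    d = _ems_diff(data[cond_idx[0]], data[cond_idx[1]])
    d /= np.sqrt(np.sum(d ** 2, axis=0))[None, :]
    surrogate = np.sum(data[0] * d, axis=0)  # one trial's EMS time course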
| bsd-3-clause |
Eric89GXL/mne-python | mne/viz/_brain/tests/test_brain.py | 2 | 33467 | # -*- coding: utf-8 -*-
#
# Authors: Alexandre Gramfort <[email protected]>
# Eric Larson <[email protected]>
# Joan Massich <[email protected]>
# Guillaume Favelier <[email protected]>
# Oleh Kozynets <[email protected]>
#
# License: Simplified BSD
import os
import os.path as path
import pytest
import numpy as np
from numpy.testing import assert_allclose, assert_array_equal
from mne import (read_source_estimate, read_evokeds, read_cov,
read_forward_solution, pick_types_forward,
SourceEstimate, MixedSourceEstimate,
VolSourceEstimate)
from mne.minimum_norm import apply_inverse, make_inverse_operator
from mne.source_space import (read_source_spaces, vertex_to_mni,
setup_volume_source_space)
from mne.datasets import testing
from mne.utils import check_version
from mne.label import read_label
from mne.viz._brain import Brain, _LinkViewer, _BrainScraper, _LayeredMesh
from mne.viz._brain.colormap import calculate_lut
from matplotlib import cm, image
from matplotlib.lines import Line2D
import matplotlib.pyplot as plt
data_path = testing.data_path(download=False)
subject_id = 'sample'
subjects_dir = path.join(data_path, 'subjects')
fname_stc = path.join(data_path, 'MEG/sample/sample_audvis_trunc-meg')
fname_label = path.join(data_path, 'MEG/sample/labels/Vis-lh.label')
fname_cov = path.join(
data_path, 'MEG', 'sample', 'sample_audvis_trunc-cov.fif')
fname_evoked = path.join(data_path, 'MEG', 'sample',
'sample_audvis_trunc-ave.fif')
fname_fwd = path.join(
data_path, 'MEG', 'sample', 'sample_audvis_trunc-meg-eeg-oct-4-fwd.fif')
src_fname = path.join(data_path, 'subjects', 'sample', 'bem',
'sample-oct-6-src.fif')
class _Collection(object):
def __init__(self, actors):
self._actors = actors
def GetNumberOfItems(self):
return len(self._actors)
def GetItemAsObject(self, ii):
return self._actors[ii]
class TstVTKPicker(object):
"""Class to test cell picking."""
def __init__(self, mesh, cell_id, hemi, brain):
self.mesh = mesh
self.cell_id = cell_id
self.point_id = None
self.hemi = hemi
self.brain = brain
self._actors = ()
def GetCellId(self):
"""Return the picked cell."""
return self.cell_id
def GetDataSet(self):
"""Return the picked mesh."""
return self.mesh
def GetPickPosition(self):
"""Return the picked position."""
if self.hemi == 'vol':
self.point_id = self.cell_id
return self.brain._data['vol']['grid_coords'][self.cell_id]
else:
vtk_cell = self.mesh.GetCell(self.cell_id)
cell = [vtk_cell.GetPointId(point_id) for point_id
in range(vtk_cell.GetNumberOfPoints())]
self.point_id = cell[0]
return self.mesh.points[self.point_id]
def GetProp3Ds(self):
"""Return all picked Prop3Ds."""
return _Collection(self._actors)
def GetRenderer(self):
"""Return the "renderer"."""
return self # set this to also be the renderer and active camera
GetActiveCamera = GetRenderer
def GetPosition(self):
"""Return the position."""
return np.array(self.GetPickPosition()) - (0, 0, 100)
def test_layered_mesh(renderer_interactive):
"""Test management of scalars/colormap overlay."""
if renderer_interactive._get_3d_backend() != 'pyvista':
pytest.skip('TimeViewer tests only supported on PyVista')
mesh = _LayeredMesh(
renderer=renderer_interactive._get_renderer(size=[300, 300]),
vertices=np.array([[0, 0, 0], [1, 0, 0], [0, 1, 0], [1, 1, 0]]),
triangles=np.array([[0, 1, 2], [1, 2, 3]]),
normals=np.array([[0, 0, 1]] * 4),
)
assert not mesh._is_mapped
mesh.map()
assert mesh._is_mapped
assert mesh._cache is None
mesh.update()
assert len(mesh._overlays) == 0
mesh.add_overlay(
scalars=np.array([0, 1, 1, 0]),
colormap=np.array([(1, 1, 1, 1), (0, 0, 0, 0)]),
rng=None,
opacity=None,
name='test',
)
assert mesh._cache is not None
assert len(mesh._overlays) == 1
assert 'test' in mesh._overlays
mesh.remove_overlay('test')
assert len(mesh._overlays) == 0
mesh._clean()
@testing.requires_testing_data
def test_brain_gc(renderer, brain_gc):
"""Test that a minimal version of Brain gets GC'ed."""
if renderer._get_3d_backend() != 'pyvista':
pytest.skip('TimeViewer tests only supported on PyVista')
brain = Brain('fsaverage', 'both', 'inflated', subjects_dir=subjects_dir)
brain.close()
@testing.requires_testing_data
def test_brain_init(renderer, tmpdir, pixel_ratio, brain_gc):
"""Test initialization of the Brain instance."""
if renderer._get_3d_backend() != 'pyvista':
pytest.skip('TimeViewer tests only supported on PyVista')
from mne.source_estimate import _BaseSourceEstimate
class FakeSTC(_BaseSourceEstimate):
def __init__(self):
pass
hemi = 'lh'
surf = 'inflated'
cortex = 'low_contrast'
title = 'test'
size = (300, 300)
kwargs = dict(subject_id=subject_id, subjects_dir=subjects_dir)
with pytest.raises(ValueError, match='"size" parameter must be'):
Brain(hemi=hemi, surf=surf, size=[1, 2, 3], **kwargs)
with pytest.raises(KeyError):
Brain(hemi='foo', surf=surf, **kwargs)
with pytest.raises(TypeError, match='figure'):
Brain(hemi=hemi, surf=surf, figure='foo', **kwargs)
with pytest.raises(TypeError, match='interaction'):
Brain(hemi=hemi, surf=surf, interaction=0, **kwargs)
with pytest.raises(ValueError, match='interaction'):
Brain(hemi=hemi, surf=surf, interaction='foo', **kwargs)
renderer.backend._close_all()
brain = Brain(hemi=hemi, surf=surf, size=size, title=title,
cortex=cortex, units='m', **kwargs)
with pytest.raises(TypeError, match='not supported'):
brain._check_stc(hemi='lh', array=FakeSTC(), vertices=None)
with pytest.raises(ValueError, match='add_data'):
brain.setup_time_viewer(time_viewer=True)
brain._hemi = 'foo' # for testing: hemis
with pytest.raises(ValueError, match='not be None'):
brain._check_hemi(hemi=None)
with pytest.raises(ValueError, match='either "lh" or "rh"'):
brain._check_hemi(hemi='foo')
with pytest.raises(ValueError, match='either "lh" or "rh"'):
brain._check_hemis(hemi='foo')
brain._hemi = hemi # end testing: hemis
with pytest.raises(ValueError, match='bool or positive'):
brain._to_borders(None, None, 'foo')
assert brain.interaction == 'trackball'
# add_data
stc = read_source_estimate(fname_stc)
fmin = stc.data.min()
fmax = stc.data.max()
for h in brain._hemis:
if h == 'lh':
hi = 0
else:
hi = 1
hemi_data = stc.data[:len(stc.vertices[hi]), 10]
hemi_vertices = stc.vertices[hi]
with pytest.raises(TypeError, match='scale_factor'):
brain.add_data(hemi_data, hemi=h, scale_factor='foo')
with pytest.raises(TypeError, match='vector_alpha'):
brain.add_data(hemi_data, hemi=h, vector_alpha='foo')
with pytest.raises(ValueError, match='thresh'):
brain.add_data(hemi_data, hemi=h, thresh=-1)
with pytest.raises(ValueError, match='remove_existing'):
brain.add_data(hemi_data, hemi=h, remove_existing=-1)
with pytest.raises(ValueError, match='time_label_size'):
brain.add_data(hemi_data, hemi=h, time_label_size=-1,
vertices=hemi_vertices)
with pytest.raises(ValueError, match='is positive'):
brain.add_data(hemi_data, hemi=h, smoothing_steps=-1,
vertices=hemi_vertices)
with pytest.raises(TypeError, match='int or NoneType'):
brain.add_data(hemi_data, hemi=h, smoothing_steps='foo')
with pytest.raises(ValueError, match='dimension mismatch'):
brain.add_data(array=np.array([0, 1, 2]), hemi=h,
vertices=hemi_vertices)
with pytest.raises(ValueError, match='vertices parameter must not be'):
brain.add_data(hemi_data, fmin=fmin, hemi=hemi,
fmax=fmax, vertices=None)
with pytest.raises(ValueError, match='has shape'):
brain.add_data(hemi_data[:, np.newaxis], fmin=fmin, hemi=hemi,
fmax=fmax, vertices=None, time=[0, 1])
brain.add_data(hemi_data, fmin=fmin, hemi=h, fmax=fmax,
colormap='hot', vertices=hemi_vertices,
smoothing_steps='nearest', colorbar=(0, 0), time=None)
with pytest.raises(ValueError, match='brain has no defined times'):
brain.set_time(0.)
assert brain.data['lh']['array'] is hemi_data
assert brain.views == ['lateral']
assert brain.hemis == ('lh',)
brain.add_data(hemi_data[:, np.newaxis], fmin=fmin, hemi=h, fmax=fmax,
colormap='hot', vertices=hemi_vertices,
smoothing_steps=1, initial_time=0., colorbar=False,
time=[0])
with pytest.raises(ValueError, match='the range of available times'):
brain.set_time(7.)
brain.set_time(0.)
brain.set_time_point(0) # should hit _safe_interp1d
with pytest.raises(ValueError, match='consistent with'):
brain.add_data(hemi_data[:, np.newaxis], fmin=fmin, hemi=h,
fmax=fmax, colormap='hot', vertices=hemi_vertices,
smoothing_steps='nearest', colorbar=False,
time=[1])
with pytest.raises(ValueError, match='different from'):
brain.add_data(hemi_data[:, np.newaxis][:, [0, 0]],
fmin=fmin, hemi=h, fmax=fmax, colormap='hot',
vertices=hemi_vertices)
with pytest.raises(ValueError, match='need shape'):
brain.add_data(hemi_data[:, np.newaxis], time=[0, 1],
fmin=fmin, hemi=h, fmax=fmax, colormap='hot',
vertices=hemi_vertices)
with pytest.raises(ValueError, match='If array has 3'):
brain.add_data(hemi_data[:, np.newaxis, np.newaxis],
fmin=fmin, hemi=h, fmax=fmax, colormap='hot',
vertices=hemi_vertices)
# add label
label = read_label(fname_label)
with pytest.raises(ValueError, match="not a filename"):
brain.add_label(0)
with pytest.raises(ValueError, match="does not exist"):
brain.add_label('foo', subdir='bar')
label.name = None # test unnamed label
brain.add_label(label, scalar_thresh=0.)
assert isinstance(brain.labels[label.hemi], list)
assert 'unnamed' in brain._layered_meshes[label.hemi]._overlays
brain.remove_labels()
brain.add_label(fname_label)
brain.add_label('V1', borders=True)
brain.remove_labels()
brain.remove_labels()
# add foci
brain.add_foci([0], coords_as_verts=True,
hemi=hemi, color='blue')
# add text
brain.add_text(x=0, y=0, text='foo')
brain.close()
# add annotation
annots = ['aparc', path.join(subjects_dir, 'fsaverage', 'label',
'lh.PALS_B12_Lobes.annot')]
borders = [True, 2]
alphas = [1, 0.5]
colors = [None, 'r']
brain = Brain(subject_id='fsaverage', hemi='both', size=size,
surf='inflated', subjects_dir=subjects_dir)
with pytest.raises(RuntimeError, match="both hemispheres"):
brain.add_annotation(annots[-1])
with pytest.raises(ValueError, match="does not exist"):
brain.add_annotation('foo')
brain.close()
brain = Brain(subject_id='fsaverage', hemi=hemi, size=size,
surf='inflated', subjects_dir=subjects_dir)
for a, b, p, color in zip(annots, borders, alphas, colors):
brain.add_annotation(a, b, p, color=color)
brain.show_view(dict(focalpoint=(1e-5, 1e-5, 1e-5)), roll=1, distance=500)
# image and screenshot
fname = path.join(str(tmpdir), 'test.png')
assert not path.isfile(fname)
brain.save_image(fname)
assert path.isfile(fname)
brain.show_view(view=dict(azimuth=180., elevation=90.))
img = brain.screenshot(mode='rgb')
if renderer._get_3d_backend() == 'mayavi':
pixel_ratio = 1. # no HiDPI when using the testing backend
want_size = np.array([size[0] * pixel_ratio, size[1] * pixel_ratio, 3])
assert_allclose(img.shape, want_size)
brain.close()
@testing.requires_testing_data
@pytest.mark.skipif(os.getenv('CI_OS_NAME', '') == 'osx',
reason='Unreliable/segfault on macOS CI')
@pytest.mark.parametrize('hemi', ('lh', 'rh'))
def test_single_hemi(hemi, renderer_interactive, brain_gc):
"""Test single hemi support."""
if renderer_interactive._get_3d_backend() != 'pyvista':
pytest.skip('TimeViewer tests only supported on PyVista')
stc = read_source_estimate(fname_stc)
idx, order = (0, 1) if hemi == 'lh' else (1, -1)
stc = SourceEstimate(
getattr(stc, f'{hemi}_data'), [stc.vertices[idx], []][::order],
0, 1, 'sample')
brain = stc.plot(
subjects_dir=subjects_dir, hemi='both', size=300)
brain.close()
# test skipping when len(vertices) == 0
stc.vertices[1 - idx] = np.array([])
brain = stc.plot(
subjects_dir=subjects_dir, hemi=hemi, size=300)
brain.close()
@testing.requires_testing_data
@pytest.mark.slowtest
def test_brain_save_movie(tmpdir, renderer, brain_gc):
"""Test saving a movie of a Brain instance."""
if renderer._get_3d_backend() == "mayavi":
pytest.skip('Save movie only supported on PyVista')
brain = _create_testing_brain(hemi='lh', time_viewer=False)
filename = str(path.join(tmpdir, "brain_test.mov"))
for interactive_state in (False, True):
# for coverage, we set interactivity
if interactive_state:
brain._renderer.plotter.enable()
else:
brain._renderer.plotter.disable()
with pytest.raises(TypeError, match='unexpected keyword argument'):
brain.save_movie(filename, time_dilation=1, tmin=1, tmax=1.1,
bad_name='blah')
assert not path.isfile(filename)
brain.save_movie(filename, time_dilation=0.1,
interpolation='nearest')
assert path.isfile(filename)
os.remove(filename)
brain.close()
@testing.requires_testing_data
@pytest.mark.slowtest
def test_brain_time_viewer(renderer_interactive, pixel_ratio, brain_gc):
"""Test time viewer primitives."""
if renderer_interactive._get_3d_backend() != 'pyvista':
pytest.skip('TimeViewer tests only supported on PyVista')
with pytest.raises(ValueError, match="between 0 and 1"):
_create_testing_brain(hemi='lh', show_traces=-1.0)
with pytest.raises(ValueError, match="got unknown keys"):
_create_testing_brain(hemi='lh', surf='white', src='volume',
volume_options={'foo': 'bar'})
brain = _create_testing_brain(hemi='both', show_traces=False)
# test sub routines when show_traces=False
brain._on_pick(None, None)
brain._configure_vertex_time_course()
brain._configure_label_time_course()
brain.setup_time_viewer() # for coverage
brain.callbacks["time"](value=0)
brain.callbacks["orientation_lh_0_0"](
value='lat',
update_widget=True
)
brain.callbacks["orientation_lh_0_0"](
value='medial',
update_widget=True
)
brain.callbacks["time"](
value=0.0,
time_as_index=False,
)
brain.callbacks["smoothing"](value=1)
brain.callbacks["fmin"](value=12.0)
brain.callbacks["fmax"](value=4.0)
brain.callbacks["fmid"](value=6.0)
brain.callbacks["fmid"](value=4.0)
brain.callbacks["fscale"](value=1.1)
brain.callbacks["fmin"](value=12.0)
brain.callbacks["fmid"](value=4.0)
brain.toggle_interface()
brain.toggle_interface(value=False)
brain.callbacks["playback_speed"](value=0.1)
brain.toggle_playback()
brain.toggle_playback(value=False)
brain.apply_auto_scaling()
brain.restore_user_scaling()
brain.reset()
plt.close('all')
brain.help()
assert len(plt.get_fignums()) == 1
plt.close('all')
assert len(plt.get_fignums()) == 0
# screenshot
brain.show_view(view=dict(azimuth=180., elevation=90.))
img = brain.screenshot(mode='rgb')
want_shape = np.array([300 * pixel_ratio, 300 * pixel_ratio, 3])
assert_allclose(img.shape, want_shape)
brain.close()
@testing.requires_testing_data
@pytest.mark.parametrize('hemi', [
'lh',
pytest.param('rh', marks=pytest.mark.slowtest),
pytest.param('split', marks=pytest.mark.slowtest),
pytest.param('both', marks=pytest.mark.slowtest),
])
@pytest.mark.parametrize('src', [
'surface',
pytest.param('vector', marks=pytest.mark.slowtest),
pytest.param('volume', marks=pytest.mark.slowtest),
pytest.param('mixed', marks=pytest.mark.slowtest),
])
@pytest.mark.slowtest
def test_brain_traces(renderer_interactive, hemi, src, tmpdir,
brain_gc):
"""Test brain traces."""
if renderer_interactive._get_3d_backend() != 'pyvista':
pytest.skip('Only PyVista supports traces')
hemi_str = list()
if src in ('surface', 'vector', 'mixed'):
hemi_str.extend([hemi] if hemi in ('lh', 'rh') else ['lh', 'rh'])
if src in ('mixed', 'volume'):
hemi_str.extend(['vol'])
# label traces
brain = _create_testing_brain(
hemi=hemi, surf='white', src=src, show_traces='label',
volume_options=None, # for speed, don't upsample
n_time=5, initial_time=0,
)
if src == 'surface':
brain._data['src'] = None # test src=None
if src in ('surface', 'vector', 'mixed'):
assert brain.show_traces
assert brain.traces_mode == 'label'
brain._label_mode_widget.setCurrentText('max')
# test picking a cell at random
rng = np.random.RandomState(0)
for idx, current_hemi in enumerate(hemi_str):
if current_hemi == 'vol':
continue
current_mesh = brain._layered_meshes[current_hemi]._polydata
cell_id = rng.randint(0, current_mesh.n_cells)
test_picker = TstVTKPicker(
current_mesh, cell_id, current_hemi, brain)
assert len(brain.picked_patches[current_hemi]) == 0
brain._on_pick(test_picker, None)
assert len(brain.picked_patches[current_hemi]) == 1
for label_id in list(brain.picked_patches[current_hemi]):
label = brain._annotation_labels[current_hemi][label_id]
assert isinstance(label._line, Line2D)
brain._label_mode_widget.setCurrentText('mean')
brain.clear_glyphs()
assert len(brain.picked_patches[current_hemi]) == 0
brain._on_pick(test_picker, None) # picked and added
assert len(brain.picked_patches[current_hemi]) == 1
brain._on_pick(test_picker, None) # picked again so removed
assert len(brain.picked_patches[current_hemi]) == 0
# test switching from 'label' to 'vertex'
brain._annot_cands_widget.setCurrentText('None')
brain._label_mode_widget.setCurrentText('max')
else: # volume
assert brain._trace_mode_widget is None
assert brain._annot_cands_widget is None
assert brain._label_mode_widget is None
brain.close()
# test colormap
if src != 'vector':
brain = _create_testing_brain(
hemi=hemi, surf='white', src=src, show_traces=0.5, initial_time=0,
volume_options=None, # for speed, don't upsample
n_time=1 if src == 'mixed' else 5, diverging=True,
add_data_kwargs=dict(colorbar_kwargs=dict(n_labels=3)),
)
# mne_analyze should be chosen
ctab = brain._data['ctable']
assert_array_equal(ctab[0], [0, 255, 255, 255]) # opaque cyan
assert_array_equal(ctab[-1], [255, 255, 0, 255]) # opaque yellow
assert_allclose(ctab[len(ctab) // 2], [128, 128, 128, 0], atol=3)
brain.close()
# vertex traces
brain = _create_testing_brain(
hemi=hemi, surf='white', src=src, show_traces=0.5, initial_time=0,
volume_options=None, # for speed, don't upsample
n_time=1 if src == 'mixed' else 5,
add_data_kwargs=dict(colorbar_kwargs=dict(n_labels=3)),
)
assert brain.show_traces
assert brain.traces_mode == 'vertex'
assert hasattr(brain, "picked_points")
assert hasattr(brain, "_spheres")
assert brain.plotter.scalar_bar.GetNumberOfLabels() == 3
# add foci should work for volumes
brain.add_foci([[0, 0, 0]], hemi='lh' if src == 'surface' else 'vol')
# test points picked by default
picked_points = brain.get_picked_points()
spheres = brain._spheres
for current_hemi in hemi_str:
assert len(picked_points[current_hemi]) == 1
n_spheres = len(hemi_str)
if hemi == 'split' and src in ('mixed', 'volume'):
n_spheres += 1
assert len(spheres) == n_spheres
# test switching from 'vertex' to 'label'
if src == 'surface':
brain._annot_cands_widget.setCurrentText('aparc')
brain._annot_cands_widget.setCurrentText('None')
# test removing points
brain.clear_glyphs()
assert len(spheres) == 0
for key in ('lh', 'rh', 'vol'):
assert len(picked_points[key]) == 0
# test picking a cell at random
rng = np.random.RandomState(0)
for idx, current_hemi in enumerate(hemi_str):
assert len(spheres) == 0
if current_hemi == 'vol':
current_mesh = brain._data['vol']['grid']
vertices = brain._data['vol']['vertices']
values = current_mesh.cell_arrays['values'][vertices]
cell_id = vertices[np.argmax(np.abs(values))]
else:
current_mesh = brain._layered_meshes[current_hemi]._polydata
cell_id = rng.randint(0, current_mesh.n_cells)
test_picker = TstVTKPicker(None, None, current_hemi, brain)
assert brain._on_pick(test_picker, None) is None
test_picker = TstVTKPicker(
current_mesh, cell_id, current_hemi, brain)
assert cell_id == test_picker.cell_id
assert test_picker.point_id is None
brain._on_pick(test_picker, None)
brain._on_pick(test_picker, None)
assert test_picker.point_id is not None
assert len(picked_points[current_hemi]) == 1
assert picked_points[current_hemi][0] == test_picker.point_id
assert len(spheres) > 0
sphere = spheres[-1]
vertex_id = sphere._vertex_id
assert vertex_id == test_picker.point_id
line = sphere._line
hemi_prefix = current_hemi[0].upper()
if current_hemi == 'vol':
assert hemi_prefix + ':' in line.get_label()
assert 'MNI' in line.get_label()
continue # the MNI conversion is more complex
hemi_int = 0 if current_hemi == 'lh' else 1
mni = vertex_to_mni(
vertices=vertex_id,
hemis=hemi_int,
subject=brain._subject_id,
subjects_dir=brain._subjects_dir
)
label = "{}:{} MNI: {}".format(
hemi_prefix, str(vertex_id).ljust(6),
', '.join('%5.1f' % m for m in mni))
assert line.get_label() == label
# remove the sphere by clicking in its vicinity
old_len = len(spheres)
test_picker._actors = sum((s._actors for s in spheres), [])
brain._on_pick(test_picker, None)
assert len(spheres) < old_len
screenshot = brain.screenshot()
screenshot_all = brain.screenshot(time_viewer=True)
assert screenshot.shape[0] < screenshot_all.shape[0]
# and the scraper for it (will close the instance)
# only test one condition to save time
if not (hemi == 'rh' and src == 'surface' and
check_version('sphinx_gallery')):
brain.close()
return
fnames = [str(tmpdir.join(f'temp_{ii}.png')) for ii in range(2)]
block_vars = dict(image_path_iterator=iter(fnames),
example_globals=dict(brain=brain))
block = ('code', """
something
# brain.save_movie(time_dilation=1, framerate=1,
# interpolation='linear', time_viewer=True)
#
""", 1)
gallery_conf = dict(src_dir=str(tmpdir), compress_images=[])
scraper = _BrainScraper()
rst = scraper(block, block_vars, gallery_conf)
assert brain.plotter is None # closed
gif_0 = fnames[0][:-3] + 'gif'
for fname in (gif_0, fnames[1]):
assert path.basename(fname) in rst
assert path.isfile(fname)
img = image.imread(fname)
assert img.shape[1] == screenshot.shape[1] # same width
assert img.shape[0] > screenshot.shape[0] # larger height
assert img.shape[:2] == screenshot_all.shape[:2]
@testing.requires_testing_data
@pytest.mark.slowtest
def test_brain_linkviewer(renderer_interactive, brain_gc):
"""Test _LinkViewer primitives."""
if renderer_interactive._get_3d_backend() != 'pyvista':
pytest.skip('Linkviewer only supported on PyVista')
brain1 = _create_testing_brain(hemi='lh', show_traces=False)
brain2 = _create_testing_brain(hemi='lh', show_traces='separate')
brain1._times = brain1._times * 2
with pytest.warns(RuntimeWarning, match='linking time'):
link_viewer = _LinkViewer(
[brain1, brain2],
time=True,
camera=False,
colorbar=False,
picking=False,
)
brain_data = _create_testing_brain(hemi='split', show_traces='vertex')
link_viewer = _LinkViewer(
[brain2, brain_data],
time=True,
camera=True,
colorbar=True,
picking=True,
)
link_viewer.set_time_point(value=0)
link_viewer.brains[0].mpl_canvas.time_func(0)
link_viewer.set_fmin(0)
link_viewer.set_fmid(0.5)
link_viewer.set_fmax(1)
link_viewer.set_playback_speed(value=0.1)
link_viewer.toggle_playback()
del link_viewer
brain1.close()
brain2.close()
brain_data.close()
def test_calculate_lut():
"""Test brain's colormap functions."""
colormap = "coolwarm"
alpha = 1.0
fmin = 0.0
fmid = 0.5
fmax = 1.0
center = None
calculate_lut(colormap, alpha=alpha, fmin=fmin,
fmid=fmid, fmax=fmax, center=center)
center = 0.0
colormap = cm.get_cmap(colormap)
calculate_lut(colormap, alpha=alpha, fmin=fmin,
fmid=fmid, fmax=fmax, center=center)
cmap = cm.get_cmap(colormap)
zero_alpha = np.array([1., 1., 1., 0])
half_alpha = np.array([1., 1., 1., 0.5])
atol = 1.5 / 256.
# fmin < fmid < fmax
lut = calculate_lut(colormap, alpha, 1, 2, 3)
assert lut.shape == (256, 4)
assert_allclose(lut[0], cmap(0) * zero_alpha, atol=atol)
assert_allclose(lut[127], cmap(0.5), atol=atol)
assert_allclose(lut[-1], cmap(1.), atol=atol)
# divergent
lut = calculate_lut(colormap, alpha, 0, 1, 2, 0)
assert lut.shape == (256, 4)
assert_allclose(lut[0], cmap(0), atol=atol)
assert_allclose(lut[63], cmap(0.25), atol=atol)
assert_allclose(lut[127], cmap(0.5) * zero_alpha, atol=atol)
assert_allclose(lut[192], cmap(0.75), atol=atol)
assert_allclose(lut[-1], cmap(1.), atol=atol)
# fmin == fmid == fmax
lut = calculate_lut(colormap, alpha, 1, 1, 1)
zero_alpha = np.array([1., 1., 1., 0])
assert lut.shape == (256, 4)
assert_allclose(lut[0], cmap(0) * zero_alpha, atol=atol)
assert_allclose(lut[1], cmap(0.5), atol=atol)
assert_allclose(lut[-1], cmap(1.), atol=atol)
# divergent
lut = calculate_lut(colormap, alpha, 0, 0, 0, 0)
assert lut.shape == (256, 4)
assert_allclose(lut[0], cmap(0), atol=atol)
assert_allclose(lut[127], cmap(0.5) * zero_alpha, atol=atol)
assert_allclose(lut[-1], cmap(1.), atol=atol)
# fmin == fmid < fmax
lut = calculate_lut(colormap, alpha, 1, 1, 2)
assert lut.shape == (256, 4)
assert_allclose(lut[0], cmap(0.) * zero_alpha, atol=atol)
assert_allclose(lut[1], cmap(0.5), atol=atol)
assert_allclose(lut[-1], cmap(1.), atol=atol)
# divergent
lut = calculate_lut(colormap, alpha, 1, 1, 2, 0)
assert lut.shape == (256, 4)
assert_allclose(lut[0], cmap(0), atol=atol)
assert_allclose(lut[62], cmap(0.245), atol=atol)
assert_allclose(lut[64], cmap(0.5) * zero_alpha, atol=atol)
assert_allclose(lut[127], cmap(0.5) * zero_alpha, atol=atol)
assert_allclose(lut[191], cmap(0.5) * zero_alpha, atol=atol)
assert_allclose(lut[193], cmap(0.755), atol=atol)
assert_allclose(lut[-1], cmap(1.), atol=atol)
lut = calculate_lut(colormap, alpha, 0, 0, 1, 0)
assert lut.shape == (256, 4)
assert_allclose(lut[0], cmap(0), atol=atol)
assert_allclose(lut[126], cmap(0.25), atol=atol)
assert_allclose(lut[127], cmap(0.5) * zero_alpha, atol=atol)
assert_allclose(lut[129], cmap(0.75), atol=atol)
assert_allclose(lut[-1], cmap(1.), atol=atol)
# fmin < fmid == fmax
lut = calculate_lut(colormap, alpha, 1, 2, 2)
assert lut.shape == (256, 4)
assert_allclose(lut[0], cmap(0) * zero_alpha, atol=atol)
assert_allclose(lut[-2], cmap(0.5), atol=atol)
assert_allclose(lut[-1], cmap(1.), atol=atol)
# divergent
lut = calculate_lut(colormap, alpha, 1, 2, 2, 0)
assert lut.shape == (256, 4)
assert_allclose(lut[0], cmap(0), atol=atol)
assert_allclose(lut[1], cmap(0.25), atol=2 * atol)
assert_allclose(lut[32], cmap(0.375) * half_alpha, atol=atol)
assert_allclose(lut[64], cmap(0.5) * zero_alpha, atol=atol)
assert_allclose(lut[127], cmap(0.5) * zero_alpha, atol=atol)
assert_allclose(lut[191], cmap(0.5) * zero_alpha, atol=atol)
assert_allclose(lut[223], cmap(0.625) * half_alpha, atol=atol)
assert_allclose(lut[-2], cmap(0.7475), atol=2 * atol)
assert_allclose(lut[-1], cmap(1.), atol=2 * atol)
lut = calculate_lut(colormap, alpha, 0, 1, 1, 0)
assert lut.shape == (256, 4)
assert_allclose(lut[0], cmap(0), atol=atol)
assert_allclose(lut[1], cmap(0.25), atol=2 * atol)
assert_allclose(lut[64], cmap(0.375) * half_alpha, atol=atol)
assert_allclose(lut[127], cmap(0.5) * zero_alpha, atol=atol)
assert_allclose(lut[191], cmap(0.625) * half_alpha, atol=atol)
assert_allclose(lut[-2], cmap(0.75), atol=2 * atol)
assert_allclose(lut[-1], cmap(1.), atol=atol)
with pytest.raises(ValueError, match=r'.*fmin \(1\) <= fmid \(0\) <= fma'):
calculate_lut(colormap, alpha, 1, 0, 2)
def _create_testing_brain(hemi, surf='inflated', src='surface', size=300,
n_time=5, diverging=False, **kwargs):
assert src in ('surface', 'vector', 'mixed', 'volume')
meth = 'plot'
if src in ('surface', 'mixed'):
sample_src = read_source_spaces(src_fname)
klass = MixedSourceEstimate if src == 'mixed' else SourceEstimate
if src == 'vector':
fwd = read_forward_solution(fname_fwd)
fwd = pick_types_forward(fwd, meg=True, eeg=False)
evoked = read_evokeds(fname_evoked, baseline=(None, 0))[0]
noise_cov = read_cov(fname_cov)
free = make_inverse_operator(
evoked.info, fwd, noise_cov, loose=1.)
stc = apply_inverse(evoked, free, pick_ori='vector')
return stc.plot(
subject=subject_id, hemi=hemi, size=size,
subjects_dir=subjects_dir, colormap='auto',
**kwargs)
if src in ('volume', 'mixed'):
vol_src = setup_volume_source_space(
subject_id, 7., mri='aseg.mgz',
volume_label='Left-Cerebellum-Cortex',
subjects_dir=subjects_dir, add_interpolator=False)
assert len(vol_src) == 1
assert vol_src[0]['nuse'] == 150
if src == 'mixed':
sample_src = sample_src + vol_src
else:
sample_src = vol_src
klass = VolSourceEstimate
meth = 'plot_3d'
assert sample_src.kind == src
# dense version
rng = np.random.RandomState(0)
vertices = [s['vertno'] for s in sample_src]
n_verts = sum(len(v) for v in vertices)
stc_data = np.zeros((n_verts * n_time))
stc_size = stc_data.size
stc_data[(rng.rand(stc_size // 20) * stc_size).astype(int)] = \
rng.rand(stc_data.size // 20)
stc_data.shape = (n_verts, n_time)
if diverging:
stc_data -= 0.5
stc = klass(stc_data, vertices, 1, 1)
clim = dict(kind='value', lims=[0.1, 0.2, 0.3])
if diverging:
clim['pos_lims'] = clim.pop('lims')
brain_data = getattr(stc, meth)(
subject=subject_id, hemi=hemi, surface=surf, size=size,
subjects_dir=subjects_dir, colormap='auto',
clim=clim, src=sample_src,
**kwargs)
return brain_data
| bsd-3-clause |
marcsans/cnn-physics-perception | phy/lib/python2.7/site-packages/matplotlib/stackplot.py | 7 | 4266 | """
Stacked area plot for 1D arrays inspired by Douglas Y'barbo's stackoverflow
answer:
http://stackoverflow.com/questions/2225995/how-can-i-create-stacked-line-graph-with-matplotlib
(http://stackoverflow.com/users/66549/doug)
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from matplotlib.externals import six
from matplotlib.externals.six.moves import xrange
from cycler import cycler
import numpy as np
__all__ = ['stackplot']
def stackplot(axes, x, *args, **kwargs):
"""Draws a stacked area plot.
*x* : 1d array of dimension N
    *y* : 2d array of dimension MxN, OR any number of 1d arrays each of dimension
1xN. The data is assumed to be unstacked. Each of the following
calls is legal::
stackplot(x, y) # where y is MxN
        stackplot(x, y1, y2, y3, y4)  # where y1, y2, y3, y4 are all 1xN
Keyword arguments:
*baseline* : ['zero', 'sym', 'wiggle', 'weighted_wiggle']
Method used to calculate the baseline. 'zero' is just a
simple stacked plot. 'sym' is symmetric around zero and
is sometimes called `ThemeRiver`. 'wiggle' minimizes the
sum of the squared slopes. 'weighted_wiggle' does the
same but weights to account for size of each layer.
It is also called `Streamgraph`-layout. More details
can be found at http://www.leebyron.com/else/streamgraph/.
*labels* : A list or tuple of labels to assign to each data series.
*colors* : A list or tuple of colors. These will be cycled through and
used to colour the stacked areas.
All other keyword arguments are passed to
:func:`~matplotlib.Axes.fill_between`
Returns *r* : A list of
:class:`~matplotlib.collections.PolyCollection`, one for each
element in the stacked area plot.
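    Example (illustrative sketch only, not from the original docs): assuming
    *ax* is a matplotlib Axes and *x*, *y1*, *y2* are 1d arrays of the same
    length, a call might look like::

        stackplot(ax, x, y1, y2, labels=('low', 'high'), baseline='wiggle')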
"""
if len(args) == 1:
y = np.atleast_2d(*args)
elif len(args) > 1:
y = np.row_stack(args)
labels = iter(kwargs.pop('labels', []))
colors = kwargs.pop('colors', None)
if colors is not None:
axes.set_prop_cycle(cycler('color', colors))
baseline = kwargs.pop('baseline', 'zero')
# Assume data passed has not been 'stacked', so stack it here.
stack = np.cumsum(y, axis=0)
r = []
if baseline == 'zero':
first_line = 0.
elif baseline == 'sym':
first_line = -np.sum(y, 0) * 0.5
stack += first_line[None, :]
elif baseline == 'wiggle':
m = y.shape[0]
first_line = (y * (m - 0.5 - np.arange(0, m)[:, None])).sum(0)
first_line /= -m
stack += first_line
elif baseline == 'weighted_wiggle':
m, n = y.shape
center = np.zeros(n)
total = np.sum(y, 0)
increase = np.hstack((y[:, 0:1], np.diff(y)))
below_size = total - stack
below_size += 0.5 * y
move_up = below_size / total
move_up[:, 0] = 0.5
center = (move_up - 0.5) * increase
center = np.cumsum(center.sum(0))
first_line = center - 0.5 * total
stack += first_line
else:
errstr = "Baseline method %s not recognised. " % baseline
errstr += "Expected 'zero', 'sym', 'wiggle' or 'weighted_wiggle'"
raise ValueError(errstr)
# Color between x = 0 and the first array.
if 'color' in axes._get_lines._prop_keys:
color = six.next(axes._get_lines.prop_cycler)['color']
else:
color = None
r.append(axes.fill_between(x, first_line, stack[0, :],
facecolor=color,
label= six.next(labels, None),
**kwargs))
# Color between array i-1 and array i
for i in xrange(len(y) - 1):
if 'color' in axes._get_lines._prop_keys:
color = six.next(axes._get_lines.prop_cycler)['color']
else:
color = None
r.append(axes.fill_between(x, stack[i, :], stack[i + 1, :],
facecolor=color,
label= six.next(labels, None),
**kwargs))
return r
| mit |
ishay2b/tensorflow | tensorflow/contrib/learn/python/learn/estimators/__init__.py | 34 | 12484 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""An estimator is a rule for calculating an estimate of a given quantity.
# Estimators
* **Estimators** are used to train and evaluate TensorFlow models.
They support regression and classification problems.
* **Classifiers** are functions that have discrete outcomes.
* **Regressors** are functions that predict continuous values.
## Choosing the correct estimator
* For **Regression** problems use one of the following:
* `LinearRegressor`: Uses linear model.
* `DNNRegressor`: Uses DNN.
* `DNNLinearCombinedRegressor`: Uses Wide & Deep.
* `TensorForestEstimator`: Uses RandomForest.
See tf.contrib.tensor_forest.client.random_forest.TensorForestEstimator.
* `Estimator`: Use when you need a custom model.
* For **Classification** problems use one of the following:
* `LinearClassifier`: Multiclass classifier using Linear model.
* `DNNClassifier`: Multiclass classifier using DNN.
* `DNNLinearCombinedClassifier`: Multiclass classifier using Wide & Deep.
* `TensorForestEstimator`: Uses RandomForest.
See tf.contrib.tensor_forest.client.random_forest.TensorForestEstimator.
* `SVM`: Binary classifier using linear SVMs.
* `LogisticRegressor`: Use when you need custom model for binary
classification.
* `Estimator`: Use when you need custom model for N class classification.
## Pre-canned Estimators
Pre-canned estimators are machine learning estimators premade for general
purpose problems. If you need more customization, you can always write your
own custom estimator as described in the section below.
Pre-canned estimators are tested and optimized for speed and quality.
### Define the feature columns
Here are some possible types of feature columns used as inputs to a pre-canned
estimator.
Feature columns may vary based on the estimator used. So you can see which
feature columns are fed to each estimator in the below section.
```python
sparse_feature_a = sparse_column_with_keys(
column_name="sparse_feature_a", keys=["AB", "CD", ...])
embedding_feature_a = embedding_column(
sparse_id_column=sparse_feature_a, dimension=3, combiner="sum")
sparse_feature_b = sparse_column_with_hash_bucket(
column_name="sparse_feature_b", hash_bucket_size=1000)
embedding_feature_b = embedding_column(
sparse_id_column=sparse_feature_b, dimension=16, combiner="sum")
crossed_feature_a_x_b = crossed_column(
columns=[sparse_feature_a, sparse_feature_b], hash_bucket_size=10000)
real_feature = real_valued_column("real_feature")
real_feature_buckets = bucketized_column(
source_column=real_feature,
boundaries=[18, 25, 30, 35, 40, 45, 50, 55, 60, 65])
```
### Create the pre-canned estimator
DNNClassifier, DNNRegressor, and DNNLinearCombinedClassifier are all pretty
similar to each other in how you use them. You can easily plug in an
optimizer and/or regularization to those estimators.
#### DNNClassifier
A classifier for TensorFlow DNN models.
```python
my_features = [embedding_feature_a, embedding_feature_b]
estimator = DNNClassifier(
feature_columns=my_features,
hidden_units=[1024, 512, 256],
optimizer=tf.train.ProximalAdagradOptimizer(
learning_rate=0.1,
l1_regularization_strength=0.001
))
```
#### DNNRegressor
A regressor for TensorFlow DNN models.
```python
my_features = [embedding_feature_a, embedding_feature_b]
estimator = DNNRegressor(
feature_columns=my_features,
hidden_units=[1024, 512, 256])
# Or estimator using the ProximalAdagradOptimizer optimizer with
# regularization.
estimator = DNNRegressor(
feature_columns=my_features,
hidden_units=[1024, 512, 256],
optimizer=tf.train.ProximalAdagradOptimizer(
learning_rate=0.1,
l1_regularization_strength=0.001
))
```
#### DNNLinearCombinedClassifier
A classifier for TensorFlow Linear and DNN joined training models.
* Wide and deep model
* Multi class (2 by default)
```python
my_linear_features = [crossed_feature_a_x_b]
my_deep_features = [embedding_feature_a, embedding_feature_b]
estimator = DNNLinearCombinedClassifier(
# Common settings
n_classes=n_classes,
weight_column_name=weight_column_name,
# Wide settings
linear_feature_columns=my_linear_features,
linear_optimizer=tf.train.FtrlOptimizer(...),
# Deep settings
dnn_feature_columns=my_deep_features,
dnn_hidden_units=[1000, 500, 100],
dnn_optimizer=tf.train.AdagradOptimizer(...))
```
#### LinearClassifier
Train a linear model to classify instances into one of multiple possible
classes. When number of possible classes is 2, this is binary classification.
```python
my_features = [sparse_feature_b, crossed_feature_a_x_b]
estimator = LinearClassifier(
feature_columns=my_features,
optimizer=tf.train.FtrlOptimizer(
learning_rate=0.1,
l1_regularization_strength=0.001
))
```
#### LinearRegressor
Train a linear regression model to predict a label value given observation of
feature values.
```python
my_features = [sparse_feature_b, crossed_feature_a_x_b]
estimator = LinearRegressor(
feature_columns=my_features)
```
### LogisticRegressor
Logistic regression estimator for binary classification.
```python
# See tf.contrib.learn.Estimator(...) for details on model_fn structure
def my_model_fn(...):
pass
estimator = LogisticRegressor(model_fn=my_model_fn)
# Input builders
def input_fn_train():
pass
estimator.fit(input_fn=input_fn_train)
estimator.predict(x=x)
```
#### SVM - Support Vector Machine
Support Vector Machine (SVM) model for binary classification.
Currently only linear SVMs are supported.
```python
my_features = [real_feature, sparse_feature_a]
estimator = SVM(
example_id_column='example_id',
feature_columns=my_features,
l2_regularization=10.0)
```
#### DynamicRnnEstimator
An `Estimator` that uses a recurrent neural network with dynamic unrolling.
```python
problem_type = ProblemType.CLASSIFICATION # or REGRESSION
prediction_type = PredictionType.SINGLE_VALUE # or MULTIPLE_VALUE
estimator = DynamicRnnEstimator(problem_type,
prediction_type,
my_feature_columns)
```
### Use the estimator
There are two main functions for using estimators, one of which is for
training, and one of which is for evaluation.
You can specify different data sources for each one in order to use different
datasets for train and eval.
```python
# Input builders
def input_fn_train(): # returns x, Y
...
estimator.fit(input_fn=input_fn_train)
def input_fn_eval(): # returns x, Y
...
estimator.evaluate(input_fn=input_fn_eval)
estimator.predict(x=x)
```
## Creating Custom Estimator
To create a custom `Estimator`, provide a function to `Estimator`'s
constructor that builds your model (`model_fn`, below):
```python
estimator = tf.contrib.learn.Estimator(
model_fn=model_fn,
model_dir=model_dir) # Where the model's data (e.g., checkpoints)
# are saved.
```
Here is a skeleton of this function, with descriptions of its arguments and
return values in the accompanying tables:
```python
def model_fn(features, targets, mode, params):
# Logic to do the following:
# 1. Configure the model via TensorFlow operations
# 2. Define the loss function for training/evaluation
# 3. Define the training operation/optimizer
# 4. Generate predictions
return predictions, loss, train_op
```
You may use `mode` and check against
`tf.contrib.learn.ModeKeys.{TRAIN, EVAL, INFER}` to parameterize `model_fn`.
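For example, a minimal sketch of branching on `mode` inside a custom
`model_fn` might look as follows; `my_predictions`, `my_loss` and
`my_train_op` are hypothetical helpers standing in for your own model code:
```python
def model_fn(features, targets, mode, params):
  predictions = my_predictions(features, params)  # hypothetical helper
  loss = None
  train_op = None
  if mode != tf.contrib.learn.ModeKeys.INFER:
    loss = my_loss(predictions, targets)  # hypothetical helper
  if mode == tf.contrib.learn.ModeKeys.TRAIN:
    train_op = my_train_op(loss, params)  # hypothetical helper
  return predictions, loss, train_op
```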
In the Further Reading section below, there is an end-to-end TensorFlow
tutorial for building a custom estimator.
## Additional Estimators
There are additional estimators under
`tensorflow.contrib.factorization.python.ops`:
* Gaussian mixture model (GMM) clustering
## Further reading
For further reading, there are several tutorials with relevant topics,
including:
* [Overview of linear models](../../../tutorials/linear/overview.md)
* [Linear model tutorial](../../../tutorials/wide/index.md)
* [Wide and deep learning tutorial](../../../tutorials/wide_and_deep/index.md)
* [Custom estimator tutorial](../../../tutorials/estimators/index.md)
* [Building input functions](../../../tutorials/input_fn/index.md)
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.learn.python.learn.estimators._sklearn import NotFittedError
from tensorflow.contrib.learn.python.learn.estimators.constants import ProblemType
from tensorflow.contrib.learn.python.learn.estimators.dnn import DNNClassifier
from tensorflow.contrib.learn.python.learn.estimators.dnn import DNNEstimator
from tensorflow.contrib.learn.python.learn.estimators.dnn import DNNRegressor
from tensorflow.contrib.learn.python.learn.estimators.dnn_linear_combined import DNNLinearCombinedClassifier
from tensorflow.contrib.learn.python.learn.estimators.dnn_linear_combined import DNNLinearCombinedEstimator
from tensorflow.contrib.learn.python.learn.estimators.dnn_linear_combined import DNNLinearCombinedRegressor
from tensorflow.contrib.learn.python.learn.estimators.dynamic_rnn_estimator import DynamicRnnEstimator
from tensorflow.contrib.learn.python.learn.estimators.estimator import BaseEstimator
from tensorflow.contrib.learn.python.learn.estimators.estimator import Estimator
from tensorflow.contrib.learn.python.learn.estimators.estimator import GraphRewriteSpec
from tensorflow.contrib.learn.python.learn.estimators.estimator import infer_real_valued_columns_from_input
from tensorflow.contrib.learn.python.learn.estimators.estimator import infer_real_valued_columns_from_input_fn
from tensorflow.contrib.learn.python.learn.estimators.estimator import SKCompat
from tensorflow.contrib.learn.python.learn.estimators.head import binary_svm_head
from tensorflow.contrib.learn.python.learn.estimators.head import Head
from tensorflow.contrib.learn.python.learn.estimators.head import loss_only_head
from tensorflow.contrib.learn.python.learn.estimators.head import multi_class_head
from tensorflow.contrib.learn.python.learn.estimators.head import multi_head
from tensorflow.contrib.learn.python.learn.estimators.head import multi_label_head
from tensorflow.contrib.learn.python.learn.estimators.head import no_op_train_fn
from tensorflow.contrib.learn.python.learn.estimators.head import poisson_regression_head
from tensorflow.contrib.learn.python.learn.estimators.head import regression_head
from tensorflow.contrib.learn.python.learn.estimators.kmeans import KMeansClustering
from tensorflow.contrib.learn.python.learn.estimators.linear import LinearClassifier
from tensorflow.contrib.learn.python.learn.estimators.linear import LinearEstimator
from tensorflow.contrib.learn.python.learn.estimators.linear import LinearRegressor
from tensorflow.contrib.learn.python.learn.estimators.logistic_regressor import LogisticRegressor
from tensorflow.contrib.learn.python.learn.estimators.metric_key import MetricKey
from tensorflow.contrib.learn.python.learn.estimators.model_fn import ModeKeys
from tensorflow.contrib.learn.python.learn.estimators.model_fn import ModelFnOps
from tensorflow.contrib.learn.python.learn.estimators.prediction_key import PredictionKey
from tensorflow.contrib.learn.python.learn.estimators.rnn_common import PredictionType
from tensorflow.contrib.learn.python.learn.estimators.run_config import ClusterConfig
from tensorflow.contrib.learn.python.learn.estimators.run_config import Environment
from tensorflow.contrib.learn.python.learn.estimators.run_config import RunConfig
from tensorflow.contrib.learn.python.learn.estimators.run_config import TaskType
from tensorflow.contrib.learn.python.learn.estimators.svm import SVM
| apache-2.0 |
brynpickering/calliope | calliope/backend/pyomo/model.py | 1 | 8740 | """
Copyright (C) 2013-2018 Calliope contributors listed in AUTHORS.
Licensed under the Apache 2.0 License (see LICENSE file).
"""
import logging
import os
import ruamel.yaml
from contextlib import redirect_stdout, redirect_stderr
import numpy as np
import pandas as pd
import xarray as xr
import pyomo.core as po # pylint: disable=import-error
from pyomo.opt import SolverFactory # pylint: disable=import-error
# pyomo.environ is needed for pyomo solver plugins
import pyomo.environ # pylint: disable=unused-import,import-error
# TempfileManager is required to set log directory
from pyutilib.services import TempfileManager # pylint: disable=import-error
from calliope.backend.pyomo.util import get_var
from calliope.core.util.tools import load_function
from calliope.core.util.logging import LogWriter, logger
from calliope.core.util.dataset import reorganise_dataset_dimensions
from calliope import exceptions
def generate_model(model_data):
"""
Generate a Pyomo model.
"""
backend_model = po.ConcreteModel()
mode = model_data.attrs['run.mode'] # 'plan' or 'operate'
backend_model.mode = mode
# Sets
for coord in list(model_data.coords):
set_data = list(model_data.coords[coord].data)
# Ensure that time steps are pandas.Timestamp objects
if isinstance(set_data[0], np.datetime64):
set_data = pd.to_datetime(set_data)
setattr(
backend_model, coord,
po.Set(initialize=set_data, ordered=True)
)
# "Parameters"
model_data_dict = {
'data': {
k: v.to_series().dropna().replace('inf', np.inf).to_dict()
for k, v in model_data.data_vars.items()
if v.attrs['is_result'] == 0 or v.attrs.get('operate_param', 0) == 1
},
'dims': {
k: v.dims
for k, v in model_data.data_vars.items()
if v.attrs['is_result'] == 0 or v.attrs.get('operate_param', 0) == 1
},
'sets': list(model_data.coords),
        'attrs': {k: v for k, v in model_data.attrs.items() if k != 'defaults'}
}
# Dims in the dict's keys are ordered as in model_data, which is enforced
# in model_data generation such that timesteps are always last and the
# remainder of dims are in alphabetic order
backend_model.__calliope_model_data__ = model_data_dict
backend_model.__calliope_defaults__ = (
ruamel.yaml.load(model_data.attrs['defaults'], Loader=ruamel.yaml.Loader)
)
for k, v in model_data_dict['data'].items():
if k in backend_model.__calliope_defaults__.keys():
setattr(
backend_model, k,
po.Param(*[getattr(backend_model, i)
for i in model_data_dict['dims'][k]],
initialize=v, mutable=True,
default=backend_model.__calliope_defaults__[k])
)
elif k == 'timestep_resolution' or k == 'timestep_weights': # no default value to look up
setattr(
backend_model, k,
po.Param(backend_model.timesteps, initialize=v, mutable=True)
)
elif mode == 'operate' and model_data[k].attrs.get('operate_param') == 1:
setattr(
backend_model, k,
po.Param(getattr(backend_model, model_data_dict['dims'][k][0]),
initialize=v, mutable=True)
)
# Variables
load_function(
'calliope.backend.pyomo.variables.initialize_decision_variables'
)(backend_model)
# Constraints
constraints_to_add = [
'energy_balance.load_constraints',
'dispatch.load_constraints',
'network.load_constraints',
'costs.load_constraints',
'policy.load_constraints'
]
if mode != 'operate':
constraints_to_add.append('capacity.load_constraints')
if hasattr(backend_model, 'loc_techs_conversion'):
constraints_to_add.append('conversion.load_constraints')
if hasattr(backend_model, 'loc_techs_conversion_plus'):
constraints_to_add.append('conversion_plus.load_constraints')
if hasattr(backend_model, 'loc_techs_milp') or hasattr(backend_model, 'loc_techs_purchase'):
constraints_to_add.append('milp.load_constraints')
    # Export comes last as it can add to the cost expression; this could be
# overwritten if it doesn't come last
if hasattr(backend_model, 'loc_techs_export'):
constraints_to_add.append('export.load_constraints')
for c in constraints_to_add:
load_function(
'calliope.backend.pyomo.constraints.' + c
)(backend_model)
# FIXME: Optional constraints
# optional_constraints = model_data.attrs['constraints']
# if optional_constraints:
# for c in optional_constraints:
# self.add_constraint(load_function(c))
# Objective function
# FIXME re-enable loading custom objectives
# fetch objective function by name, pass through objective options
# if they are present
objective_function = ('calliope.backend.pyomo.objective.' +
model_data.attrs['run.objective'])
objective_args = dict([(k.split('.')[-1], v)
for k, v in model_data.attrs.items()
if (k.startswith('run.objective_options'))
])
load_function(objective_function)(backend_model, **objective_args)
# delattr(backend_model, '__calliope_model_data__')
return backend_model
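# Illustrative usage sketch (an orientation aid, not part of the documented
# API): the functions in this module are typically chained roughly as
#
#     backend_model = generate_model(model_data)
#     results = solve_model(backend_model, solver='glpk')
#     termination = load_results(backend_model, results)
#     result_arrays = get_result_array(backend_model, model_data)
#
# where `model_data` is the xarray Dataset produced by Calliope's model
# preprocessing.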
def solve_model(backend_model, solver,
solver_io=None, solver_options=None, save_logs=False,
**solve_kwargs):
"""
Solve a Pyomo model using the chosen solver and all necessary solver options
Returns a Pyomo results object
"""
opt = SolverFactory(solver, solver_io=solver_io)
if solver_options:
for k, v in solver_options.items():
opt.options[k] = v
if save_logs:
solve_kwargs.update({
'symbolic_solver_labels': True,
'keepfiles': True
})
os.makedirs(save_logs, exist_ok=True)
TempfileManager.tempdir = save_logs # Sets log output dir
if 'warmstart' in solve_kwargs.keys() and solver == 'glpk':
exceptions.ModelWarning(
            'The chosen solver, GLPK, does not support warmstart, which may '
'impact performance.'
)
del solve_kwargs['warmstart']
with redirect_stdout(LogWriter('solver', strip=True)):
with redirect_stderr(LogWriter('error', strip=True)):
results = opt.solve(backend_model, tee=True, **solve_kwargs)
return results
def load_results(backend_model, results):
"""Load results into model instance for access via model variables."""
not_optimal = (
results['Solver'][0]['Termination condition'].key != 'optimal'
)
this_result = backend_model.solutions.load_from(results)
if this_result is False or not_optimal:
logger.critical('Problem status:')
for l in str(results.Problem).split('\n'):
logger.critical(l)
logger.critical('Solver status:')
for l in str(results.Solver).split('\n'):
logger.critical(l)
if not_optimal:
message = 'Model solution was non-optimal.'
else:
message = 'Could not load results into model instance.'
exceptions.BackendWarning(message)
return results['Solver'][0]['Termination condition'].key
def get_result_array(backend_model, model_data):
"""
From a Pyomo model object, extract decision variable data and return it as
an xarray Dataset. Any rogue input parameters that are constructed inside
the backend (instead of being passed by calliope.Model().inputs) are also
added to calliope.Model()._model_data in-place.
"""
all_variables = {
i.name: get_var(backend_model, i.name) for i in backend_model.component_objects()
if isinstance(i, po.base.var.IndexedVar)
}
# Get any parameters that did not appear in the user's model.inputs Dataset
all_params = {
i.name: get_var(backend_model, i.name)
for i in backend_model.component_objects()
if isinstance(i, po.base.param.IndexedParam) and
i.name not in model_data.data_vars.keys()
}
results = reorganise_dataset_dimensions(xr.Dataset(all_variables))
if all_params:
additional_inputs = reorganise_dataset_dimensions(xr.Dataset(all_params))
for var in additional_inputs.data_vars:
additional_inputs[var].attrs['is_result'] = 0
model_data.update(additional_inputs)
return results
| apache-2.0 |
cmusatyalab/opendiamond | opendiamond/proxy/search.py | 1 | 22985 | #
# The OpenDiamond Platform for Interactive Search
#
# Copyright (c) 2011-2019 Carnegie Mellon University
# All rights reserved.
#
# This software is distributed under the terms of the Eclipse Public
# License, Version 1.0 which can be found in the file named LICENSE.
# ANY USE, REPRODUCTION OR DISTRIBUTION OF THIS SOFTWARE CONSTITUTES
# RECIPIENT'S ACCEPTANCE OF THIS AGREEMENT
#
'''Search state; control and blast channel handling.'''
import binascii
import uuid
from collections import deque, defaultdict
from hashlib import sha256
import struct
from functools import wraps
import logging
import multiprocessing as mp
import os
import signal
import threading
import json
import io
import random
import numpy as np
import requests
from requests.adapters import HTTPAdapter
from requests.packages.urllib3.util.retry import Retry
import time
from io import BytesIO
from opendiamond import protocol
from opendiamond.protocol import (
DiamondRPCFailure, DiamondRPCFCacheMiss, DiamondRPCCookieExpired,
DiamondRPCSchemeNotSupported)
from opendiamond.protocol import (
XDR_setup, XDR_filter_config, XDR_blob_data, XDR_start, XDR_reexecute,
DiamondRPCFCacheMiss)
from opendiamond.rpc import RPCHandlers, RPCError, RPCProcedureUnavailable
from opendiamond.scope import get_cookie_map, ScopeCookie, ScopeError, ScopeCookieExpired
from opendiamond.client.rpc import ControlConnection, BlastConnection
from opendiamond.server.object_ import EmptyObject
from opendiamond.rpc import RPCError, ConnectionFailure
from opendiamond.proxy.filter import ProxyFilter
from opendiamond.proxy.utils import load_dataset_from_zipfile
from sklearn.svm import SVC
from zipfile import ZipFile
logging.info('Starting logger for...')
_log = logging.getLogger(__name__)
_log.setLevel(logging.DEBUG)
proxy_model = None
model_lock = threading.Lock()
dropped_count = 0
true_dropped = 0
stats_lock = threading.Lock()
class ProxySearch(RPCHandlers):
'''State for a single search, plus handlers for control channel RPCs
to modify it.'''
log_rpcs = True
running = False
def __init__(self, blast_conn, docker_address):
RPCHandlers.__init__(self)
self._blast_conn = blast_conn
self.blast_start = False
self._scope = None
self._connections = None
self.blob_map = {}
self._running = False
self.proxy_filter = None
self.docker_address = docker_address
self.current_stats = {}
self.previous_stats = {'objs_passed': 0,
'objs_unloadable': 0,
'objs_dropped': 0,
'objs_total': 0,
'objs_true_positive': 0,
'objs_processed': 0,
'objs_false_negative': 0,
'avg_obj_time_us': 0}
def shutdown(self):
'''Clean up the search before the process exits.'''
# Clean up the resource context before terminate() to avoid corrupting the shared data structures.
ProxySearch.running = False
if self._connections:
for c in self._connections.values():
c.close()
try:
os.kill(os.getpid(), signal.SIGKILL)
except OSError:
pass
# This is not a static method: it's only called when initializing the
# class, and the staticmethod() decorator does not create a callable.
# Also avoid complaints about accesses to self._running
# pylint: disable=no-self-argument,protected-access
def running(should_be_running):
'''Decorator that specifies that the handler can only be called
before, or after, the search has started running.'''
def decorator(func):
@wraps(func)
def wrapper(self, *args, **kwargs):
if self._running != should_be_running:
raise RPCProcedureUnavailable()
return func(self, *args, **kwargs)
return wrapper
return decorator
# pylint: enable=no-self-argument,protected-access
def _check_runnable(self):
'''Validate state preparatory to starting a search or reexecution.'''
if self._scope is None:
raise DiamondRPCFailure('No search scope configured')
if not self._filters:
raise DiamondRPCFailure('No filters configured')
@RPCHandlers.handler(25, protocol.XDR_setup, protocol.XDR_blob_list)
@running(False)
def setup(self, params):
self.blast_start = False
_log.info('Set up called {}'.format(params))
def log_header(desc):
_log.info(' %s:', desc)
def log_item(key, fmt, *args):
_log.info(' %-14s ' + fmt, key + ':', *args)
#Scope Cookie Parameters
try:
cookies = [ScopeCookie.parse(c) for c in params.cookies]
_log.info('Scope cookies:')
for cookie in cookies:
log_header(cookie.serial)
log_item('Servers', '%s', ', '.join(cookie.servers))
log_item('Scopes', '%s', ', '.join(cookie.scopeurls))
log_item('Expires', '%s', cookie.expires)
except ScopeCookieExpired as e:
_log.warning('%s', e)
raise DiamondRPCCookieExpired()
except ScopeError as e:
_log.warning('Cookie invalid: %s', e)
raise DiamondRPCFailure()
self._cookie_map = get_cookie_map(cookies)
self._filters = params.filters
self._scope = cookies
# host -> _DiamondConnection
self._connections = dict((h, _DiamondConnection(h))
for h in self._cookie_map)
self._blast = _DiamondBlastSet(self._blast_conn, self._connections.values())
self.missing_blob_list = set()
proxy_index = 0
#Proxy filter arguments [name, max_num_filters, majority_frac, threshold]
for i, f in enumerate(self._filters):
if f.name == "PROXY":
self.proxy_filter = ProxyFilter(f)
proxy_index = i
self.missing_blob_list.add(f.code)
self.missing_blob_list.add(f.blob)
#Removing proxy filter from filter list
if self.proxy_filter:
self._filters.pop(proxy_index)
for h, c in self._connections.items():
try:
c.connect()
except:
_log.error("Can't start search on %s. "
"May be expired cookie, corrupted filters, "
"network failure, service not running, "
"no space on disk, etc.?", h)
raise
return protocol.XDR_blob_list(self.missing_blob_list)
    def get_features_initial(self):
print("PROXY STARTED {}".format(self.docker_address))
port = 5000
mpredict_url = 'http://{}:{}/mpredict'.format(self.docker_address, port)
predict_url = 'http://{}:{}/predict'.format(self.docker_address, port)
retry = Retry(total=5, backoff_factor=0.3, method_whitelist=False)
adapter = HTTPAdapter(max_retries=retry)
request_session = requests.Session()
request_session.mount('http://', adapter)
request_session.mount('https://', adapter)
print("Training SVM on Input ZipFile...")
#Extract images from zipfile
proxy_blob = ZipFile(BytesIO(self.blob_map[self.proxy_filter.blob]), 'r')
dir_names, dataset = load_dataset_from_zipfile(proxy_blob)
#Get feature-vectors
for label, dir_name in enumerate(dir_names):
files_dct = dict((str(i), io.BytesIO(data))
for i, data in enumerate(dataset[dir_name]))
payload = {'cache': 'true'}
response = request_session.post(mpredict_url,
files=files_dct, data=payload)
assert response.ok
results = response.json()
for filename, res in results.items():
if res['success']:
feature = res['feature']
if label == 1:
self.proxy_filter.addItem(label, filename, feature)
else:
self.proxy_filter.addItem(label, None, feature)
else:
print("Cannot get DNN features for example {} in label {}: {}"
.format(filename, dir_name, res['error']))
return
@RPCHandlers.handler(26, protocol.XDR_blob_data)
@running(False)
def send_blobs(self, params):
'''Add blobs to the blob cache.'''
_log.info('Received %d blobs, %d bytes', len(params.blobs),
sum([len(b) for b in params.blobs]))
#Store filter hashcode in hashmap
for b in params.blobs:
assert isinstance(b, (str, bytes))
b = b if isinstance(b, bytes) else b.encode()
hashcode = 'sha256:' + sha256(b).hexdigest()
self.blob_map[hashcode] = b
if self.proxy_filter:
            self.get_features_initial()
#Train Ensemble
print("Started model Training...")
self.proxy_filter.trainEnsemble()
new_model = self.proxy_filter.getPrunedModels()
global proxy_model
with model_lock:
proxy_model = new_model
print("Set new model")
for h, c in self._connections.items():
try:
missing_blob = []
missing = c.setup(self._cookie_map[h], self._filters)
for m in missing.uris:
missing_blob.append(self.blob_map[m])
if missing_blob:
c.send_blobs(missing_blob)
except:
_log.error("Can't start search on")
return
@RPCHandlers.handler(28, protocol.XDR_start)
@running(False)
def start(self, params):
'''Start the search.'''
self.blast_start = False
try:
self._check_runnable()
except RPCError as e:
_log.warning('Cannot start search: %s', str(e))
raise
if params.attrs is not None:
push_attrs = set(params.attrs)
else:
# Encode everything
push_attrs = None
        _log.info('Push attributes: %s',
','.join(
params.attrs) if params.attrs else '(everything)')
ProxySearch.running = True
self._running = True
search_id = str(uuid.uuid4())
for h, c in self._connections.items():
c.run_search(search_id.encode(), push_attrs)
#Start Blast channel
self._blast.start()
self.blast_start = True
threading.Thread(target=self.dispatch_blast,args=(self._blast_conn,self._blast)).start()
return
@RPCHandlers.handler(31, protocol.XDR_retrain)
def retrain_filter(self, params):
if not self.proxy_filter or not params.names:
return
if not params.features[0]:
return
ProxySearch.running = False
#ReTrainFilters
self.proxy_filter.addItemList(params)
self.proxy_filter.trainEnsemble()
new_model = self.proxy_filter.getPrunedModels()
global proxy_model
with model_lock:
proxy_model = new_model
print("New model created of Length:{}".format(len(proxy_model)))
ProxySearch.running = True
return
@RPCHandlers.handler(30, protocol.XDR_reexecute,
protocol.XDR_attribute_list)
def reexecute_filters(self, params):
# Just Pass
try:
print(params.object_id)
print(params.hostname)
print(self._connections[params.hostname])
return self._connections[params.hostname].control.reexecute_filters(params)
except ConnectionFailure:
self.shutdown()
except RPCError:
_log.exception('Reexecution Error')
self.shutdown()
@RPCHandlers.handler(29, reply_class=protocol.XDR_search_stats)
@running(True)
def request_stats(self):
def combine_into(dest, src):
for stat in src:
dest.setdefault(stat.name, 0)
dest[stat.name] += stat.value
def get_stats(input_dict):
result = []
for k in input_dict.keys():
global dropped_count
global true_dropped
with stats_lock:
if k == "objs_processed":
input_dict[k] = input_dict[k] - self.previous_stats[k]
if k == "objs_dropped":
input_dict[k] = input_dict[k] - self.previous_stats[k] + dropped_count
if k == "objs_false_negative":
input_dict[k] = input_dict[k] - self.previous_stats[k]
result.append(protocol.XDR_stat(k, input_dict[k]))
return result
try:
results = [c.control.request_stats() for c in
self._connections.values()]
self.current_stats = {}
filter_stats = {}
for result in results:
combine_into(self.current_stats, result.stats)
for f in result.filter_stats:
combine_into(filter_stats.setdefault(f.name, {}),
f.stats)
filter_stats_list = [ protocol.XDR_filter_stats(k,
get_stats(filter_stats[k]))
for k in filter_stats.keys()]
return(protocol.XDR_search_stats(get_stats(self.current_stats), filter_stats_list))
except RPCError:
_log.exception('Statistics request failed')
self.shutdown()
@RPCHandlers.handler(18, reply_class=protocol.XDR_session_vars)
@running(True)
def session_variables_get(self):
pass
@RPCHandlers.handler(19, protocol.XDR_session_vars)
@running(True)
def session_variables_set(self, params):
pass
@staticmethod
def dispatch_blast(conn, blast_channel):
while True:
try:
#if not blast_channel.queue_empty():
conn.dispatch(blast_channel)
except ConnectionFailure:
break
except RPCError:
_log.exception('Cannot receive blast object')
break
class _DiamondBlastSet(RPCHandlers):
"""
Pool a set of _DiamondConnection's and return results from them as one stream.
"""
def __init__(self, conn, connections):
RPCHandlers.__init__(self)
# Connections that have not finished searching
self._conn = conn
self._connections = set(connections)
self.pending_objs = deque()
self.pending_conns = deque()
self.pending_reqs = deque()
self._started = False
self.running = False
self.high_conf = protocol.XDR_attribute("_score.string", (str(1)+ '\0').encode())
self.mid_conf = protocol.XDR_attribute("_score.string", (str(2)+ '\0').encode())
self.low_conf = protocol.XDR_attribute("_score.string", (str(0)+ '\0').encode())
self.setup_requests()
def setup_requests(self):
for _ in range(len(self._connections)):
self.pending_reqs.append(1)
def queue_empty(self):
return not self.pending_objs
def add_request(self):
self.pending_reqs.append(1)
def decrease_request(self):
if self.pending_reqs:
self.pending_reqs.popleft()
def get_prediction(self, features):
global proxy_model
#Assuming num_classes = 2
        #Ideally num_classes could be derived from the max of model.classes_
num_classes = 2
with model_lock:
num_models = len(proxy_model)
num_items = features.shape[0]
pred_proba_list = np.zeros((num_items, num_classes, num_models))
for j, m in enumerate(proxy_model):
pred = m.predict_proba(features)
pred_proba_list[:,m.classes_,j] = pred
        pred_mean = np.mean(pred_proba_list, axis=-1)
return pred_mean[:,1]
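    # Worked example of the averaging above (illustrative numbers only): with
    # two models whose predict_proba outputs for a single item are [0.1, 0.9]
    # and [0.3, 0.7], pred_proba_list stacks them, pred_mean becomes
    # [[0.2, 0.8]], and the returned positive-class score is 0.8.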
def fetch_object(self):
global proxy_model
while True:
while not self.pending_objs:
pass #wait till present
obj = self.pending_objs.popleft()
with model_lock:
if not proxy_model:
return obj
gt_present = False
features = None
for a in obj.attrs:
if a.name == "feature_vector.json":
features = np.array([json.loads(a.value)])
if a.name == "_gt_label":
gt_present = True
if features is None:
return obj
pred = self.get_prediction(features)
pred_attr = protocol.XDR_attribute("prediction.string", (str(pred)+ '\0').encode())
obj.attrs.append(pred_attr)
if pred > 0.85:
obj.attrs.append(self.high_conf)
elif pred < 0.4:
# only send 2% of rejected items
sample = np.random.uniform()
if gt_present:
print("FN pred:{}".format(pred))
#TODO Count this in FN
if sample > 0.02:
continue
global dropped_count
with stats_lock:
dropped_count += 1
obj.attrs.append(self.low_conf)
else:
obj.attrs.append(self.mid_conf)
return obj
@RPCHandlers.handler(2, reply_class=protocol.XDR_object)
def get_object(self):
while not ProxySearch.running:
self.pending_objs.clear()
self.pending_reqs.clear()
#self._started = False
self.setup_requests()
time.sleep(2)
continue
if not self._started:
self.setup_requests()
time.sleep(2)
self._started = True
self.add_request()
obj = self.fetch_object()
self.decrease_request()
return obj
def start(self):
"""
        Start worker threads that feed search results from the underlying
        connections into the pending object queue. Returns None.
"""
self._started = True
self._try_start()
self.running = True
return
def _try_start(self):
"""A generator yielding search results from
all underlying DiamondConnection's."""
if self._started:
def worker(handler):
try:
while True:
if not ProxySearch.running:
continue
obj = next(handler)
self.pending_objs.append(obj)
except StopIteration:
print("Stop request called")
pass
finally:
self.pending_conns.popleft()
for conn in self._connections:
self.pending_conns.append(1) # just a token
threading.Thread(target=worker,
args=(self._handle_objects(conn,self.pending_reqs),)).start()
@staticmethod
def _handle_objects(conn, req_queue):
"""A generator yielding search results from a DiamondConnection."""
while True:
if not ProxySearch.running:
continue
try:
while not req_queue:
pass
dct = conn.get_result()
except ConnectionFailure:
break
except RPCError:
_log.exception('Cannot receive blast object')
conn.close()
break
if not len(dct.attrs):
break
yield dct
class _DiamondConnection(object):
def __init__(self, address):
self._finished = False # No more results
self._closed = False # Connection closed
self.address = address
self.control = ControlConnection()
self.blast = BlastConnection()
def connect(self):
_log.info("Creating control channel to %s", self.address)
nonce = self.control.connect(self.address)
_log.info("Done. Nonce %s", binascii.hexlify(nonce))
_log.info("Creating blast channel. Nonce %s", binascii.hexlify(nonce))
self.blast.connect(self.address, nonce)
_log.info("Done.")
@staticmethod
def _blob_uri(blob):
return 'sha256:' + blob.sha256
def setup(self, cookies, filters):
# Send setup request
request = XDR_setup(
cookies=[c.encode() for c in cookies],
filters=[XDR_filter_config(
name=f.name,
arguments=f.arguments,
dependencies=f.dependencies,
min_score=f.min_score,
max_score=f.max_score,
code=f.code,
blob=f.blob
) for f in filters],
)
reply = self.control.setup(request)
return reply
def send_blobs(self, blobs):
blob = XDR_blob_data(blobs=blobs)
self.control.send_blobs(blob)
def run_search(self, search_id, attrs=None):
_log.info("Running search %s", search_id)
request = XDR_start(search_id=search_id, attrs=attrs)
self.control.start(request)
def get_result(self):
"""
:return: a dictionary of received attribute-value pairs of an object.
        An empty result is returned when the search terminates.
"""
reply = self.blast.get_object()
return reply
def evaluate(self, cookies, filters, blob, attrs=None):
"""
Also known as re-execution.
:param cookies:
:param filters:
:param blob:
:param attrs:
:return: A dictionary of the re-executed object's attribute-value pairs.
"""
self.connect()
self.setup(cookies, filters)
# Send reexecute request
request = XDR_reexecute(object_id=self._blob_uri(blob), attrs=attrs)
try:
reply = self.control.reexecute_filters(request)
except DiamondRPCFCacheMiss:
# Send object data and retry
self.control.send_blobs(XDR_blob_data(blobs=[str(blob)]))
reply = self.control.reexecute_filters(request)
# Return object attributes
dct = dict((attr.name, attr.value) for attr in reply.attrs)
return dct
def close(self):
if not self._closed:
self._closed = True
self.control.close()
self.blast.close()
| epl-1.0 |
lukebarnard1/bokeh | bokeh/session.py | 42 | 20253 | ''' The session module provides the Session class, which encapsulates a
connection to a Document that resides on a Bokeh server.
The Session class provides methods for creating, loading and storing
documents and objects, as well as methods for user-authentication. These
are useful when the server is run in multi-user mode.
'''
from __future__ import absolute_import, print_function
#--------
# logging
#--------
import logging
logger = logging.getLogger(__name__)
#-------------
# standard lib
#-------------
import time
import json
from os import makedirs
from os.path import expanduser, exists, join
import tempfile
#------------
# third party
#------------
from six.moves.urllib.parse import urlencode
from requests.exceptions import ConnectionError
#---------
# optional
#---------
try:
import pandas as pd
import tables
has_pandas = True
except ImportError as e:
has_pandas = False
#--------
# project
#--------
from . import browserlib
from . import protocol
from .embed import autoload_server
from .exceptions import DataIntegrityException
from .util.notebook import publish_display_data
from .util.serialization import dump, get_json, urljoin
DEFAULT_SERVER_URL = "http://localhost:5006/"
class Session(object):
""" Encapsulate a connection to a document stored on a Bokeh Server.
Args:
name (str, optional) : name of server
root_url (str, optional) : root url of server
userapikey (str, optional) : (default: "nokey")
username (str, optional) : (default: "defaultuser")
load_from_config (bool, optional) :
Whether to load login information from config. (default: True)
If False, then we may overwrite the user's config.
configdir (str) : location of user configuration information
Attributes:
base_url (str) :
configdir (str) :
configfile (str) :
http_session (requests.session) :
userapikey (str) :
userinfo (dict) :
username (str) :
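    Example (illustrative sketch; assumes a running bokeh-server at
    ``root_url`` and a previously constructed bokeh ``Document`` named
    ``doc``)::

        session = Session(root_url="http://localhost:5006/")
        session.use_doc("my_document")
        session.store_document(doc)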
"""
def __init__(
self,
name = DEFAULT_SERVER_URL,
root_url = DEFAULT_SERVER_URL,
userapikey = "nokey",
username = "defaultuser",
load_from_config = True,
configdir = None,
):
self.name = name
if not root_url.endswith("/"):
logger.warning("root_url should end with a /, adding one")
root_url = root_url + "/"
self.root_url = root_url
# single user mode case
self.userapikey = userapikey
self.username = username
self._configdir = None
if configdir:
self.configdir = configdir
if load_from_config:
self.load()
@property
def http_session(self):
if hasattr(self, "_http_session"):
return self._http_session
else:
import requests
self._http_session = requests.session()
return self._http_session
@property
def username(self):
return self.http_session.headers.get('BOKEHUSER')
@username.setter
def username(self, val):
self.http_session.headers.update({'BOKEHUSER': val})
@property
def userapikey(self):
return self.http_session.headers.get('BOKEHUSER-API-KEY')
@userapikey.setter
def userapikey(self, val):
self.http_session.headers.update({'BOKEHUSER-API-KEY': val})
@property
def configdir(self):
""" filename where our config are stored. """
if self._configdir:
return self._configdir
bokehdir = join(expanduser("~"), ".bokeh")
if not exists(bokehdir):
makedirs(bokehdir)
return bokehdir
# for testing
@configdir.setter
def configdir(self, path):
self._configdir = path
@property
def configfile(self):
return join(self.configdir, "config.json")
def load_dict(self):
configfile = self.configfile
if not exists(configfile):
data = {}
else:
with open(configfile, "r") as f:
data = json.load(f)
return data
def load(self):
""" Loads the server configuration information from disk
Returns:
None
"""
config_info = self.load_dict().get(self.name, {})
print("Using saved session configuration for %s" % self.name)
print("To override, pass 'load_from_config=False' to Session")
self.root_url = config_info.get('root_url', self.root_url)
self.userapikey = config_info.get('userapikey', self.userapikey)
self.username = config_info.get('username', self.username)
def save(self):
""" Save the server configuration information to JSON
Returns:
None
"""
data = self.load_dict()
data[self.name] = {'root_url': self.root_url,
'userapikey': self.userapikey,
'username': self.username}
configfile = self.configfile
with open(configfile, "w+") as f:
json.dump(data, f)
def register(self, username, password):
''' Register a new user with a bokeh server.
.. note::
This is useful in multi-user mode.
Args:
username (str) : user name to register
password (str) : user password for account
Returns:
None
'''
url = urljoin(self.root_url, "bokeh/register")
result = self.execute('post', url, data={
'username': username,
'password': password,
'api': 'true'
})
if result.status_code != 200:
raise RuntimeError("Unknown Error")
result = get_json(result)
if result['status']:
self.username = username
self.userapikey = result['userapikey']
self.save()
else:
raise RuntimeError(result['error'])
def login(self, username, password):
''' Log a user into a bokeh server.
.. note::
This is useful in multi-user mode.
Args:
username (str) : user name to log in
password (str) : user password
Returns:
None
'''
url = urljoin(self.root_url, "bokeh/login")
result = self.execute('post', url, data={
'username': username,
'password': password,
'api': 'true'
})
if result.status_code != 200:
raise RuntimeError("Unknown Error")
result = get_json(result)
if result['status']:
self.username = username
self.userapikey = result['userapikey']
self.save()
else:
raise RuntimeError(result['error'])
self.save()
def browser_login(self):
""" Open a browser with a token that logs the user into a bokeh server.
.. note::
This is useful in multi-user mode.
Return:
None
"""
controller = browserlib.get_browser_controller()
url = urljoin(self.root_url, "bokeh/loginfromapikey")
url += "?" + urlencode({'username': self.username,
'userapikey': self.userapikey})
controller.open(url)
def data_source(self, name, data):
""" Makes and uploads a server data source to the server.
.. note::
The server must be configured with a data directory.
Args:
name (str) : name for the data source object
data (pd.DataFrame or np.array) : data to upload
Returns:
a ServerDataSource
"""
raise NotImplementedError
def list_data(self):
""" Return all the data soruces on the server.
Returns:
sources : JSON
"""
raise NotImplementedError
def publish(self):
url = urljoin(self.root_url, "/bokeh/%s/publish" % self.docid)
self.post_json(url)
def execute(self, method, url, headers=None, **kwargs):
""" Execute an HTTP request using the current session.
Returns the response
Args:
method (string) : 'get' or 'post'
url (string) : url
headers (dict, optional) : any extra HTTP headers
Keyword Args:
Any extra arguments to pass into the requests library
Returns:
response
"""
import requests
import warnings
func = getattr(self.http_session, method)
try:
resp = func(url, headers=headers, **kwargs)
except requests.exceptions.ConnectionError as e:
warnings.warn("You need to start the bokeh-server to see this example.")
raise e
if resp.status_code == 409:
raise DataIntegrityException
if resp.status_code == 401:
raise Exception('HTTP Unauthorized accessing')
return resp
def execute_json(self, method, url, headers=None, **kwargs):
""" same as execute, except ensure that json content-type is
set in headers and interprets and returns the json response
"""
if headers is None:
headers = {}
headers['content-type'] = 'application/json'
resp = self.execute(method, url, headers=headers, **kwargs)
return get_json(resp)
def get_json(self, url, headers=None, **kwargs):
""" Return the result of an HTTP 'get'.
Args:
url (str) : the URL for the 'get' request
headers (dict, optional) : any extra HTTP headers
Keyword Args:
Any extra arguments to pass into the requests library
Returns:
response: JSON
"""
return self.execute_json('get', url, headers=headers, **kwargs)
def post_json(self, url, headers=None, **kwargs):
""" Return the result of an HTTP 'post'
Args:
            url (str) : the URL for the 'post' request
headers (dict, optional) : any extra HTTP headers
Keyword Args:
Any extra arguments to pass into the requests library
Returns:
response: JSON
"""
return self.execute_json('post', url, headers=headers, **kwargs)
@property
def userinfo(self):
if not hasattr(self, "_userinfo"):
url = urljoin(self.root_url, 'bokeh/userinfo/')
self._userinfo = self.get_json(url)
return self._userinfo
@userinfo.setter
def userinfo(self, val):
self._userinfo = val
@property
def base_url(self):
return urljoin(self.root_url, "bokeh/bb/")
def get_api_key(self, docid):
""" Retrieve the document API key from the server.
Args:
            docid (string) : docid of the document to retrieve API key for
Returns:
apikey : string
"""
url = urljoin(self.root_url,"bokeh/getdocapikey/%s" % docid)
apikey = self.get_json(url)
if 'apikey' in apikey:
apikey = apikey['apikey']
logger.info('got read write apikey')
else:
apikey = apikey['readonlyapikey']
logger.info('got read only apikey')
return apikey
def find_doc(self, name):
""" Return the docid of the document with a title matching ``name``.
.. note::
Creates a new document with the given title if one is not found.
Args:
name (string) : name for the document
Returns:
docid : str
"""
docs = self.userinfo.get('docs')
matching = [x for x in docs if x.get('title') == name]
if len(matching) == 0:
logger.info("No documents found, creating new document '%s'" % name)
self.make_doc(name)
return self.find_doc(name)
elif len(matching) > 1:
logger.warning("Multiple documents with name '%s'" % name)
return matching[0]['docid']
def use_doc(self, name=None, docid=None):
""" Configure the session to use a given document.
Args:
name (str, optional) : name of the document to use
docid (str, optional) : id of the document to use
.. note::
only one of ``name`` or ``docid`` may be supplied.
        Creates a document with the given name if one is not present on
the server.
Returns:
None
"""
if docid is not None and name is not None:
raise ValueError("only one of 'name' or 'docid' can be supplied to use_doc(...)")
if docid:
self.docid = docid
else:
self.docid = self.find_doc(name)
self.apikey = self.get_api_key(self.docid)
def make_doc(self, title):
""" Makes a new document with the given title on the server
.. note:: user information is reloaded
Returns:
None
"""
url = urljoin(self.root_url,"bokeh/doc/")
data = protocol.serialize_json({'title' : title})
self.userinfo = self.post_json(url, data=data)
def pull(self, typename=None, objid=None):
""" Pull JSON objects from the server.
Returns a specific object if both ``typename`` and ``objid`` are
supplied. Otherwise, returns all objects for the currently configured
document.
This is a low-level function.
Args:
typename (str, optional) : name of the type of object to pull
objid (str, optional) : ID of the object to pull
.. note::
you must supply either ``typename`` AND ``objid`` or omit both.
Returns:
attrs : JSON
"""
if typename is None and objid is None:
url = urljoin(self.base_url, self.docid +"/")
attrs = self.get_json(url)
elif typename is None or objid is None:
raise ValueError("typename and objid must both be None, or neither.")
else:
url = urljoin(
self.base_url,
self.docid + "/" + typename + "/" + objid + "/"
)
attr = self.get_json(url)
attrs = [{
'type': typename,
'id': objid,
'attributes': attr
}]
return attrs
def push(self, *jsonobjs):
""" Push JSON objects to the server.
This is a low-level function.
Args:
*jsonobjs (JSON) : objects to push to the server
Returns:
None
"""
data = protocol.serialize_json(jsonobjs)
url = urljoin(self.base_url, self.docid + "/", "bulkupsert")
self.post_json(url, data=data)
def gc(self):
url = urljoin(self.base_url, self.docid + "/", "gc")
self.post_json(url)
# convenience functions to use a session and store/fetch from server
def load_document(self, doc):
""" Loads data for the session and merge with the given document.
Args:
doc (Document) : document to load data into
Returns:
None
"""
self.gc()
json_objs = self.pull()
doc.merge(json_objs)
doc.docid = self.docid
def load_object(self, obj, doc):
""" Update an object in a document with data pulled from the server.
Args:
obj (PlotObject) : object to be updated
doc (Document) : the object's document
Returns:
None
"""
assert obj._id in doc._models
attrs = self.pull(typename=obj.__view_model__, objid=obj._id)
doc.load(*attrs)
def store_document(self, doc, dirty_only=True):
""" Store a document on the server.
Returns the models that were actually pushed.
Args:
doc (Document) : the document to store
dirty_only (bool, optional) : whether to store only dirty objects. (default: True)
Returns:
models : list[PlotObject]
"""
doc._add_all()
models = doc._models.values()
if dirty_only:
models = [x for x in models if getattr(x, '_dirty', False)]
json_objs = doc.dump(*models)
self.push(*json_objs)
for model in models:
model._dirty = False
return models
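    # Illustrative sketch (not part of the original module): pushing a whole
    # document versus a handful of objects; `doc` and `plot` are assumed to be
    # an existing Document and PlotObject.
    #
    #     changed = session.store_document(doc)              # dirty models only, by default
    #     stored  = session.store_objects(plot, dirty_only=False)
    #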
def store_objects(self, *objs, **kwargs):
""" Store objects on the server
Returns the objects that were actually stored.
Args:
*objs (PlotObject) : objects to store
        Keyword Args:
dirty_only (bool, optional) : whether to store only dirty objects. (default: True)
Returns:
models : set[PlotObject]
"""
models = set()
for obj in objs:
models.update(obj.references())
if kwargs.pop('dirty_only', True):
models = list(models)
json_objs = dump(models, self.docid)
self.push(*json_objs)
for model in models:
model._dirty = False
return models
def object_link(self, obj):
""" Return a URL to a server page that will render the given object.
Args:
obj (PlotObject) : object to render
Returns:
URL string
"""
link = "bokeh/doc/%s/%s" % (self.docid, obj._id)
return urljoin(self.root_url, link)
def show(self, obj):
""" Display an object as HTML in IPython using its display protocol.
Args:
obj (PlotObject) : object to display
Returns:
None
"""
data = {'text/html': autoload_server(obj, self)}
publish_display_data(data)
def poll_document(self, document, interval=0.5):
""" Periodically ask the server for updates to the `document`. """
try:
while True:
self.load_document(document)
time.sleep(interval)
except KeyboardInterrupt:
print()
except ConnectionError:
print("Connection to bokeh-server was terminated")
# helper methods
def _prep_data_source_df(self, name, dataframe):
name = tempfile.NamedTemporaryFile(prefix="bokeh_data",
suffix=".pandas").name
store = pd.HDFStore(name)
store.append("__data__", dataframe, format="table", data_columns=True)
store.close()
return name
def _prep_data_source_numpy(self, name, arr):
name = tempfile.NamedTemporaryFile(prefix="bokeh_data",
suffix=".table").name
store = tables.File(name, 'w')
store.createArray("/", "__data__", obj=arr)
store.close()
return name
class TestSession(Session):
"""Currently, register and login do not work, everything else should work
in theory, but we'll have to test this as we go along and convert tests
"""
def __init__(self, *args, **kwargs):
if 'load_from_config' not in kwargs:
kwargs['load_from_config'] = False
self.client = kwargs.pop('client')
self.headers = {}
super(TestSession, self).__init__(*args, **kwargs)
@property
def username(self):
return self.headers.get('BOKEHUSER')
@username.setter
def username(self, val):
self.headers.update({'BOKEHUSER': val})
@property
def userapikey(self):
return self.headers.get('BOKEHUSER-API-KEY')
@userapikey.setter
def userapikey(self, val):
self.headers.update({'BOKEHUSER-API-KEY': val})
def execute(self, method, url, headers=None, **kwargs):
if headers is None:
headers = {}
func = getattr(self.client, method)
resp = func(url, headers=headers, **kwargs)
if resp.status_code == 409:
raise DataIntegrityException
if resp.status_code == 401:
            raise Exception('HTTP Unauthorized when accessing %s' % url)
return resp
| bsd-3-clause |
binghongcha08/pyQMD | QTM_F/1D/pH2/cor.py | 2 | 1025 | ##!/usr/bin/python
import numpy as np
import pylab as plt
import matplotlib as mpl
import seaborn as sns
sns.set_context("poster",font_scale=1.5)
sns.set_style({'font.family':'Times New Roman'})
mpl.rcParams['lines.linewidth'] = 2
data = np.genfromtxt(fname='cor.dat')
ncols = data.shape[1]
#for x in range(1,ncols):
#plt.plot(data[:,0],data[:,1],linewidth=2,label='$\Re(C_{xx})$')
plt.plot(data[:,0],data[:,2],linewidth=2,label='$\Im(C_{11})$')
plt.plot(data[:,0],data[:,4],linewidth=2,label='$\Im(C_{22})$')
plt.plot(data[:,0],data[:,6],linewidth=2,label='$\Im(C_{33})$')
plt.plot(data[:,0],data[:,8],linewidth=2,label='$\Im(C_{44})$')
plt.plot(data[:,0],data[:,10],linewidth=2,label='$\Im(C_{12})$')
#plt.plot(data[:,0],data[:,3],linewidth=2,label='$\Re(C_{yy})$')
#plt.plot(data[:,0],data[:,4],linewidth=2,label='$\Im(C_{yy})$')
#plt.figure(1)
#plt.plot(x,y1,'-')
#plt.plot(x,y2,'g-')
plt.xlim(0,40)
plt.legend(loc=3)
plt.xlabel('Time [a.u.]')
#plt.ylabel('Positions')
plt.savefig('cor.pdf')
plt.show()
| gpl-3.0 |
RobertArbon/YAMLP | SciFlow/NNFlow.py | 1 | 26111 | """
This module implements a Tensorflow single hidden layer neural network as a Scikit learn estimator that is compatible
with Osprey for hyper parameter optimisation.
"""
import numpy as np
from sklearn.base import BaseEstimator, ClassifierMixin
from sklearn.utils.validation import check_X_y, check_array
from sklearn.metrics import mean_squared_error
from sklearn.metrics import mean_absolute_error
import tensorflow as tf
import matplotlib.pyplot as plt
from sklearn.metrics import r2_score
import seaborn as sns
import pandas as pd
class MLPRegFlow(BaseEstimator, ClassifierMixin):
"""
Neural-network with one hidden layer to do regression.
This model optimises the squared error function using the Adam optimiser.
:hidden_layer_sizes: Tuple, length = number of hidden layers, default (0,).
The ith element represents the number of neurons in the ith
        hidden layer. In this version, only one hidden layer is supported, so it shouldn't have
length larger than 1.
:n_units: int, default 45.
Number of neurons in the first hidden layer. This parameter has been added as a hack to make it work with
Osprey.
:alpha: float, default 0.0001
L2 penalty (regularization term) parameter.
:batch_size: int, default 'auto'.
        Size of the minibatches used during training with the Adam optimiser.
        When set to "auto", `batch_size=min(100, n_samples)` (see `checkBatchSize`).
:learning_rate_init: double, default 0.001.
The value of the learning rate in the numerical minimisation.
:max_iter: int, default 200.
Total number of iterations that will be carried out during the training process.
"""
def __init__(self, hidden_layer_sizes=(0,), n_units=45, alpha=0.0001, batch_size='auto', learning_rate_init=0.001,
max_iter=80):
# Initialising the parameters
self.alpha = alpha
self.batch_size = batch_size
self.learning_rate_init = learning_rate_init
self.max_iter = max_iter
# This is needed for Osprey, because you can only do parameter optimisation by passing integers or floats,
# not tuples. So here we need a way of dealing with this.
if hidden_layer_sizes == (0,):
self.hidden_layer_sizes = (n_units,)
else:
self.hidden_layer_sizes = hidden_layer_sizes
# Initialising parameters needed for the Tensorflow part
self.w1 = 0
self.b1 = 0
self.w2 = 0
self.b2 = 0
self.alreadyInitialised = False
self.trainCost = []
self.testCost = []
self.isVisReady = False
def fit(self, X, y, *test):
"""
Fit the model to data matrix X and target y.
:X: array of shape (n_samples, n_features).
This contains the input data with samples in the rows and features in the columns.
:y: array of shape (n_samples,).
This contains the target values for each sample in the X matrix.
:test: list with 1st element an array of shape (n_samples, n_features) and 2nd element an array of shape (n_samples, )
This is a test set to visualise whether the model is overfitting.
"""
print "Starting the fitting process ... \n"
# Check that X and y have correct shape
X, y = check_X_y(X, y)
# Modification of the y data, because tensorflow wants a column vector, while scikit learn uses a row vector
y = np.reshape(y, (len(y), 1))
# Checking if a test set has been passed
if test:
if len(test) > 2:
raise TypeError("foo() expected 2 arguments, got %d" % (len(test)))
X_test = test[0]
y_test = test[1]
check_X_y(X_test, y_test)
y_test = np.reshape(y_test, (len(y_test), 1))
# Check that the architecture has only 1 hidden layer
if len(self.hidden_layer_sizes) != 1:
raise ValueError("hidden_layer_sizes expected a tuple of size 1, it has one of size %d. "
"This model currently only supports one hidden layer. " % (len(self.hidden_layer_sizes)))
self.n_feat = X.shape[1]
self.n_samples = X.shape[0]
# Check the value of the batch size
self.batch_size = self.checkBatchSize()
# Initial set up of the NN
X_train = tf.placeholder(tf.float32, [None, self.n_feat])
Y_train = tf.placeholder(tf.float32, [None, 1])
# This part either randomly initialises the weights and biases or restarts training from wherever it was stopped
if self.alreadyInitialised == False:
eps = 0.01
weights1 = tf.Variable(tf.random_normal([self.hidden_layer_sizes[0], self.n_feat]) * 2 * eps - eps)
bias1 = tf.Variable(tf.zeros([self.hidden_layer_sizes[0]]))
weights2 = tf.Variable(tf.random_normal([1, self.hidden_layer_sizes[0]]) * 2 * eps - eps)
bias2 = tf.Variable(tf.zeros([1]))
parameters = [weights1, bias1, weights2, bias2]
self.alreadyInitialised = True
else:
parameters = [tf.Variable(self.w1), tf.Variable(self.b1), tf.Variable(self.w2), tf.Variable(self.b2)]
model = self.modelNN(X_train, parameters)
cost = self.costReg(model, Y_train, [parameters[0], parameters[2]], self.alpha)
optimizer = tf.train.AdamOptimizer(learning_rate=self.learning_rate_init).minimize(cost)
# Initialisation of the model
init = tf.global_variables_initializer()
# Running the graph
with tf.Session() as sess:
sess.run(init)
for iter in range(self.max_iter):
# This is the total number of batches in which the training set is divided
n_batches = int(self.n_samples / self.batch_size)
# This will be used to calculate the average cost per iteration
avg_cost = 0
# Learning over the batches of data
for i in range(n_batches):
batch_x = X[i * self.batch_size:(i + 1) * self.batch_size, :]
batch_y = y[i * self.batch_size:(i + 1) * self.batch_size, :]
opt, c = sess.run([optimizer, cost], feed_dict={X_train: batch_x, Y_train: batch_y})
avg_cost += c / n_batches
if test and iter % 50 == 0:
                    # Evaluate (but do not train on) the test set, so it can be used to monitor overfitting
                    cTest = sess.run(cost, feed_dict={X_train: X_test, Y_train: y_test})
self.testCost.append(cTest)
self.trainCost.append(avg_cost)
if iter % 100 == 0:
print "Completed " + str(iter) + " iterations. \n"
self.w1 = sess.run(parameters[0])
self.b1 = sess.run(parameters[1])
self.w2 = sess.run(parameters[2])
self.b2 = sess.run(parameters[3])
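    # Illustrative sketch (not part of the original class): passing an optional
    # test set to `fit` so that `plotLearningCurve` can compare train and test
    # cost; the arrays are assumed to have matching feature dimensions.
    #
    #     est.fit(X_train, y_train, X_test, y_test)
    #     est.plotLearningCurve()
    #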
def modelNN(self, X, parameters):
"""
This function calculates the model neural network.
:X: array of shape (n_samples, n_features)
This contains the input data with samples in the rows and features in the columns.
:parameters: array of TensorFlow variables of shape (2*len(hidden_layer_sizes+1), )
It contains the weights and the biases for each hidden layer and the output layer.
:returns: A Tensor with the model of the neural network.
"""
# Definition of the model
a1 = tf.add(tf.matmul(X, tf.transpose(parameters[0])), parameters[1]) # output of layer1, size = n_sample x n_hidden_layer
a1 = tf.nn.sigmoid(a1)
model = tf.add(tf.matmul(a1, tf.transpose(parameters[2])), parameters[3]) # output of last layer, size = n_samples x 1
return model
def costReg(self, model, Y_data, weights, regu):
"""
This function calculates the squared error cost function with L2 regularisation.
:model: tensor
This tensor contains the neural network model.
:Y_data: TensorFlow Place holder
This tensor contains the y part of the data once the graph is initialised.
:weights: array of TensorFlow variables of shape (len(hidden_layer_sizes+1), )
It contains the weights for each hidden layer and the output layer.
:regu: float
The parameter that tunes the amount of regularisation.
:return: tensor
it returns the value of the squared error cost function (TF global_variable):
            cost = sum_over_samples((model - Y_data)**2)/2 + regu * (sum(weights_level_1**2)/2 + sum(weights_level_2**2)/2)
"""
cost = tf.nn.l2_loss(t=(model - Y_data))
regulariser = tf.nn.l2_loss(weights[0]) + tf.nn.l2_loss(weights[1])
cost = tf.reduce_mean(cost + regu * regulariser)
return cost
def plotLearningCurve(self):
"""
This function plots the cost versus the number of iterations for the training set and the test set in the
same plot. The cost on the train set is calculated every 50 iterations.
"""
fig, ax = plt.subplots(figsize=(6, 6))
ax.plot(self.trainCost, label="Train set", color="b")
iterTest = range(0, self.max_iter, 50)
ax.plot(iterTest, self.testCost, label="Test set", color="red")
ax.set_xlabel('Number of iterations')
ax.set_ylabel('Cost Value')
ax.legend()
plt.yscale("log")
plt.show()
def checkBatchSize(self):
"""
This function is called to check if the batch size has to take the default value or a user-set value.
If it is a user set value, it checks whether it is a reasonable value.
:return: int
            The default is 100, or the total number of samples if that is smaller than 100. A user-set value is
            checked to make sure it is not smaller than 1 or larger than the total number of samples.
"""
if self.batch_size == 'auto':
batch_size = min(100, self.n_samples)
else:
if self.batch_size < 1 or self.batch_size > self.n_samples:
print "Warning: Got `batch_size` less than 1 or larger than sample size. It is going to be clipped"
batch_size = np.clip(self.batch_size, 1, self.n_samples)
else:
batch_size = self.batch_size
return batch_size
def checkIsFitted(self):
"""
This function checks whether the weights and biases have been changed from their initial values.
:return: True if the weights and biases are not all zero.
"""
if self.alreadyInitialised == False:
raise StandardError("The fit function has not been called yet")
else:
return True
def predict(self, X):
"""
This function uses the X data and plugs it into the model and then returns the predicted y
:X: array of shape (n_samples, n_features)
This contains the input data with samples in the rows and features in the columns.
:return: array of size (n_samples,)
This contains the predictions for the target values corresponding to the samples contained in X.
"""
print "Calculating the predictions. \n"
if self.checkIsFitted():
check_array(X)
X_test = tf.placeholder(tf.float32, [None, self.n_feat])
parameters = [tf.Variable(self.w1), tf.Variable(self.b1), tf.Variable(self.w2), tf.Variable(self.b2)]
model = self.modelNN(X_test, parameters)
init = tf.global_variables_initializer()
with tf.Session() as sess:
sess.run(init)
predictions = sess.run(model, feed_dict={X_test: X})
predictions = np.reshape(predictions,(predictions.shape[0],))
return predictions
else:
raise StandardError("The fit function has not been called yet, so the model has not been trained yet.")
def score(self, X, y, sample_weight=None):
"""
Returns the mean accuracy on the given test data and labels. It calculates the R^2 value. It is used during the
training of the model.
:X: array of shape (n_samples, n_features)
This contains the input data with samples in the rows and features in the columns.
:y: array of shape (n_samples,)
This contains the target values for each sample in the X matrix.
:sample_weight: array of shape (n_samples,)
            Sample weights (unused here; the argument is kept for compatibility with the BaseEstimator interface)
:return: double
This is a score between -inf and 1 (best value is 1) that tells how good the correlation plot is.
"""
y_pred = self.predict(X)
r2 = r2_score(y, y_pred)
return r2
def scoreFull(self, X, y):
"""
        This scores the predictions more thoroughly than the function 'score'. It calculates the r2, the root mean
square error, the mean absolute error and the largest positive/negative outliers. They are all in the units of
the data passed.
:X: array of shape (n_samples, n_features)
This contains the input data with samples in the rows and features in the columns.
:y: array of shape (n_samples,)
This contains the target values for each sample in the X matrix.
:return:
:r2: double
This is a score between -inf and 1 (best value is 1) that tells how good the correlation plot is.
:rmse: double
This is the root mean square error
:mae: double
This is the mean absolute error
:lpo: double
This is the largest positive outlier.
:lno: double
This is the largest negative outlier.
"""
y_pred = self.predict(X)
r2 = r2_score(y, y_pred)
rmse = np.sqrt(mean_squared_error(y, y_pred))
mae = mean_absolute_error(y, y_pred)
lpo, lno = self.largestOutliers(y, y_pred)
return r2, rmse, mae, lpo, lno
def largestOutliers(self, y_true, y_pred):
"""
This function calculates the larges positive and negative outliers from the predictions of the neural net.
:y_true: array of shape (n_samples,)
This contains the target values for each sample.
:y_pred: array of shape (n_samples,)
This contains the neural network predictions of the target values for each sample.
:return:
:lpo: double
This is the largest positive outlier.
:lno: double
This is the largest negative outlier.
"""
diff = y_pred - y_true
lpo = np.amax(diff)
lno = - np.amin(diff)
return lpo, lno
def errorDistribution(self, X, y):
"""
This function plots histograms of how many predictions have an error in a certain range.
:X: array of shape (n_samples, n_features)
This contains the input data with samples in the rows and features in the columns.
:y: array of shape (n_samples,)
This contains the target values for each sample in the X matrix.
"""
y_pred = self.predict(X)
diff_kJmol = (y - y_pred)*2625.50
df = pd.Series(diff_kJmol, name="Error (kJ/mol)")
# sns.set_style(style='white')
# sns.distplot(df, color="#f1ad1e")
# sns.plt.savefig("ErrorDist.png", transparent=True, dpi=800)
plt.show()
def correlationPlot(self, X, y, ylim=(1.90, 1.78), xlim=(1.90, 1.78)):
"""
This function plots a correlation plot of the values that are in the data set and the NN predictions. It expects
the target values to be in Hartrees.
:X: array of shape (n_samples, n_features)
This contains the input data with samples in the rows and features in the columns.
:y: array of shape (n_samples,)
This contains the target values for each sample in the X matrix.
:ylim: tuple of shape (2,) containing doubles
These are the limits of the y values for the plot.
:xlim: tuple of shape (2,) containing doubles
These are the limits of the x values for the plot.
"""
y_pred = self.predict(X)
df = pd.DataFrame()
df['High level calculated energies (Ha)'] = y
df['NN predicted energies (Ha)'] = y_pred
lm = sns.lmplot('High level calculated energies (Ha)', 'NN predicted energies (Ha)', data=df,
scatter_kws={"s": 20, "alpha": 0.6}, line_kws={"alpha": 0.5})
lm.set(ylim=ylim)
lm.set(xlim=xlim)
plt.show()
def plotWeights(self):
"""
This function plots the weights of the first layer of the neural network as a heat map.
"""
w1_square_tot = []
for i in range(self.hidden_layer_sizes[0]):
w1_square = self.reshape_triang(self.w1[i], 7)
w1_square_tot.append(w1_square)
        n = int(np.ceil(np.sqrt(self.hidden_layer_sizes[0])))
additional = n**2 - self.hidden_layer_sizes[0]
fig, axn = plt.subplots(n, n, sharex=True, sharey=True)
fig.set_size_inches(11.7, 8.27)
cbar_ax = fig.add_axes([.91, .3, .03, .4])
for i, ax in enumerate(axn.flat):
if i >= self.hidden_layer_sizes[0]:
break
df = pd.DataFrame(w1_square_tot[i])
sns.heatmap(df,
ax=ax,
cbar=i == 0,
vmin=-0.2, vmax=0.2,
cbar_ax=None if i else cbar_ax, cmap="PiYG")
fig.tight_layout(rect=[0, 0, 0.9, 1])
sns.plt.savefig("weights_l1.png", transparent=False, dpi=600)
# sns.plt.show()
def reshape_triang(self, X, dim):
"""
This function reshapes a single flattened triangular matrix back to a square diagonal matrix.
:X: array of shape (n_atoms*(n_atoms+1)/2, )
            This contains a sample of the Coulomb matrix trimmed down so that it contains only the upper triangular part.
:dim: int
The triangular matrix X will be reshaped to a matrix that has size dim by dim.
:return: array of shape (n_atoms, n_atoms)
This contains the square diagonal matrix.
"""
x_square = np.zeros((dim, dim))
counter = 0
for i in range(dim):
for j in range(i, dim):
x_square[i][j] = X[counter]
x_square[j][i] = X[counter]
counter = counter + 1
return x_square
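    # Illustrative sketch (not part of the original class): for dim=3 the
    # flattened upper triangle [a, b, c, d, e, f] is rebuilt as the symmetric
    # matrix
    #
    #     [[a, b, c],
    #      [b, d, e],
    #      [c, e, f]]
    #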
def __vis_input(self, initial_guess):
"""
This function does gradient ascent to generate an input that gives the highest activation for each neuron of
the first hidden layer.
:initial_guess: array of shape (n_features,)
A coulomb matrix to use as the initial guess to the gradient ascent in the hope that the closest local
maximum will be found.
:return: list of arrays of shape (num_atoms, num_atoms)
each numpy array is the input for a particular neuron that gives the highest activation.
"""
self.isVisReady = True
initial_guess = np.reshape(initial_guess, newshape=(1, initial_guess.shape[0]))
input_x = tf.Variable(initial_guess, dtype=tf.float32)
activations = []
iterations = 7000
lambda_reg = 0.0002
self.x_square_tot = []
for node in range(self.hidden_layer_sizes[0]):
# Calculating the activation of the first layer
w1_node = tf.constant(self.w1[node], shape=(1,self.n_feat))
b1_node = tf.constant(self.b1[node])
z1 = tf.add(tf.matmul(tf.abs(input_x), tf.transpose(w1_node)), b1_node)
a1 = tf.nn.sigmoid(z1)
a1_reg = a1 - lambda_reg * tf.tensordot(input_x, tf.transpose(input_x), axes=1)
# Function to maximise a1
optimiser = tf.train.AdamOptimizer(learning_rate=0.01).minimize(-a1_reg)
# Initialising the model
init = tf.global_variables_initializer()
# Running the graph
with tf.Session() as sess:
sess.run(init)
for i in range(iterations):
sess.run(optimiser)
temp_a1 = sess.run(a1)
activations.append(temp_a1) # Calculating the activation for checking later if a node has converged
final_x = sess.run(input_x) # Storing the best input
x_square = self.reshape_triang(final_x[0,:], 7)
self.x_square_tot.append(x_square)
print "The activations at the end of the optimisations are:"
print activations
return self.x_square_tot
def vis_input_matrix(self, initial_guess, write_plot=False):
"""
This function calculates the inputs that would give the highest activations of the neurons in the first hidden
layer of the neural network. It then plots them as a heat map.
:initial_guess: array of shape (n_features,)
A coulomb matrix to use as the initial guess to the gradient ascent in the hope that the closest local
maximum will be found.
:write_plot: boolean, default False
If this is true, the plot is written to a png file.
"""
if self.isVisReady == False:
self.x_square_tot = self.__vis_input(initial_guess)
        n = int(np.ceil(np.sqrt(self.hidden_layer_sizes[0])))
additional = n ** 2 - self.hidden_layer_sizes[0]
fig, axn = plt.subplots(n, n, sharex=True, sharey=True)
fig.set_size_inches(11.7, 8.27)
cbar_ax = fig.add_axes([.91, .3, .03, .4])
counter = 0
for i, ax in enumerate(axn.flat):
df = pd.DataFrame(self.x_square_tot[counter])
ax.set(xticks=[], yticks=[])
sns.heatmap(df, ax=ax, cbar=i == 0, cmap='RdYlGn',
vmax=8, vmin=-8,
cbar_ax=None if i else cbar_ax)
counter = counter + 1
if counter >= self.hidden_layer_sizes[0]:
break
fig.tight_layout(rect=[0, 0, 0.9, 1])
if write_plot==True:
sns.plt.savefig("high_a1_input.png", transparent=False, dpi=600)
sns.plt.show()
def vis_input_network(self, initial_guess, write_plot=False):
"""
This function calculates the inputs that would give the highest activations of the neurons in the first hidden
        layer of the neural network. It then plots them as a network graph.
:initial_guess: array of shape (n_features,)
A coulomb matrix to use as the initial guess to the gradient ascent in the hope that the closest local
maximum will be found.
:write_plot: boolean, default False
If this is true, the plot is written to a png file.
"""
import networkx as nx
if self.isVisReady == False:
self.x_square_tot = self.__vis_input(initial_guess)
        n = int(np.ceil(np.sqrt(self.hidden_layer_sizes[0])))
fig = plt.figure(figsize=(10, 8))
for i in range(n**2):
if i >= self.hidden_layer_sizes[0]:
break
fig.add_subplot(n,n,1+i)
A = np.matrix(self.x_square_tot[i])
graph2 = nx.from_numpy_matrix(A, parallel_edges=False)
# nodes and their label
# pos = {0: np.array([0.46887886, 0.06939788]), 1: np.array([0, 0.26694294]),
# 2: np.array([0.3, 0.56225267]),
# 3: np.array([0.13972517, 0.]), 4: np.array([0.6, 0.9]), 5: np.array([0.27685853, 0.31976436]),
# 6: np.array([0.72, 0.9])}
pos = {}
for i in range(7):
x_point = 0.6*np.cos((i+1)*2*np.pi/7)
y_point = 0.6*np.sin((i+1)*2*np.pi/7)
pos[i] = np.array([x_point, y_point])
labels = {}
labels[0] = 'H'
labels[1] = 'H'
labels[2] = 'H'
labels[3] = 'H'
labels[4] = 'C'
labels[5] = 'C'
labels[6] = 'N'
node_size = np.zeros(7)
for i in range(7):
node_size[i] = abs(graph2[i][i]['weight'])*10
nx.draw_networkx_nodes(graph2, pos, node_size=node_size)
nx.draw_networkx_labels(graph2, pos, labels=labels, font_size=15, font_family='sans-serif', font_color='blue')
# edges
edgewidth = [d['weight'] for (u, v, d) in graph2.edges(data=True)]
nx.draw_networkx_edges(graph2, pos, width=edgewidth)
plt.axis('off')
if write_plot==True:
plt.savefig("high_a1_network.png") # save as png
plt.show() # display
# This example tests the module on fitting a simple quadratic function and then plots the results
if __name__ == "__main__":
estimator = MLPRegFlow(hidden_layer_sizes=(5,), learning_rate_init=0.01, max_iter=5000, alpha=0)
# pickle.dump(silvia, open('../tests/model.pickl','wb'))
x = np.arange(-2.0, 2.0, 0.05)
X = np.reshape(x, (len(x), 1))
y = np.reshape(X ** 3, (len(x),))
estimator.fit(X, y)
y_pred = estimator.predict(X)
# Visualisation of predictions
fig2, ax2 = plt.subplots(figsize=(6,6))
ax2.scatter(x, y, label="original", marker="o", c="r")
ax2.scatter(x, y_pred, label="predictions", marker="o", c='b')
ax2.set_xlabel('x')
ax2.set_ylabel('y')
ax2.legend()
# Correlation plot
fig3, ax3 = plt.subplots(figsize=(6,6))
ax3.scatter(y, y_pred, marker="o", c="r")
ax3.set_xlabel('original y')
ax3.set_ylabel('prediction y')
plt.show()
estimator.errorDistribution(X, y)
| mit |
loli/sklearn-ensembletrees | examples/linear_model/plot_polynomial_interpolation.py | 251 | 1895 | #!/usr/bin/env python
"""
========================
Polynomial interpolation
========================
This example demonstrates how to approximate a function with a polynomial of
degree n_degree by using ridge regression. Concretely, from n_samples 1d
points, it suffices to build the Vandermonde matrix, which is n_samples x
n_degree+1 and has the following form:
[[1, x_1, x_1 ** 2, x_1 ** 3, ...],
[1, x_2, x_2 ** 2, x_2 ** 3, ...],
...]
Intuitively, this matrix can be interpreted as a matrix of pseudo features (the
points raised to some power). The matrix is akin to (but different from) the
matrix induced by a polynomial kernel.
This example shows that you can do non-linear regression with a linear model,
using a pipeline to add non-linear features. Kernel methods extend this idea
and can induce very high (even infinite) dimensional feature spaces.
"""
print(__doc__)
# Author: Mathieu Blondel
# Jake Vanderplas
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import Ridge
from sklearn.preprocessing import PolynomialFeatures
from sklearn.pipeline import make_pipeline
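# Illustrative check (not part of the original example): PolynomialFeatures
# produces exactly the Vandermonde-style matrix described in the docstring.
# For degree 3, each input row x_i is mapped to [1, x_i, x_i**2, x_i**3]:
#
#     PolynomialFeatures(3).fit_transform(np.array([[1.0], [2.0]]))
#     # -> [[1., 1., 1., 1.],
#     #     [1., 2., 4., 8.]]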
def f(x):
""" function to approximate by polynomial interpolation"""
return x * np.sin(x)
# generate points used to plot
x_plot = np.linspace(0, 10, 100)
# generate points and keep a subset of them
x = np.linspace(0, 10, 100)
rng = np.random.RandomState(0)
rng.shuffle(x)
x = np.sort(x[:20])
y = f(x)
# create matrix versions of these arrays
X = x[:, np.newaxis]
X_plot = x_plot[:, np.newaxis]
plt.plot(x_plot, f(x_plot), label="ground truth")
plt.scatter(x, y, label="training points")
for degree in [3, 4, 5]:
model = make_pipeline(PolynomialFeatures(degree), Ridge())
model.fit(X, y)
y_plot = model.predict(X_plot)
plt.plot(x_plot, y_plot, label="degree %d" % degree)
plt.legend(loc='lower left')
plt.show()
| bsd-3-clause |
webmasterraj/FogOrNot | flask/lib/python2.7/site-packages/pandas/tools/merge.py | 4 | 41529 | """
SQL-style merge routines
"""
import types
import numpy as np
from pandas.compat import range, long, lrange, lzip, zip, map, filter
import pandas.compat as compat
from pandas.core.categorical import Categorical
from pandas.core.frame import DataFrame, _merge_doc
from pandas.core.generic import NDFrame
from pandas.core.series import Series
from pandas.core.index import (Index, MultiIndex, _get_combined_index,
_ensure_index, _get_consensus_names,
_all_indexes_same)
from pandas.core.internals import (items_overlap_with_suffix,
concatenate_block_managers)
from pandas.util.decorators import Appender, Substitution
from pandas.core.common import ABCSeries
from pandas.io.parsers import TextFileReader
import pandas.core.common as com
import pandas.lib as lib
import pandas.algos as algos
import pandas.hashtable as _hash
@Substitution('\nleft : DataFrame')
@Appender(_merge_doc, indents=0)
def merge(left, right, how='inner', on=None, left_on=None, right_on=None,
left_index=False, right_index=False, sort=False,
suffixes=('_x', '_y'), copy=True):
op = _MergeOperation(left, right, how=how, on=on, left_on=left_on,
right_on=right_on, left_index=left_index,
right_index=right_index, sort=sort, suffixes=suffixes,
copy=copy)
return op.get_result()
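# Illustrative usage sketch (not part of this module): the thin wrapper above is
# what `pandas.merge` / `DataFrame.merge` ultimately call; the frames and the
# key column below are hypothetical.
#
#     left = DataFrame({'key': ['a', 'b'], 'lval': [1, 2]})
#     right = DataFrame({'key': ['b', 'c'], 'rval': [3, 4]})
#     merge(left, right, on='key', how='outer')
#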
if __debug__:
merge.__doc__ = _merge_doc % '\nleft : DataFrame'
class MergeError(ValueError):
pass
def ordered_merge(left, right, on=None, left_by=None, right_by=None,
left_on=None, right_on=None,
fill_method=None, suffixes=('_x', '_y')):
"""Perform merge with optional filling/interpolation designed for ordered
data like time series data. Optionally perform group-wise merge (see
examples)
Parameters
----------
left : DataFrame
right : DataFrame
fill_method : {'ffill', None}, default None
Interpolation method for data
on : label or list
Field names to join on. Must be found in both DataFrames.
left_on : label or list, or array-like
Field names to join on in left DataFrame. Can be a vector or list of
vectors of the length of the DataFrame to use a particular vector as
the join key instead of columns
right_on : label or list, or array-like
Field names to join on in right DataFrame or vector/list of vectors per
left_on docs
left_by : column name or list of column names
Group left DataFrame by group columns and merge piece by piece with
right DataFrame
right_by : column name or list of column names
Group right DataFrame by group columns and merge piece by piece with
left DataFrame
suffixes : 2-length sequence (tuple, list, ...)
Suffix to apply to overlapping column names in the left and right
side, respectively
Examples
--------
>>> A >>> B
key lvalue group key rvalue
0 a 1 a 0 b 1
1 c 2 a 1 c 2
2 e 3 a 2 d 3
3 a 1 b
4 c 2 b
5 e 3 b
>>> ordered_merge(A, B, fill_method='ffill', left_by='group')
key lvalue group rvalue
0 a 1 a NaN
1 b 1 a 1
2 c 2 a 2
3 d 2 a 3
4 e 3 a 3
5 f 3 a 4
6 a 1 b NaN
7 b 1 b 1
8 c 2 b 2
9 d 2 b 3
10 e 3 b 3
11 f 3 b 4
Returns
-------
merged : DataFrame
        The output type will be the same as 'left', if it is a subclass
of DataFrame.
"""
def _merger(x, y):
op = _OrderedMerge(x, y, on=on, left_on=left_on, right_on=right_on,
# left_index=left_index, right_index=right_index,
suffixes=suffixes, fill_method=fill_method)
return op.get_result()
if left_by is not None and right_by is not None:
raise ValueError('Can only group either left or right frames')
elif left_by is not None:
if not isinstance(left_by, (list, tuple)):
left_by = [left_by]
pieces = []
for key, xpiece in left.groupby(left_by):
merged = _merger(xpiece, right)
for k in left_by:
# May have passed ndarray
try:
if k in merged:
merged[k] = key
except:
pass
pieces.append(merged)
return concat(pieces, ignore_index=True)
elif right_by is not None:
if not isinstance(right_by, (list, tuple)):
right_by = [right_by]
pieces = []
for key, ypiece in right.groupby(right_by):
merged = _merger(left, ypiece)
for k in right_by:
try:
if k in merged:
merged[k] = key
except:
pass
pieces.append(merged)
return concat(pieces, ignore_index=True)
else:
return _merger(left, right)
# TODO: transformations??
# TODO: only copy DataFrames when modification necessary
class _MergeOperation(object):
"""
Perform a database (SQL) merge operation between two DataFrame objects
using either columns as keys or their row indexes
"""
def __init__(self, left, right, how='inner', on=None,
left_on=None, right_on=None, axis=1,
left_index=False, right_index=False, sort=True,
suffixes=('_x', '_y'), copy=True):
self.left = self.orig_left = left
self.right = self.orig_right = right
self.how = how
self.axis = axis
self.on = com._maybe_make_list(on)
self.left_on = com._maybe_make_list(left_on)
self.right_on = com._maybe_make_list(right_on)
self.copy = copy
self.suffixes = suffixes
self.sort = sort
self.left_index = left_index
self.right_index = right_index
# note this function has side effects
(self.left_join_keys,
self.right_join_keys,
self.join_names) = self._get_merge_keys()
def get_result(self):
join_index, left_indexer, right_indexer = self._get_join_info()
ldata, rdata = self.left._data, self.right._data
lsuf, rsuf = self.suffixes
llabels, rlabels = items_overlap_with_suffix(ldata.items, lsuf,
rdata.items, rsuf)
lindexers = {1: left_indexer} if left_indexer is not None else {}
rindexers = {1: right_indexer} if right_indexer is not None else {}
result_data = concatenate_block_managers(
[(ldata, lindexers), (rdata, rindexers)],
axes=[llabels.append(rlabels), join_index],
concat_axis=0, copy=self.copy)
typ = self.left._constructor
result = typ(result_data).__finalize__(self, method='merge')
self._maybe_add_join_keys(result, left_indexer, right_indexer)
return result
def _maybe_add_join_keys(self, result, left_indexer, right_indexer):
# insert group keys
keys = zip(self.join_names, self.left_on, self.right_on)
for i, (name, lname, rname) in enumerate(keys):
if not _should_fill(lname, rname):
continue
if name in result:
key_col = result[name]
if left_indexer is not None and right_indexer is not None:
if name in self.left:
na_indexer = (left_indexer == -1).nonzero()[0]
if len(na_indexer) == 0:
continue
right_na_indexer = right_indexer.take(na_indexer)
key_col.put(
na_indexer, com.take_1d(self.right_join_keys[i],
right_na_indexer))
elif name in self.right:
na_indexer = (right_indexer == -1).nonzero()[0]
if len(na_indexer) == 0:
continue
left_na_indexer = left_indexer.take(na_indexer)
key_col.put(na_indexer, com.take_1d(self.left_join_keys[i],
left_na_indexer))
elif left_indexer is not None \
and isinstance(self.left_join_keys[i], np.ndarray):
if name is None:
name = 'key_%d' % i
# a faster way?
key_col = com.take_1d(self.left_join_keys[i], left_indexer)
na_indexer = (left_indexer == -1).nonzero()[0]
right_na_indexer = right_indexer.take(na_indexer)
key_col.put(na_indexer, com.take_1d(self.right_join_keys[i],
right_na_indexer))
result.insert(i, name, key_col)
def _get_join_info(self):
left_ax = self.left._data.axes[self.axis]
right_ax = self.right._data.axes[self.axis]
if self.left_index and self.right_index:
join_index, left_indexer, right_indexer = \
left_ax.join(right_ax, how=self.how, return_indexers=True)
elif self.right_index and self.how == 'left':
join_index, left_indexer, right_indexer = \
_left_join_on_index(left_ax, right_ax, self.left_join_keys,
sort=self.sort)
elif self.left_index and self.how == 'right':
join_index, right_indexer, left_indexer = \
_left_join_on_index(right_ax, left_ax, self.right_join_keys,
sort=self.sort)
else:
(left_indexer,
right_indexer) = _get_join_indexers(self.left_join_keys,
self.right_join_keys,
sort=self.sort, how=self.how)
if self.right_index:
join_index = self.left.index.take(left_indexer)
elif self.left_index:
join_index = self.right.index.take(right_indexer)
else:
join_index = Index(np.arange(len(left_indexer)))
return join_index, left_indexer, right_indexer
def _get_merge_data(self):
"""
Handles overlapping column names etc.
"""
ldata, rdata = self.left._data, self.right._data
lsuf, rsuf = self.suffixes
llabels, rlabels = items_overlap_with_suffix(
ldata.items, lsuf, rdata.items, rsuf)
if not llabels.equals(ldata.items):
ldata = ldata.copy(deep=False)
ldata.set_axis(0, llabels)
if not rlabels.equals(rdata.items):
rdata = rdata.copy(deep=False)
rdata.set_axis(0, rlabels)
return ldata, rdata
def _get_merge_keys(self):
"""
Note: has side effects (copy/delete key columns)
Parameters
----------
left
right
on
Returns
-------
left_keys, right_keys
"""
self._validate_specification()
left_keys = []
right_keys = []
join_names = []
right_drop = []
left_drop = []
left, right = self.left, self.right
is_lkey = lambda x: isinstance(x, (np.ndarray, ABCSeries)) and len(x) == len(left)
is_rkey = lambda x: isinstance(x, (np.ndarray, ABCSeries)) and len(x) == len(right)
# ugh, spaghetti re #733
if _any(self.left_on) and _any(self.right_on):
for lk, rk in zip(self.left_on, self.right_on):
if is_lkey(lk):
left_keys.append(lk)
if is_rkey(rk):
right_keys.append(rk)
join_names.append(None) # what to do?
else:
right_keys.append(right[rk].values)
join_names.append(rk)
else:
if not is_rkey(rk):
right_keys.append(right[rk].values)
if lk == rk:
# avoid key upcast in corner case (length-0)
if len(left) > 0:
right_drop.append(rk)
else:
left_drop.append(lk)
else:
right_keys.append(rk)
left_keys.append(left[lk].values)
join_names.append(lk)
elif _any(self.left_on):
for k in self.left_on:
if is_lkey(k):
left_keys.append(k)
join_names.append(None)
else:
left_keys.append(left[k].values)
join_names.append(k)
if isinstance(self.right.index, MultiIndex):
right_keys = [lev.values.take(lab)
for lev, lab in zip(self.right.index.levels,
self.right.index.labels)]
else:
right_keys = [self.right.index.values]
elif _any(self.right_on):
for k in self.right_on:
if is_rkey(k):
right_keys.append(k)
join_names.append(None)
else:
right_keys.append(right[k].values)
join_names.append(k)
if isinstance(self.left.index, MultiIndex):
left_keys = [lev.values.take(lab)
for lev, lab in zip(self.left.index.levels,
self.left.index.labels)]
else:
left_keys = [self.left.index.values]
if left_drop:
self.left = self.left.drop(left_drop, axis=1)
if right_drop:
self.right = self.right.drop(right_drop, axis=1)
return left_keys, right_keys, join_names
def _validate_specification(self):
# Hm, any way to make this logic less complicated??
if (self.on is None and self.left_on is None
and self.right_on is None):
if self.left_index and self.right_index:
self.left_on, self.right_on = (), ()
elif self.left_index:
if self.right_on is None:
raise MergeError('Must pass right_on or right_index=True')
elif self.right_index:
if self.left_on is None:
raise MergeError('Must pass left_on or left_index=True')
else:
if not self.left.columns.is_unique:
raise MergeError("Left data columns not unique: %s"
% repr(self.left.columns))
if not self.right.columns.is_unique:
raise MergeError("Right data columns not unique: %s"
% repr(self.right.columns))
# use the common columns
common_cols = self.left.columns.intersection(
self.right.columns)
if len(common_cols) == 0:
raise MergeError('No common columns to perform merge on')
self.left_on = self.right_on = common_cols
elif self.on is not None:
if self.left_on is not None or self.right_on is not None:
raise MergeError('Can only pass on OR left_on and '
'right_on')
self.left_on = self.right_on = self.on
elif self.left_on is not None:
n = len(self.left_on)
if self.right_index:
if len(self.left_on) != self.right.index.nlevels:
raise ValueError('len(left_on) must equal the number '
'of levels in the index of "right"')
self.right_on = [None] * n
elif self.right_on is not None:
n = len(self.right_on)
if self.left_index:
if len(self.right_on) != self.left.index.nlevels:
raise ValueError('len(right_on) must equal the number '
'of levels in the index of "left"')
self.left_on = [None] * n
if len(self.right_on) != len(self.left_on):
raise ValueError("len(right_on) must equal len(left_on)")
def _get_join_indexers(left_keys, right_keys, sort=False, how='inner'):
"""
    Parameters
    ----------
    left_keys : list of arrays
        Join key columns already extracted from the left frame.
    right_keys : list of arrays
        Join key columns already extracted from the right frame.
    sort : bool, default False
        Whether to sort the factorized keys.
    how : {'inner', 'left', 'right', 'outer'}, default 'inner'
        Type of join to perform.

    Returns
    -------
    left_indexer, right_indexer : int64 ndarrays
        Row indexers into the left and right frames that realise the join.
"""
from functools import partial
assert len(left_keys) == len(right_keys), \
'left_key and right_keys must be the same length'
# bind `sort` arg. of _factorize_keys
fkeys = partial(_factorize_keys, sort=sort)
# get left & right join labels and num. of levels at each location
llab, rlab, shape = map(list, zip( * map(fkeys, left_keys, right_keys)))
# get flat i8 keys from label lists
lkey, rkey = _get_join_keys(llab, rlab, shape, sort)
# factorize keys to a dense i8 space
# `count` is the num. of unique keys
# set(lkey) | set(rkey) == range(count)
lkey, rkey, count = fkeys(lkey, rkey)
# preserve left frame order if how == 'left' and sort == False
kwargs = {'sort':sort} if how == 'left' else {}
join_func = _join_functions[how]
return join_func(lkey, rkey, count, **kwargs)
class _OrderedMerge(_MergeOperation):
def __init__(self, left, right, on=None, by=None, left_on=None,
right_on=None, axis=1, left_index=False, right_index=False,
suffixes=('_x', '_y'), copy=True,
fill_method=None):
self.fill_method = fill_method
_MergeOperation.__init__(self, left, right, on=on, left_on=left_on,
right_on=right_on, axis=axis,
left_index=left_index,
right_index=right_index,
how='outer', suffixes=suffixes,
sort=True # sorts when factorizing
)
def get_result(self):
join_index, left_indexer, right_indexer = self._get_join_info()
# this is a bit kludgy
ldata, rdata = self.left._data, self.right._data
lsuf, rsuf = self.suffixes
llabels, rlabels = items_overlap_with_suffix(ldata.items, lsuf,
rdata.items, rsuf)
if self.fill_method == 'ffill':
left_join_indexer = algos.ffill_indexer(left_indexer)
right_join_indexer = algos.ffill_indexer(right_indexer)
else:
left_join_indexer = left_indexer
right_join_indexer = right_indexer
lindexers = {1: left_join_indexer} if left_join_indexer is not None else {}
rindexers = {1: right_join_indexer} if right_join_indexer is not None else {}
result_data = concatenate_block_managers(
[(ldata, lindexers), (rdata, rindexers)],
axes=[llabels.append(rlabels), join_index],
concat_axis=0, copy=self.copy)
typ = self.left._constructor
result = typ(result_data).__finalize__(self, method='ordered_merge')
self._maybe_add_join_keys(result, left_indexer, right_indexer)
return result
def _get_multiindex_indexer(join_keys, index, sort):
from functools import partial
# bind `sort` argument
fkeys = partial(_factorize_keys, sort=sort)
# left & right join labels and num. of levels at each location
rlab, llab, shape = map(list, zip( * map(fkeys, index.levels, join_keys)))
if sort:
rlab = list(map(np.take, rlab, index.labels))
else:
i8copy = lambda a: a.astype('i8', subok=False, copy=True)
rlab = list(map(i8copy, index.labels))
# fix right labels if there were any nulls
for i in range(len(join_keys)):
mask = index.labels[i] == -1
if mask.any():
# check if there already was any nulls at this location
# if there was, it is factorized to `shape[i] - 1`
a = join_keys[i][llab[i] == shape[i] - 1]
if a.size == 0 or not a[0] != a[0]:
shape[i] += 1
rlab[i][mask] = shape[i] - 1
# get flat i8 join keys
lkey, rkey = _get_join_keys(llab, rlab, shape, sort)
# factorize keys to a dense i8 space
lkey, rkey, count = fkeys(lkey, rkey)
return algos.left_outer_join(lkey, rkey, count, sort=sort)
def _get_single_indexer(join_key, index, sort=False):
left_key, right_key, count = _factorize_keys(join_key, index, sort=sort)
left_indexer, right_indexer = \
algos.left_outer_join(com._ensure_int64(left_key),
com._ensure_int64(right_key),
count, sort=sort)
return left_indexer, right_indexer
def _left_join_on_index(left_ax, right_ax, join_keys, sort=False):
if len(join_keys) > 1:
if not ((isinstance(right_ax, MultiIndex) and
len(join_keys) == right_ax.nlevels)):
raise AssertionError("If more than one join key is given then "
"'right_ax' must be a MultiIndex and the "
"number of join keys must be the number of "
"levels in right_ax")
left_indexer, right_indexer = \
_get_multiindex_indexer(join_keys, right_ax, sort=sort)
else:
jkey = join_keys[0]
left_indexer, right_indexer = \
_get_single_indexer(jkey, right_ax, sort=sort)
if sort or len(left_ax) != len(left_indexer):
# if asked to sort or there are 1-to-many matches
join_index = left_ax.take(left_indexer)
return join_index, left_indexer, right_indexer
# left frame preserves order & length of its index
return left_ax, None, right_indexer
def _right_outer_join(x, y, max_groups):
right_indexer, left_indexer = algos.left_outer_join(y, x, max_groups)
return left_indexer, right_indexer
_join_functions = {
'inner': algos.inner_join,
'left': algos.left_outer_join,
'right': _right_outer_join,
'outer': algos.full_outer_join,
}
def _factorize_keys(lk, rk, sort=True):
if com.is_int_or_datetime_dtype(lk) and com.is_int_or_datetime_dtype(rk):
klass = _hash.Int64Factorizer
lk = com._ensure_int64(lk)
rk = com._ensure_int64(rk)
else:
klass = _hash.Factorizer
lk = com._ensure_object(lk)
rk = com._ensure_object(rk)
rizer = klass(max(len(lk), len(rk)))
llab = rizer.factorize(lk)
rlab = rizer.factorize(rk)
count = rizer.get_count()
if sort:
uniques = rizer.uniques.to_array()
llab, rlab = _sort_labels(uniques, llab, rlab)
# NA group
lmask = llab == -1
lany = lmask.any()
rmask = rlab == -1
rany = rmask.any()
if lany or rany:
if lany:
np.putmask(llab, lmask, count)
if rany:
np.putmask(rlab, rmask, count)
count += 1
return llab, rlab, count
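# Illustrative sketch (not part of this module): with sort=True the two key
# arrays are mapped onto a shared, dense, ordered label space, e.g.
#
#     _factorize_keys(np.array(['b', 'a']), np.array(['a', 'c']))
#     # -> (array([1, 0]), array([0, 2]), 3)   # llab, rlab, count
#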
def _sort_labels(uniques, left, right):
if not isinstance(uniques, np.ndarray):
# tuplesafe
uniques = Index(uniques).values
sorter = uniques.argsort()
reverse_indexer = np.empty(len(sorter), dtype=np.int64)
reverse_indexer.put(sorter, np.arange(len(sorter)))
new_left = reverse_indexer.take(com._ensure_platform_int(left))
np.putmask(new_left, left == -1, -1)
new_right = reverse_indexer.take(com._ensure_platform_int(right))
np.putmask(new_right, right == -1, -1)
return new_left, new_right
def _get_join_keys(llab, rlab, shape, sort):
from pandas.core.groupby import _int64_overflow_possible
# how many levels can be done without overflow
pred = lambda i: not _int64_overflow_possible(shape[:i])
nlev = next(filter(pred, range(len(shape), 0, -1)))
# get keys for the first `nlev` levels
stride = np.prod(shape[1:nlev], dtype='i8')
lkey = stride * llab[0].astype('i8', subok=False, copy=False)
rkey = stride * rlab[0].astype('i8', subok=False, copy=False)
for i in range(1, nlev):
stride //= shape[i]
lkey += llab[i] * stride
rkey += rlab[i] * stride
if nlev == len(shape): # all done!
return lkey, rkey
# densify current keys to avoid overflow
lkey, rkey, count = _factorize_keys(lkey, rkey, sort=sort)
llab = [lkey] + llab[nlev:]
rlab = [rkey] + rlab[nlev:]
shape = [count] + shape[nlev:]
return _get_join_keys(llab, rlab, shape, sort)
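# Illustrative sketch (not part of this module): the mixed-radix flattening used
# above. With two levels of shape (3, 2), a label pair (i, j) becomes the single
# int64 key i * 2 + j, so e.g. labels (2, 1) -> 5; the recursion only kicks in
# when the product of the level sizes could overflow int64.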
#----------------------------------------------------------------------
# Concatenate DataFrame objects
def concat(objs, axis=0, join='outer', join_axes=None, ignore_index=False,
keys=None, levels=None, names=None, verify_integrity=False, copy=True):
"""
Concatenate pandas objects along a particular axis with optional set logic
along the other axes. Can also add a layer of hierarchical indexing on the
concatenation axis, which may be useful if the labels are the same (or
overlapping) on the passed axis number
Parameters
----------
objs : a sequence or mapping of Series, DataFrame, or Panel objects
If a dict is passed, the sorted keys will be used as the `keys`
argument, unless it is passed, in which case the values will be
selected (see below). Any None objects will be dropped silently unless
they are all None in which case a ValueError will be raised
axis : {0, 1, ...}, default 0
The axis to concatenate along
join : {'inner', 'outer'}, default 'outer'
How to handle indexes on other axis(es)
join_axes : list of Index objects
Specific indexes to use for the other n - 1 axes instead of performing
inner/outer set logic
verify_integrity : boolean, default False
Check whether the new concatenated axis contains duplicates. This can
be very expensive relative to the actual data concatenation
keys : sequence, default None
If multiple levels passed, should contain tuples. Construct
hierarchical index using the passed keys as the outermost level
levels : list of sequences, default None
Specific levels (unique values) to use for constructing a
MultiIndex. Otherwise they will be inferred from the keys
names : list, default None
Names for the levels in the resulting hierarchical index
ignore_index : boolean, default False
If True, do not use the index values along the concatenation axis. The
resulting axis will be labeled 0, ..., n - 1. This is useful if you are
concatenating objects where the concatenation axis does not have
        meaningful indexing information. Note that the index values on the other
axes are still respected in the join.
copy : boolean, default True
If False, do not copy data unnecessarily
Notes
-----
The keys, levels, and names arguments are all optional
Returns
-------
concatenated : type of objects
"""
op = _Concatenator(objs, axis=axis, join_axes=join_axes,
ignore_index=ignore_index, join=join,
keys=keys, levels=levels, names=names,
verify_integrity=verify_integrity,
copy=copy)
return op.get_result()
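# Illustrative usage sketch (not part of this module): concatenating two frames
# row-wise with a hierarchical key per input; the frames are hypothetical.
#
#     pieces = [DataFrame({'x': [1, 2]}), DataFrame({'x': [3, 4]})]
#     concat(pieces, keys=['first', 'second'])   # MultiIndex outer level = keys
#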
class _Concatenator(object):
"""
Orchestrates a concatenation operation for BlockManagers
"""
def __init__(self, objs, axis=0, join='outer', join_axes=None,
keys=None, levels=None, names=None,
ignore_index=False, verify_integrity=False, copy=True):
if isinstance(objs, (NDFrame, compat.string_types)):
raise TypeError('first argument must be an iterable of pandas '
'objects, you passed an object of type '
'"{0}"'.format(type(objs).__name__))
if join == 'outer':
self.intersect = False
elif join == 'inner':
self.intersect = True
else: # pragma: no cover
raise ValueError('Only can inner (intersect) or outer (union) '
'join the other axis')
if isinstance(objs, dict):
if keys is None:
keys = sorted(objs)
objs = [objs[k] for k in keys]
if keys is None:
objs = [obj for obj in objs if obj is not None ]
else:
# #1649
clean_keys = []
clean_objs = []
for k, v in zip(keys, objs):
if v is None:
continue
clean_keys.append(k)
clean_objs.append(v)
objs = clean_objs
keys = clean_keys
if len(objs) == 0:
raise ValueError('All objects passed were None')
# consolidate data & figure out what our result ndim is going to be
ndims = set()
for obj in objs:
if not isinstance(obj, NDFrame):
raise TypeError("cannot concatenate a non-NDFrame object")
# consolidate
obj.consolidate(inplace=True)
ndims.add(obj.ndim)
# get the sample
        # want the highest ndim that we have, and it must be non-empty
# unless all objs are empty
sample = None
if len(ndims) > 1:
max_ndim = max(ndims)
for obj in objs:
if obj.ndim == max_ndim and np.sum(obj.shape):
sample = obj
break
else:
# filter out the empties
            # if we do not have multi-index possibilities
df = DataFrame([ obj.shape for obj in objs ]).sum(1)
non_empties = df[df!=0]
if len(non_empties) and (keys is None and names is None and levels is None and join_axes is None):
objs = [ objs[i] for i in non_empties.index ]
sample = objs[0]
if sample is None:
sample = objs[0]
self.objs = objs
# Need to flip BlockManager axis in the DataFrame special case
self._is_frame = isinstance(sample, DataFrame)
if self._is_frame:
axis = 1 if axis == 0 else 0
self._is_series = isinstance(sample, ABCSeries)
if not 0 <= axis <= sample.ndim:
raise AssertionError("axis must be between 0 and {0}, "
"input was {1}".format(sample.ndim, axis))
# if we have mixed ndims, then convert to highest ndim
# creating column numbers as needed
if len(ndims) > 1:
current_column = 0
max_ndim = sample.ndim
self.objs, objs = [], self.objs
for obj in objs:
ndim = obj.ndim
if ndim == max_ndim:
pass
elif ndim != max_ndim-1:
raise ValueError("cannot concatenate unaligned mixed "
"dimensional NDFrame objects")
else:
name = getattr(obj,'name',None)
if ignore_index or name is None:
name = current_column
current_column += 1
# doing a row-wise concatenation so need everything
# to line up
if self._is_frame and axis == 1:
name = 0
obj = sample._constructor({ name : obj })
self.objs.append(obj)
# note: this is the BlockManager axis (since DataFrame is transposed)
self.axis = axis
self.join_axes = join_axes
self.keys = keys
self.names = names
self.levels = levels
self.ignore_index = ignore_index
self.verify_integrity = verify_integrity
self.copy = copy
self.new_axes = self._get_new_axes()
def get_result(self):
# series only
if self._is_series:
# stack blocks
if self.axis == 0:
new_data = com._concat_compat([x.values for x in self.objs])
name = com._consensus_name_attr(self.objs)
return Series(new_data, index=self.new_axes[0], name=name).__finalize__(self, method='concat')
# combine as columns in a frame
else:
data = dict(zip(range(len(self.objs)), self.objs))
index, columns = self.new_axes
tmpdf = DataFrame(data, index=index)
if columns is not None:
tmpdf.columns = columns
return tmpdf.__finalize__(self, method='concat')
# combine block managers
else:
mgrs_indexers = []
for obj in self.objs:
mgr = obj._data
indexers = {}
for ax, new_labels in enumerate(self.new_axes):
if ax == self.axis:
# Suppress reindexing on concat axis
continue
obj_labels = mgr.axes[ax]
if not new_labels.equals(obj_labels):
indexers[ax] = obj_labels.reindex(new_labels)[1]
mgrs_indexers.append((obj._data, indexers))
new_data = concatenate_block_managers(
mgrs_indexers, self.new_axes, concat_axis=self.axis, copy=self.copy)
if not self.copy:
new_data._consolidate_inplace()
return self.objs[0]._from_axes(new_data, self.new_axes).__finalize__(self, method='concat')
def _get_result_dim(self):
if self._is_series and self.axis == 1:
return 2
else:
return self.objs[0].ndim
def _get_new_axes(self):
ndim = self._get_result_dim()
new_axes = [None] * ndim
if self.join_axes is None:
for i in range(ndim):
if i == self.axis:
continue
new_axes[i] = self._get_comb_axis(i)
else:
if len(self.join_axes) != ndim - 1:
raise AssertionError("length of join_axes must not be "
"equal to {0}".format(ndim - 1))
# ufff...
indices = lrange(ndim)
indices.remove(self.axis)
for i, ax in zip(indices, self.join_axes):
new_axes[i] = ax
new_axes[self.axis] = self._get_concat_axis()
return new_axes
def _get_comb_axis(self, i):
if self._is_series:
all_indexes = [x.index for x in self.objs]
else:
try:
all_indexes = [x._data.axes[i] for x in self.objs]
except IndexError:
types = [type(x).__name__ for x in self.objs]
raise TypeError("Cannot concatenate list of %s" % types)
return _get_combined_index(all_indexes, intersect=self.intersect)
def _get_concat_axis(self):
"""
Return index to be used along concatenation axis.
"""
if self._is_series:
if self.axis == 0:
indexes = [x.index for x in self.objs]
elif self.ignore_index:
idx = Index(np.arange(len(self.objs)))
idx.is_unique = True # arange is always unique
return idx
elif self.keys is None:
names = []
for x in self.objs:
if not isinstance(x, Series):
raise TypeError("Cannot concatenate type 'Series' "
"with object of type "
"%r" % type(x).__name__)
if x.name is not None:
names.append(x.name)
else:
idx = Index(np.arange(len(self.objs)))
idx.is_unique = True
return idx
return Index(names)
else:
return _ensure_index(self.keys)
else:
indexes = [x._data.axes[self.axis] for x in self.objs]
if self.ignore_index:
idx = Index(np.arange(sum(len(i) for i in indexes)))
idx.is_unique = True
return idx
if self.keys is None:
concat_axis = _concat_indexes(indexes)
else:
concat_axis = _make_concat_multiindex(indexes, self.keys,
self.levels, self.names)
self._maybe_check_integrity(concat_axis)
return concat_axis
def _maybe_check_integrity(self, concat_index):
if self.verify_integrity:
if not concat_index.is_unique:
overlap = concat_index.get_duplicates()
raise ValueError('Indexes have overlapping values: %s'
% str(overlap))
def _concat_indexes(indexes):
return indexes[0].append(indexes[1:])
def _make_concat_multiindex(indexes, keys, levels=None, names=None):
if ((levels is None and isinstance(keys[0], tuple)) or
(levels is not None and len(levels) > 1)):
zipped = lzip(*keys)
if names is None:
names = [None] * len(zipped)
if levels is None:
levels = [Categorical.from_array(zp, ordered=True).categories for zp in zipped]
else:
levels = [_ensure_index(x) for x in levels]
else:
zipped = [keys]
if names is None:
names = [None]
if levels is None:
levels = [_ensure_index(keys)]
else:
levels = [_ensure_index(x) for x in levels]
if not _all_indexes_same(indexes):
label_list = []
# things are potentially different sizes, so compute the exact labels
# for each level and pass those to MultiIndex.from_arrays
for hlevel, level in zip(zipped, levels):
to_concat = []
for key, index in zip(hlevel, indexes):
try:
i = level.get_loc(key)
except KeyError:
raise ValueError('Key %s not in level %s'
% (str(key), str(level)))
to_concat.append(np.repeat(i, len(index)))
label_list.append(np.concatenate(to_concat))
concat_index = _concat_indexes(indexes)
# these go at the end
if isinstance(concat_index, MultiIndex):
levels.extend(concat_index.levels)
label_list.extend(concat_index.labels)
else:
factor = Categorical.from_array(concat_index, ordered=True)
levels.append(factor.categories)
label_list.append(factor.codes)
if len(names) == len(levels):
names = list(names)
else:
# make sure that all of the passed indices have the same nlevels
if not len(set([ i.nlevels for i in indexes ])) == 1:
raise AssertionError("Cannot concat indices that do"
" not have the same number of levels")
# also copies
names = names + _get_consensus_names(indexes)
return MultiIndex(levels=levels, labels=label_list, names=names,
verify_integrity=False)
new_index = indexes[0]
n = len(new_index)
kpieces = len(indexes)
# also copies
new_names = list(names)
new_levels = list(levels)
# construct labels
new_labels = []
# do something a bit more speedy
for hlevel, level in zip(zipped, levels):
hlevel = _ensure_index(hlevel)
mapped = level.get_indexer(hlevel)
mask = mapped == -1
if mask.any():
raise ValueError('Values not found in passed level: %s'
% str(hlevel[mask]))
new_labels.append(np.repeat(mapped, n))
if isinstance(new_index, MultiIndex):
new_levels.extend(new_index.levels)
new_labels.extend([np.tile(lab, kpieces) for lab in new_index.labels])
else:
new_levels.append(new_index)
new_labels.append(np.tile(np.arange(n), kpieces))
if len(new_names) < len(new_levels):
new_names.extend(new_index.names)
return MultiIndex(levels=new_levels, labels=new_labels, names=new_names,
verify_integrity=False)
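# A small illustration of what _make_concat_multiindex builds, assuming the
# usual public entry point (pandas.concat with keys=) routes through it:
#
#   >>> s1, s2 = pd.Series([1, 2]), pd.Series([3, 4])
#   >>> pd.concat([s1, s2], keys=['a', 'b']).index
#   MultiIndex(levels=[['a', 'b'], [0, 1]], ...)
#
# i.e. the keys form the outer level and each piece's original index is kept
# underneath its own key.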
def _should_fill(lname, rname):
if not isinstance(lname, compat.string_types) or not isinstance(rname, compat.string_types):
return True
return lname == rname
def _any(x):
return x is not None and len(x) > 0 and any([y is not None for y in x])
| gpl-2.0 |
astroclark/numrel_bursts | nrburst_utils/nrburst_bwreducemoments.py | 1 | 4907 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2016-2017 James Clark <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
nrburst_bwreducemoments.py
"""
import os,sys
import cPickle as pickle
import timeit
import numpy as np
from matplotlib import pyplot as pl
import pycbc.types
import pycbc.filter
def overlap(wave0,wave1,fmin=16,delta_t=1./1024,norm=True):
wave0td = pycbc.types.TimeSeries(wave0, delta_t=delta_t)
wave1td = pycbc.types.TimeSeries(wave1, delta_t=delta_t)
overlap=pycbc.filter.overlap(wave0td, wave1td, low_frequency_cutoff=fmin,
normalized=norm)
return overlap
def whiten(wave, asdarray, delta_t=1./1024):
wavetd = pycbc.types.TimeSeries(wave, delta_t=delta_t)
wavefd = wavetd.to_frequencyseries()
asd=pycbc.types.FrequencySeries(np.zeros(len(wavefd)),
delta_f=wavefd.delta_f)
idx = wavefd.sample_frequencies.data >= min(asdarray[:,0])
asd.data[idx] = asdarray[:,1]
asd.data[np.invert(idx)]=1.0
wavefd_white = wavefd/asd
return wavefd_white.to_timeseries()
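# The two helpers above are combined further down into the usual noise-weighted
# match between a reconstructed signal r and an injection i,
#
#   O = <r, i> / sqrt(<i, i> <r, r>),
#
# summed over detectors before normalising. A minimal sketch, assuming two
# already-whitened numpy arrays `rec` and `inj` sampled at 1024 Hz:
#
#   num = overlap(rec, inj, fmin=16, norm=False)
#   den = np.sqrt(overlap(inj, inj, fmin=16, norm=False) *
#                 overlap(rec, rec, fmin=16, norm=False))
#   match = num / den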
#
# Input
#
if len(sys.argv)>=3:
injfile=sys.argv[2]
print "loading data from %s"%injfile
statinfo = os.stat(injfile)
print "results pickle is %.2f G"%(statinfo.st_size / 1024 / 1024 / 1024.)
then = timeit.time.time()
injset = pickle.load(open(injfile,'rb'))
now = timeit.time.time()
print "results took %dm, %ds to load"%(divmod(now-then,60))
else:
print "Using data in environment"
fmin=int(sys.argv[1])
print "Using fmin=%d for overlaps"%fmin
outname = injfile.replace('.pickle','') + '-fmin_%d'%fmin
print "Dumping moments to %s"%outname
#
# Allocation
#
for var in injset[0].keys():
vars()[var] = injset[0][var]
nmoments=10001
netoverlaps = np.zeros(shape=(len(injset), nmoments-1))
mynetoverlaps = np.zeros(shape=(len(injset), len(IFO0_whitened_signal)))
netsnr = np.zeros(shape=(len(injset)))
h1snr = np.zeros(shape=(len(injset)))
l1snr = np.zeros(shape=(len(injset)))
snrratio = np.zeros(shape=(len(injset)))
Zsignal = np.zeros(shape=(len(injset),2))
for i in xrange(len(injset)):
print "Reading injection %d/%d"%(i+1, len(injset))
for var in injset[i].keys():
vars()[var] = injset[i][var]
#
# SNR
#
h1snr[i] = float(snr[0][1])
l1snr[i] = float(snr[1][1])
snrratio[i] = max(h1snr[i]/l1snr[i], l1snr[i]/h1snr[i])
netsnr[i] = float(snr[2][1])
#
# Evidence
#
Zsignal[i][0] = float(evidence[2][1])
Zsignal[i][1] = float(evidence[2][2])
#
# Overlaps
#
netoverlaps[i,:] = [IFO1_signal_moments[j][-3] for j in
xrange(1,len(IFO1_signal_moments))]
#
# Manual calculation of network overlap (to facilitate different fmin)
#
    # The whitened injections do not depend on j, so compute them once per
    # injection instead of once per posterior sample.
    IFO0_whitened_injection = whiten(H1_timeInjection[:,1], IFO0_ASD)
    IFO1_whitened_injection = whiten(L1_timeInjection[:,1], IFO1_ASD)
    for j in xrange(len(IFO0_whitened_signal)):
ri = overlap(IFO0_whitened_signal[j], IFO0_whitened_injection,
fmin=fmin, norm=False) + overlap(IFO1_whitened_signal[j],
IFO1_whitened_injection, fmin=fmin, norm=False)
ii = overlap(IFO0_whitened_injection, IFO0_whitened_injection,
fmin=fmin, norm=False) + overlap(IFO1_whitened_injection,
IFO1_whitened_injection, fmin=fmin, norm=False)
rr = overlap(IFO0_whitened_signal[j], IFO0_whitened_signal[j],
fmin=fmin, norm=False) + overlap(IFO1_whitened_signal[j],
IFO1_whitened_signal[j], fmin=fmin, norm=False)
mynetoverlaps[i,j] = ri / np.sqrt(ii*rr)
median_overlap = np.array([np.median(netoverlaps[i]) for i in
xrange(len(injset))])
std_overlap = np.array([np.std(netoverlaps[i]) for i in xrange(len(injset))])
#
# Now clean up and save workspace
#
# This gives us almost all the characteristics we need to compare injection sets
np.savez(file=outname,
netoverlaps = netoverlaps,
mynetoverlaps = mynetoverlaps,
netsnr = netsnr,
snrratio = snrratio,
Zsignal = Zsignal,
median_overlap = median_overlap,
std_overlap = std_overlap)
| gpl-2.0 |
jkeung/yellowbrick | tests/test_style/test_palettes.py | 2 | 10127 | # tests.test_style.test_palettes
# Tests the palettes module of the yellowbrick library.
#
# Author: Benjamin Bengfort <[email protected]>
# Created: Tue Oct 04 16:21:58 2016 -0400
#
# Copyright (C) 2016 District Data Labs
# For license information, see LICENSE.txt
#
# ID: test_palettes.py [] [email protected] $
"""
Tests the palettes module of the yellowbrick library.
"""
##########################################################################
## Imports
##########################################################################
import warnings
import unittest
import numpy as np
import matplotlib as mpl
from yellowbrick.exceptions import *
from yellowbrick.style.palettes import *
from yellowbrick.style.colors import get_color_cycle
from yellowbrick.style.rcmod import set_aesthetic, set_palette
from yellowbrick.style.palettes import color_sequence, color_palette
from yellowbrick.style.palettes import ColorPalette, PALETTES, SEQUENCES
from tests.base import VisualTestCase
##########################################################################
## Color Palette Tests
##########################################################################
class ColorPaletteObjectTests(VisualTestCase):
"""
Tests the ColorPalette object
"""
def test_init_palette_by_name(self):
"""
Test that a palette can be initialized by name
"""
# Try all the names in the palettes
for name, value in PALETTES.items():
try:
palette = ColorPalette(name)
except YellowbrickValueError:
self.fail(
"Could not instantiate {} color palette by name".format(name)
)
self.assertEqual(value, palette)
# Try a name not in PALETTES
with self.assertRaises(YellowbrickValueError):
self.assertNotIn('foo', PALETTES, "Cannot test bad name 'foo' it is in PALETTES!")
palette = ColorPalette('foo')
def test_init_palette_by_list(self):
"""
Test that a palette can be initialized by a list
"""
# Try all the values in the palettes (HEX)
for value in PALETTES.values():
palette = ColorPalette(value)
self.assertEqual(len(value), len(palette))
# Try all the values converted to RGB
for value in PALETTES.values():
palette = ColorPalette(map(mpl.colors.colorConverter.to_rgb, value))
self.assertEqual(len(value), len(palette))
def test_color_palette_context(self):
"""
Test ColorPalette context management
"""
default = color_palette()
context = color_palette('dark')
with ColorPalette('dark') as palette:
self.assertIsInstance(palette, ColorPalette)
self.assertEqual(get_color_cycle(), context)
self.assertEqual(get_color_cycle(), default)
def test_as_hex_as_rgb(self):
"""
Test the conversion of a ColorPalette to hex values and back to rgb
"""
palette = color_palette('flatui')
expected = PALETTES['flatui']
morgified = palette.as_hex()
self.assertIsNot(morgified, palette)
self.assertIsInstance(morgified, ColorPalette)
self.assertEqual(morgified, expected)
remorgified = morgified.as_rgb()
self.assertIsNot(remorgified, morgified)
self.assertIsNot(remorgified, palette)
self.assertEqual(remorgified, palette)
@unittest.skip("not implemented yet")
def test_plot_color_palette(self):
"""
Test the plotting of a color palette for color visualization
"""
raise NotImplementedError(
"Not quite sure how to implement this yet"
)
class ColorPaletteFunctionTests(VisualTestCase):
"""
Tests the color_palette function.
"""
def test_current_palette(self):
"""
Test modifying the current palette with a simple palette
"""
pal = color_palette(["red", "blue", "green"], 3)
set_palette(pal, 3)
self.assertEqual(pal, get_color_cycle())
# Reset the palette
set_aesthetic()
def test_palette_context(self):
"""
Test the context manager for the color_palette function
"""
default_pal = color_palette()
context_pal = color_palette("muted")
with color_palette(context_pal):
self.assertEqual(get_color_cycle(), context_pal)
self.assertEqual(get_color_cycle(), default_pal)
def test_big_palette_context(self):
"""
Test that the context manager also resets the number of colors
"""
original_pal = color_palette("accent", n_colors=8)
context_pal = color_palette("bold", 10)
set_palette(original_pal)
with color_palette(context_pal, 10):
self.assertEqual(get_color_cycle(), context_pal)
self.assertEqual(get_color_cycle(), original_pal)
# Reset default
set_aesthetic()
def test_yellowbrick_palettes(self):
"""
Test the yellowbrick palettes have length 6 (bgrmyck)
"""
pals = ["accent", "dark", "pastel", "bold", "muted"]
for name in pals:
pal_out = color_palette(name)
self.assertEqual(len(pal_out), 6, "{} is not of len 6".format(name))
def test_seaborn_palettes(self):
"""
Test the seaborn palettes have length 6 (bgrmyck)
"""
pals = ["sns_deep", "sns_muted", "sns_pastel",
"sns_bright", "sns_dark", "sns_colorblind"]
for name in pals:
pal_out = color_palette(name)
self.assertEqual(len(pal_out), 6)
def test_bad_palette_name(self):
"""
Test that a bad palette name raises an exception
"""
with self.assertRaises(ValueError):
color_palette("IAmNotAPalette")
with self.assertRaises(YellowbrickValueError):
color_palette("IAmNotAPalette")
def test_bad_palette_colors(self):
"""
Test that bad color names raise an exception
"""
pal = ["red", "blue", "iamnotacolor"]
with self.assertRaises(ValueError):
color_palette(pal)
with self.assertRaises(YellowbrickValueError):
color_palette(pal)
def test_palette_is_list_of_tuples(self):
"""
Assert that color_palette returns a list of RGB tuples
"""
pal_in = np.array(["red", "blue", "green"])
pal_out = color_palette(pal_in, 3)
self.assertIsInstance(pal_out, list)
self.assertIsInstance(pal_out[0], tuple)
self.assertIsInstance(pal_out[0][0], float)
self.assertEqual(len(pal_out[0]), 3)
def test_palette_cycles(self):
"""
Test that the color palette cycles for more colors
"""
accent = color_palette("accent")
double_accent = color_palette("accent", 12)
self.assertEqual(double_accent, accent + accent)
@unittest.skip("Discovered this commented out, don't know why")
def test_cbrewer_qual(self):
"""
Test colorbrewer qualitative palettes
"""
pal_short = mpl_palette("Set1", 4)
pal_long = mpl_palette("Set1", 6)
self.assertEqual(pal_short, pal_long[:4])
pal_full = palettes.mpl_palette("Set2", 8)
pal_long = palettes.mpl_palette("Set2", 10)
self.assertEqual(pal_full, pal_long[:8])
def test_color_codes(self):
"""
Test the setting of color codes
"""
set_color_codes("accent")
colors = color_palette("accent") + ["0.06666666666666667"]
for code, color in zip("bgrmyck", colors):
rgb_want = mpl.colors.colorConverter.to_rgb(color)
rgb_got = mpl.colors.colorConverter.to_rgb(code)
self.assertEqual(rgb_want, rgb_got)
set_color_codes("reset")
def test_as_hex(self):
"""
Test converting a color palette to hex and back to rgb.
"""
pal = color_palette("accent")
for rgb, hex in zip(pal, pal.as_hex()):
self.assertEqual(mpl.colors.rgb2hex(rgb), hex)
for rgb_e, rgb_v in zip(pal, pal.as_hex().as_rgb()):
self.assertEqual(rgb_e, rgb_v)
def test_get_color_cycle(self):
"""
Test getting the default color cycle
"""
with warnings.catch_warnings():
warnings.simplefilter('ignore')
result = get_color_cycle()
expected = mpl.rcParams['axes.color_cycle']
self.assertEqual(result, expected)
def test_preserved_palette_length(self):
"""
Test palette length is preserved when modified
"""
pal_in = color_palette("Set1", 10)
pal_out = color_palette(pal_in)
self.assertEqual(pal_in, pal_out)
def test_color_sequence(self):
"""
Ensure the color sequence returns listed colors.
"""
for name, ncols in SEQUENCES.items():
for n in ncols.keys():
cmap = color_sequence(name, n)
self.assertEqual(name, cmap.name)
self.assertEqual(n, cmap.N)
def test_color_sequence_default(self):
"""
Assert the default color sequence is RdBu
"""
cmap = color_sequence()
self.assertEqual(cmap.name, "RdBu")
self.assertEqual(cmap.N, 11)
    def test_color_sequence_unrecognized(self):
"""
Test value errors for unrecognized sequences
"""
with self.assertRaises(YellowbrickValueError):
cmap = color_sequence('PepperBucks', 3)
def test_color_sequence_bounds(self):
"""
Test color sequence out of bounds value error
"""
with self.assertRaises(YellowbrickValueError):
cmap = color_sequence('RdBu', 18)
with self.assertRaises(YellowbrickValueError):
cmap = color_sequence('RdBu', 2)
if __name__ == "__main__":
unittest.main()
| apache-2.0 |
mdeff/ntds_2017 | projects/reports/movie_network/python/costs_function_parallelized.py | 1 | 2617 | import numpy as np
import pandas as pd
from multiprocessing import Pool, cpu_count
from tqdm import tqdm
def cost(x):
costs = np.zeros(Movies.shape[0])
current_film = Movies.iloc[x]
genres_current = get_genres(current_film)
kw_current = get_keywords(current_film)
for j in range(x,Movies.shape[0]):
cost = 0
b_film = Movies.iloc[j]
genres_b = get_genres(b_film)
        # First compare only the first genre, because it matters more than the
        # other genres when judging similarity.
if len(genres_current) > 0 and len(genres_b) > 0:
if (genres_current[0] == genres_b[0]):
cost += first_genre
            # Count the remaining genres the two films share; the first genre of
            # each film was already compared above.
            cost += np.sum(np.in1d(genres_current[1:], genres_b[1:], assume_unique=True)) * second_genre
kw_b = get_keywords(b_film)
        # This gives us the number of keywords the two films share.
        cost += np.sum(np.in1d(kw_current, kw_b, assume_unique=True)) * keyword_cost
costs[j] = cost
return costs
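# A worked example of the scoring above (hypothetical films, using the weights
# set in __main__): if both films list 'Action' as their first genre they gain
# first_genre = 5, one further shared genre adds second_genre = 1, and three
# shared keywords add 3 * keyword_cost = 3, giving a total cost of 9.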
def get_genres(film):
genres = str(film['genres'])
if genres == 'nan':
        return []
else:
genres = genres.split(",")
return genres
def get_keywords(film):
kw = str(film['keywords'])
if kw == 'nan':
        return []
else:
kw = kw.split(",")
return kw
def vote_ratio(x, costs):
    # Scale row x of the cost matrix by the ratio of vote averages, so that
    # edges pointing towards better-rated films are weighted up.
    vote_x = Movies.iloc[x]['vote_average']
    for j in range(0, Movies.shape[0]):
        vote_j = Movies.iloc[j]['vote_average']
        costs[x, j] = costs[x, j] * vote_j / vote_x
if __name__ == '__main__':
#Constant definition
#Cost added if the first genre is similar between two films
first_genre = 5
#Cost added if the secondary genre is similar between two films
second_genre = 1
#Cost added by similar keyword identical between two films
keyword_cost = 1
    useful_columns = ['genres', 'keywords', 'vote_average']
    Movies = pd.read_csv("../Datasets/Transformed.csv", usecols=useful_columns)
Movies = Movies.loc[Movies['vote_average'] > 0]
with Pool(cpu_count()) as p:
r = list(tqdm(p.imap(cost, range(0,Movies.shape[0])), total=Movies.shape[0]))
costs = np.array(r)
costs = costs + costs.T
        # vote_ratio mutates `costs` in place; run it in the parent process,
        # since in-place updates in workers would not propagate back.
        for x in tqdm(range(Movies.shape[0]), total=Movies.shape[0]):
            vote_ratio(x, costs)
np.savez_compressed("../Datasets/costs_parallelized.npz", costs, costs = costs)
| mit |
arbuz001/sms-tools | lectures/08-Sound-transformations/plots-code/hps-morph.py | 24 | 2691 | # function for doing a morph between two sounds using the hpsModel
import numpy as np
import matplotlib.pyplot as plt
from scipy.signal import get_window
import sys, os
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../models/'))
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../transformations/'))
import hpsModel as HPS
import hpsTransformations as HPST
import harmonicTransformations as HT
import utilFunctions as UF
inputFile1='../../../sounds/violin-B3.wav'
window1='blackman'
M1=1001
N1=1024
t1=-100
minSineDur1=0.05
nH=60
minf01=200
maxf01=300
f0et1=10
harmDevSlope1=0.01
stocf=0.1
inputFile2='../../../sounds/soprano-E4.wav'
window2='blackman'
M2=901
N2=1024
t2=-100
minSineDur2=0.05
minf02=250
maxf02=500
f0et2=10
harmDevSlope2=0.01
Ns = 512
H = 128
(fs1, x1) = UF.wavread(inputFile1)
(fs2, x2) = UF.wavread(inputFile2)
w1 = get_window(window1, M1)
w2 = get_window(window2, M2)
hfreq1, hmag1, hphase1, stocEnv1 = HPS.hpsModelAnal(x1, fs1, w1, N1, H, t1, nH, minf01, maxf01, f0et1, harmDevSlope1, minSineDur1, Ns, stocf)
hfreq2, hmag2, hphase2, stocEnv2 = HPS.hpsModelAnal(x2, fs2, w2, N2, H, t2, nH, minf02, maxf02, f0et2, harmDevSlope2, minSineDur2, Ns, stocf)
hfreqIntp = np.array([0, .5, 1, .5])
hmagIntp = np.array([0, .5, 1, .5])
stocIntp = np.array([0, .5, 1, .5])
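# The *Intp arrays are read by hpsMorph as (time, factor) breakpoint pairs on a
# normalised time axis; here the factor is 0.5 at both t=0 and t=1, so this
# example presumably keeps a constant 50/50 blend of the two sounds for the
# harmonic frequencies, magnitudes and the stochastic envelope.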
yhfreq, yhmag, ystocEnv = HPST.hpsMorph(hfreq1, hmag1, stocEnv1, hfreq2, hmag2, stocEnv2, hfreqIntp, hmagIntp, stocIntp)
y, yh, yst = HPS.hpsModelSynth(yhfreq, yhmag, np.array([]), ystocEnv, Ns, H, fs1)
UF.wavwrite(y,fs1, 'hps-morph.wav')
plt.figure(figsize=(12, 9))
frame = 200
plt.subplot(2,3,1)
plt.vlines(hfreq1[frame,:], -100, hmag1[frame,:], lw=1.5, color='b')
plt.axis([0,5000, -80, -15])
plt.title('x1: harmonics')
plt.subplot(2,3,2)
plt.vlines(hfreq2[frame,:], -100, hmag2[frame,:], lw=1.5, color='r')
plt.axis([0,5000, -80, -15])
plt.title('x2: harmonics')
plt.subplot(2,3,3)
yhfreq[frame,:][yhfreq[frame,:]==0] = np.nan
plt.vlines(yhfreq[frame,:], -100, yhmag[frame,:], lw=1.5, color='c')
plt.axis([0,5000, -80, -15])
plt.title('y: harmonics')
stocaxis = (fs1/2)*np.arange(stocEnv1[0,:].size)/float(stocEnv1[0,:].size)
plt.subplot(2,3,4)
plt.plot(stocaxis, stocEnv1[frame,:], lw=1.5, marker='x', color='b')
plt.axis([0,20000, -73, -27])
plt.title('x1: stochastic')
plt.subplot(2,3,5)
plt.plot(stocaxis, stocEnv2[frame,:], lw=1.5, marker='x', color='r')
plt.axis([0,20000, -73, -27])
plt.title('x2: stochastic')
plt.subplot(2,3,6)
plt.plot(stocaxis, ystocEnv[frame,:], lw=1.5, marker='x', color='c')
plt.axis([0,20000, -73, -27])
plt.title('y: stochastic')
plt.tight_layout()
plt.savefig('hps-morph.png')
plt.show()
| agpl-3.0 |
LiaoPan/scikit-learn | sklearn/neighbors/regression.py | 106 | 10572 | """Nearest Neighbor Regression"""
# Authors: Jake Vanderplas <[email protected]>
# Fabian Pedregosa <[email protected]>
# Alexandre Gramfort <[email protected]>
# Sparseness support by Lars Buitinck <[email protected]>
# Multi-output support by Arnaud Joly <[email protected]>
#
# License: BSD 3 clause (C) INRIA, University of Amsterdam
import numpy as np
from .base import _get_weights, _check_weights, NeighborsBase, KNeighborsMixin
from .base import RadiusNeighborsMixin, SupervisedFloatMixin
from ..base import RegressorMixin
from ..utils import check_array
class KNeighborsRegressor(NeighborsBase, KNeighborsMixin,
SupervisedFloatMixin,
RegressorMixin):
"""Regression based on k-nearest neighbors.
The target is predicted by local interpolation of the targets
associated of the nearest neighbors in the training set.
Read more in the :ref:`User Guide <regression>`.
Parameters
----------
n_neighbors : int, optional (default = 5)
        Number of neighbors to use by default for :meth:`kneighbors` queries.
weights : str or callable
weight function used in prediction. Possible values:
- 'uniform' : uniform weights. All points in each neighborhood
are weighted equally.
- 'distance' : weight points by the inverse of their distance.
in this case, closer neighbors of a query point will have a
greater influence than neighbors which are further away.
- [callable] : a user-defined function which accepts an
array of distances, and returns an array of the same shape
containing the weights.
Uniform weights are used by default.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
Algorithm used to compute the nearest neighbors:
- 'ball_tree' will use :class:`BallTree`
- 'kd_tree' will use :class:`KDtree`
- 'brute' will use a brute-force search.
- 'auto' will attempt to decide the most appropriate algorithm
based on the values passed to :meth:`fit` method.
Note: fitting on sparse input will override the setting of
this parameter, using brute force.
leaf_size : int, optional (default = 30)
Leaf size passed to BallTree or KDTree. This can affect the
speed of the construction and query, as well as the memory
required to store the tree. The optimal value depends on the
nature of the problem.
metric : string or DistanceMetric object (default='minkowski')
the distance metric to use for the tree. The default metric is
minkowski, and with p=2 is equivalent to the standard Euclidean
metric. See the documentation of the DistanceMetric class for a
list of available metrics.
p : integer, optional (default = 2)
Power parameter for the Minkowski metric. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
    metric_params : dict, optional (default = None)
additional keyword arguments for the metric function.
Examples
--------
>>> X = [[0], [1], [2], [3]]
>>> y = [0, 0, 1, 1]
>>> from sklearn.neighbors import KNeighborsRegressor
>>> neigh = KNeighborsRegressor(n_neighbors=2)
>>> neigh.fit(X, y) # doctest: +ELLIPSIS
KNeighborsRegressor(...)
>>> print(neigh.predict([[1.5]]))
[ 0.5]
See also
--------
NearestNeighbors
RadiusNeighborsRegressor
KNeighborsClassifier
RadiusNeighborsClassifier
Notes
-----
See :ref:`Nearest Neighbors <neighbors>` in the online documentation
for a discussion of the choice of ``algorithm`` and ``leaf_size``.
.. warning::
Regarding the Nearest Neighbors algorithms, if it is found that two
       neighbors, neighbor `k+1` and `k`, have identical distances but
       different labels, the results will depend on the ordering of the
training data.
http://en.wikipedia.org/wiki/K-nearest_neighbor_algorithm
"""
def __init__(self, n_neighbors=5, weights='uniform',
algorithm='auto', leaf_size=30,
p=2, metric='minkowski', metric_params=None, **kwargs):
self._init_params(n_neighbors=n_neighbors,
algorithm=algorithm,
leaf_size=leaf_size, metric=metric, p=p,
metric_params=metric_params, **kwargs)
self.weights = _check_weights(weights)
def predict(self, X):
"""Predict the target for the provided data
Parameters
----------
X : array or matrix, shape = [n_samples, n_features]
Returns
-------
y : array of int, shape = [n_samples] or [n_samples, n_outputs]
Target values
"""
X = check_array(X, accept_sparse='csr')
neigh_dist, neigh_ind = self.kneighbors(X)
weights = _get_weights(neigh_dist, self.weights)
_y = self._y
if _y.ndim == 1:
_y = _y.reshape((-1, 1))
if weights is None:
y_pred = np.mean(_y[neigh_ind], axis=1)
else:
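            # Distance-weighted mean per output column:
            #   y_pred[:, j] = sum_k w_k * y[k, j] / sum_k w_k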
y_pred = np.empty((X.shape[0], _y.shape[1]), dtype=np.float)
denom = np.sum(weights, axis=1)
for j in range(_y.shape[1]):
num = np.sum(_y[neigh_ind, j] * weights, axis=1)
y_pred[:, j] = num / denom
if self._y.ndim == 1:
y_pred = y_pred.ravel()
return y_pred
class RadiusNeighborsRegressor(NeighborsBase, RadiusNeighborsMixin,
SupervisedFloatMixin,
RegressorMixin):
"""Regression based on neighbors within a fixed radius.
The target is predicted by local interpolation of the targets
associated of the nearest neighbors in the training set.
Read more in the :ref:`User Guide <regression>`.
Parameters
----------
radius : float, optional (default = 1.0)
        Range of parameter space to use by default for :meth:`radius_neighbors`
queries.
weights : str or callable
weight function used in prediction. Possible values:
- 'uniform' : uniform weights. All points in each neighborhood
are weighted equally.
- 'distance' : weight points by the inverse of their distance.
in this case, closer neighbors of a query point will have a
greater influence than neighbors which are further away.
- [callable] : a user-defined function which accepts an
array of distances, and returns an array of the same shape
containing the weights.
Uniform weights are used by default.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
Algorithm used to compute the nearest neighbors:
- 'ball_tree' will use :class:`BallTree`
- 'kd_tree' will use :class:`KDtree`
- 'brute' will use a brute-force search.
- 'auto' will attempt to decide the most appropriate algorithm
based on the values passed to :meth:`fit` method.
Note: fitting on sparse input will override the setting of
this parameter, using brute force.
leaf_size : int, optional (default = 30)
Leaf size passed to BallTree or KDTree. This can affect the
speed of the construction and query, as well as the memory
required to store the tree. The optimal value depends on the
nature of the problem.
metric : string or DistanceMetric object (default='minkowski')
the distance metric to use for the tree. The default metric is
minkowski, and with p=2 is equivalent to the standard Euclidean
metric. See the documentation of the DistanceMetric class for a
list of available metrics.
p : integer, optional (default = 2)
Power parameter for the Minkowski metric. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
    metric_params : dict, optional (default = None)
additional keyword arguments for the metric function.
Examples
--------
>>> X = [[0], [1], [2], [3]]
>>> y = [0, 0, 1, 1]
>>> from sklearn.neighbors import RadiusNeighborsRegressor
>>> neigh = RadiusNeighborsRegressor(radius=1.0)
>>> neigh.fit(X, y) # doctest: +ELLIPSIS
RadiusNeighborsRegressor(...)
>>> print(neigh.predict([[1.5]]))
[ 0.5]
See also
--------
NearestNeighbors
KNeighborsRegressor
KNeighborsClassifier
RadiusNeighborsClassifier
Notes
-----
See :ref:`Nearest Neighbors <neighbors>` in the online documentation
for a discussion of the choice of ``algorithm`` and ``leaf_size``.
http://en.wikipedia.org/wiki/K-nearest_neighbor_algorithm
"""
def __init__(self, radius=1.0, weights='uniform',
algorithm='auto', leaf_size=30,
p=2, metric='minkowski', metric_params=None, **kwargs):
self._init_params(radius=radius,
algorithm=algorithm,
leaf_size=leaf_size,
p=p, metric=metric, metric_params=metric_params,
**kwargs)
self.weights = _check_weights(weights)
def predict(self, X):
"""Predict the target for the provided data
Parameters
----------
X : array or matrix, shape = [n_samples, n_features]
Returns
-------
y : array of int, shape = [n_samples] or [n_samples, n_outputs]
Target values
"""
X = check_array(X, accept_sparse='csr')
neigh_dist, neigh_ind = self.radius_neighbors(X)
weights = _get_weights(neigh_dist, self.weights)
_y = self._y
if _y.ndim == 1:
_y = _y.reshape((-1, 1))
if weights is None:
y_pred = np.array([np.mean(_y[ind, :], axis=0)
for ind in neigh_ind])
else:
y_pred = np.array([(np.average(_y[ind, :], axis=0,
weights=weights[i]))
for (i, ind) in enumerate(neigh_ind)])
if self._y.ndim == 1:
y_pred = y_pred.ravel()
return y_pred
| bsd-3-clause |
vivekmishra1991/scikit-learn | sklearn/svm/tests/test_svm.py | 70 | 31674 | """
Testing for Support Vector Machine module (sklearn.svm)
TODO: remove hard coded numerical results when possible
"""
import numpy as np
import itertools
from numpy.testing import assert_array_equal, assert_array_almost_equal
from numpy.testing import assert_almost_equal
from scipy import sparse
from nose.tools import assert_raises, assert_true, assert_equal, assert_false
from sklearn.base import ChangedBehaviorWarning
from sklearn import svm, linear_model, datasets, metrics, base
from sklearn.cross_validation import train_test_split
from sklearn.datasets import make_classification, make_blobs
from sklearn.metrics import f1_score
from sklearn.metrics.pairwise import rbf_kernel
from sklearn.utils import check_random_state
from sklearn.utils import ConvergenceWarning
from sklearn.utils.validation import NotFittedError
from sklearn.utils.testing import assert_greater, assert_in, assert_less
from sklearn.utils.testing import assert_raises_regexp, assert_warns
from sklearn.utils.testing import assert_warns_message, assert_raise_message
from sklearn.utils.testing import ignore_warnings
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
Y = [1, 1, 1, 2, 2, 2]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [1, 2, 2]
# also load the iris dataset
iris = datasets.load_iris()
rng = check_random_state(42)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
def test_libsvm_parameters():
# Test parameters on classes that make use of libsvm.
clf = svm.SVC(kernel='linear').fit(X, Y)
assert_array_equal(clf.dual_coef_, [[-0.25, .25]])
assert_array_equal(clf.support_, [1, 3])
assert_array_equal(clf.support_vectors_, (X[1], X[3]))
assert_array_equal(clf.intercept_, [0.])
assert_array_equal(clf.predict(X), Y)
def test_libsvm_iris():
# Check consistency on dataset iris.
# shuffle the dataset so that labels are not ordered
for k in ('linear', 'rbf'):
clf = svm.SVC(kernel=k).fit(iris.data, iris.target)
assert_greater(np.mean(clf.predict(iris.data) == iris.target), 0.9)
assert_array_equal(clf.classes_, np.sort(clf.classes_))
# check also the low-level API
model = svm.libsvm.fit(iris.data, iris.target.astype(np.float64))
pred = svm.libsvm.predict(iris.data, *model)
assert_greater(np.mean(pred == iris.target), .95)
model = svm.libsvm.fit(iris.data, iris.target.astype(np.float64),
kernel='linear')
pred = svm.libsvm.predict(iris.data, *model, kernel='linear')
assert_greater(np.mean(pred == iris.target), .95)
pred = svm.libsvm.cross_validation(iris.data,
iris.target.astype(np.float64), 5,
kernel='linear',
random_seed=0)
assert_greater(np.mean(pred == iris.target), .95)
# If random_seed >= 0, the libsvm rng is seeded (by calling `srand`), hence
    # we should get deterministic results (assuming that there is no other
# thread calling this wrapper calling `srand` concurrently).
pred2 = svm.libsvm.cross_validation(iris.data,
iris.target.astype(np.float64), 5,
kernel='linear',
random_seed=0)
assert_array_equal(pred, pred2)
@ignore_warnings
def test_single_sample_1d():
# Test whether SVCs work on a single sample given as a 1-d array
clf = svm.SVC().fit(X, Y)
clf.predict(X[0])
clf = svm.LinearSVC(random_state=0).fit(X, Y)
clf.predict(X[0])
def test_precomputed():
# SVC with a precomputed kernel.
# We test it with a toy dataset and with iris.
clf = svm.SVC(kernel='precomputed')
# Gram matrix for train data (square matrix)
# (we use just a linear kernel)
K = np.dot(X, np.array(X).T)
clf.fit(K, Y)
# Gram matrix for test data (rectangular matrix)
KT = np.dot(T, np.array(X).T)
pred = clf.predict(KT)
assert_raises(ValueError, clf.predict, KT.T)
assert_array_equal(clf.dual_coef_, [[-0.25, .25]])
assert_array_equal(clf.support_, [1, 3])
assert_array_equal(clf.intercept_, [0])
assert_array_almost_equal(clf.support_, [1, 3])
assert_array_equal(pred, true_result)
# Gram matrix for test data but compute KT[i,j]
# for support vectors j only.
KT = np.zeros_like(KT)
for i in range(len(T)):
for j in clf.support_:
KT[i, j] = np.dot(T[i], X[j])
pred = clf.predict(KT)
assert_array_equal(pred, true_result)
# same as before, but using a callable function instead of the kernel
# matrix. kernel is just a linear kernel
kfunc = lambda x, y: np.dot(x, y.T)
clf = svm.SVC(kernel=kfunc)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_equal(clf.dual_coef_, [[-0.25, .25]])
assert_array_equal(clf.intercept_, [0])
assert_array_almost_equal(clf.support_, [1, 3])
assert_array_equal(pred, true_result)
# test a precomputed kernel with the iris dataset
# and check parameters against a linear SVC
clf = svm.SVC(kernel='precomputed')
clf2 = svm.SVC(kernel='linear')
K = np.dot(iris.data, iris.data.T)
clf.fit(K, iris.target)
clf2.fit(iris.data, iris.target)
pred = clf.predict(K)
assert_array_almost_equal(clf.support_, clf2.support_)
assert_array_almost_equal(clf.dual_coef_, clf2.dual_coef_)
assert_array_almost_equal(clf.intercept_, clf2.intercept_)
assert_almost_equal(np.mean(pred == iris.target), .99, decimal=2)
# Gram matrix for test data but compute KT[i,j]
# for support vectors j only.
K = np.zeros_like(K)
for i in range(len(iris.data)):
for j in clf.support_:
K[i, j] = np.dot(iris.data[i], iris.data[j])
pred = clf.predict(K)
assert_almost_equal(np.mean(pred == iris.target), .99, decimal=2)
clf = svm.SVC(kernel=kfunc)
clf.fit(iris.data, iris.target)
assert_almost_equal(np.mean(pred == iris.target), .99, decimal=2)
def test_svr():
# Test Support Vector Regression
diabetes = datasets.load_diabetes()
for clf in (svm.NuSVR(kernel='linear', nu=.4, C=1.0),
svm.NuSVR(kernel='linear', nu=.4, C=10.),
svm.SVR(kernel='linear', C=10.),
svm.LinearSVR(C=10.),
svm.LinearSVR(C=10.),
):
clf.fit(diabetes.data, diabetes.target)
assert_greater(clf.score(diabetes.data, diabetes.target), 0.02)
# non-regression test; previously, BaseLibSVM would check that
# len(np.unique(y)) < 2, which must only be done for SVC
svm.SVR().fit(diabetes.data, np.ones(len(diabetes.data)))
svm.LinearSVR().fit(diabetes.data, np.ones(len(diabetes.data)))
def test_linearsvr():
# check that SVR(kernel='linear') and LinearSVC() give
# comparable results
diabetes = datasets.load_diabetes()
lsvr = svm.LinearSVR(C=1e3).fit(diabetes.data, diabetes.target)
score1 = lsvr.score(diabetes.data, diabetes.target)
svr = svm.SVR(kernel='linear', C=1e3).fit(diabetes.data, diabetes.target)
score2 = svr.score(diabetes.data, diabetes.target)
assert np.linalg.norm(lsvr.coef_ - svr.coef_) / np.linalg.norm(svr.coef_) < .1
assert np.abs(score1 - score2) < 0.1
def test_svr_errors():
X = [[0.0], [1.0]]
y = [0.0, 0.5]
# Bad kernel
clf = svm.SVR(kernel=lambda x, y: np.array([[1.0]]))
clf.fit(X, y)
assert_raises(ValueError, clf.predict, X)
def test_oneclass():
# Test OneClassSVM
clf = svm.OneClassSVM()
clf.fit(X)
pred = clf.predict(T)
assert_array_almost_equal(pred, [-1, -1, -1])
assert_array_almost_equal(clf.intercept_, [-1.008], decimal=3)
assert_array_almost_equal(clf.dual_coef_,
[[0.632, 0.233, 0.633, 0.234, 0.632, 0.633]],
decimal=3)
assert_raises(ValueError, lambda: clf.coef_)
def test_oneclass_decision_function():
# Test OneClassSVM decision function
clf = svm.OneClassSVM()
rnd = check_random_state(2)
# Generate train data
X = 0.3 * rnd.randn(100, 2)
X_train = np.r_[X + 2, X - 2]
# Generate some regular novel observations
X = 0.3 * rnd.randn(20, 2)
X_test = np.r_[X + 2, X - 2]
# Generate some abnormal novel observations
X_outliers = rnd.uniform(low=-4, high=4, size=(20, 2))
# fit the model
clf = svm.OneClassSVM(nu=0.1, kernel="rbf", gamma=0.1)
clf.fit(X_train)
# predict things
y_pred_test = clf.predict(X_test)
assert_greater(np.mean(y_pred_test == 1), .9)
y_pred_outliers = clf.predict(X_outliers)
assert_greater(np.mean(y_pred_outliers == -1), .9)
dec_func_test = clf.decision_function(X_test)
assert_array_equal((dec_func_test > 0).ravel(), y_pred_test == 1)
dec_func_outliers = clf.decision_function(X_outliers)
assert_array_equal((dec_func_outliers > 0).ravel(), y_pred_outliers == 1)
def test_tweak_params():
# Make sure some tweaking of parameters works.
# We change clf.dual_coef_ at run time and expect .predict() to change
# accordingly. Notice that this is not trivial since it involves a lot
# of C/Python copying in the libsvm bindings.
# The success of this test ensures that the mapping between libsvm and
# the python classifier is complete.
clf = svm.SVC(kernel='linear', C=1.0)
clf.fit(X, Y)
assert_array_equal(clf.dual_coef_, [[-.25, .25]])
assert_array_equal(clf.predict([[-.1, -.1]]), [1])
clf._dual_coef_ = np.array([[.0, 1.]])
assert_array_equal(clf.predict([[-.1, -.1]]), [2])
def test_probability():
# Predict probabilities using SVC
# This uses cross validation, so we use a slightly bigger testing set.
for clf in (svm.SVC(probability=True, random_state=0, C=1.0),
svm.NuSVC(probability=True, random_state=0)):
clf.fit(iris.data, iris.target)
prob_predict = clf.predict_proba(iris.data)
assert_array_almost_equal(
np.sum(prob_predict, 1), np.ones(iris.data.shape[0]))
assert_true(np.mean(np.argmax(prob_predict, 1)
== clf.predict(iris.data)) > 0.9)
assert_almost_equal(clf.predict_proba(iris.data),
np.exp(clf.predict_log_proba(iris.data)), 8)
def test_decision_function():
# Test decision_function
# Sanity check, test that decision_function implemented in python
# returns the same as the one in libsvm
# multi class:
clf = svm.SVC(kernel='linear', C=0.1,
decision_function_shape='ovo').fit(iris.data, iris.target)
dec = np.dot(iris.data, clf.coef_.T) + clf.intercept_
assert_array_almost_equal(dec, clf.decision_function(iris.data))
# binary:
clf.fit(X, Y)
dec = np.dot(X, clf.coef_.T) + clf.intercept_
prediction = clf.predict(X)
assert_array_almost_equal(dec.ravel(), clf.decision_function(X))
assert_array_almost_equal(
prediction,
clf.classes_[(clf.decision_function(X) > 0).astype(np.int)])
expected = np.array([-1., -0.66, -1., 0.66, 1., 1.])
assert_array_almost_equal(clf.decision_function(X), expected, 2)
# kernel binary:
clf = svm.SVC(kernel='rbf', gamma=1, decision_function_shape='ovo')
clf.fit(X, Y)
rbfs = rbf_kernel(X, clf.support_vectors_, gamma=clf.gamma)
dec = np.dot(rbfs, clf.dual_coef_.T) + clf.intercept_
assert_array_almost_equal(dec.ravel(), clf.decision_function(X))
def test_decision_function_shape():
# check that decision_function_shape='ovr' gives
# correct shape and is consistent with predict
clf = svm.SVC(kernel='linear', C=0.1,
decision_function_shape='ovr').fit(iris.data, iris.target)
dec = clf.decision_function(iris.data)
assert_equal(dec.shape, (len(iris.data), 3))
assert_array_equal(clf.predict(iris.data), np.argmax(dec, axis=1))
# with five classes:
X, y = make_blobs(n_samples=80, centers=5, random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
clf = svm.SVC(kernel='linear', C=0.1,
decision_function_shape='ovr').fit(X_train, y_train)
dec = clf.decision_function(X_test)
assert_equal(dec.shape, (len(X_test), 5))
assert_array_equal(clf.predict(X_test), np.argmax(dec, axis=1))
    # check shape of the decision function with decision_function_shape='ovo'
clf = svm.SVC(kernel='linear', C=0.1,
decision_function_shape='ovo').fit(X_train, y_train)
dec = clf.decision_function(X_train)
assert_equal(dec.shape, (len(X_train), 10))
# check deprecation warning
clf.decision_function_shape = None
msg = "change the shape of the decision function"
dec = assert_warns_message(ChangedBehaviorWarning, msg,
clf.decision_function, X_train)
assert_equal(dec.shape, (len(X_train), 10))
def test_svr_decision_function():
# Test SVR's decision_function
# Sanity check, test that decision_function implemented in python
# returns the same as the one in libsvm
X = iris.data
y = iris.target
# linear kernel
reg = svm.SVR(kernel='linear', C=0.1).fit(X, y)
dec = np.dot(X, reg.coef_.T) + reg.intercept_
assert_array_almost_equal(dec.ravel(), reg.decision_function(X).ravel())
# rbf kernel
reg = svm.SVR(kernel='rbf', gamma=1).fit(X, y)
rbfs = rbf_kernel(X, reg.support_vectors_, gamma=reg.gamma)
dec = np.dot(rbfs, reg.dual_coef_.T) + reg.intercept_
assert_array_almost_equal(dec.ravel(), reg.decision_function(X).ravel())
def test_weight():
# Test class weights
clf = svm.SVC(class_weight={1: 0.1})
# we give a small weights to class 1
clf.fit(X, Y)
# so all predicted values belong to class 2
assert_array_almost_equal(clf.predict(X), [2] * 6)
X_, y_ = make_classification(n_samples=200, n_features=10,
weights=[0.833, 0.167], random_state=2)
for clf in (linear_model.LogisticRegression(),
svm.LinearSVC(random_state=0), svm.SVC()):
clf.set_params(class_weight={0: .1, 1: 10})
clf.fit(X_[:100], y_[:100])
y_pred = clf.predict(X_[100:])
assert_true(f1_score(y_[100:], y_pred) > .3)
def test_sample_weights():
# Test weights on individual samples
# TODO: check on NuSVR, OneClass, etc.
clf = svm.SVC()
clf.fit(X, Y)
assert_array_equal(clf.predict([X[2]]), [1.])
sample_weight = [.1] * 3 + [10] * 3
clf.fit(X, Y, sample_weight=sample_weight)
assert_array_equal(clf.predict([X[2]]), [2.])
# test that rescaling all samples is the same as changing C
clf = svm.SVC()
clf.fit(X, Y)
dual_coef_no_weight = clf.dual_coef_
clf.set_params(C=100)
clf.fit(X, Y, sample_weight=np.repeat(0.01, len(X)))
assert_array_almost_equal(dual_coef_no_weight, clf.dual_coef_)
def test_auto_weight():
# Test class weights for imbalanced data
from sklearn.linear_model import LogisticRegression
# We take as dataset the two-dimensional projection of iris so
# that it is not separable and remove half of predictors from
# class 1.
# We add one to the targets as a non-regression test: class_weight="balanced"
    # used to work only when the labels were a range [0..K).
from sklearn.utils import compute_class_weight
X, y = iris.data[:, :2], iris.target + 1
unbalanced = np.delete(np.arange(y.size), np.where(y > 2)[0][::2])
classes = np.unique(y[unbalanced])
class_weights = compute_class_weight('balanced', classes, y[unbalanced])
assert_true(np.argmax(class_weights) == 2)
for clf in (svm.SVC(kernel='linear'), svm.LinearSVC(random_state=0),
LogisticRegression()):
# check that score is better when class='balanced' is set.
y_pred = clf.fit(X[unbalanced], y[unbalanced]).predict(X)
clf.set_params(class_weight='balanced')
y_pred_balanced = clf.fit(X[unbalanced], y[unbalanced],).predict(X)
assert_true(metrics.f1_score(y, y_pred, average='weighted')
<= metrics.f1_score(y, y_pred_balanced,
average='weighted'))
def test_bad_input():
# Test that it gives proper exception on deficient input
# impossible value of C
assert_raises(ValueError, svm.SVC(C=-1).fit, X, Y)
# impossible value of nu
clf = svm.NuSVC(nu=0.0)
assert_raises(ValueError, clf.fit, X, Y)
Y2 = Y[:-1] # wrong dimensions for labels
assert_raises(ValueError, clf.fit, X, Y2)
# Test with arrays that are non-contiguous.
for clf in (svm.SVC(), svm.LinearSVC(random_state=0)):
Xf = np.asfortranarray(X)
assert_false(Xf.flags['C_CONTIGUOUS'])
yf = np.ascontiguousarray(np.tile(Y, (2, 1)).T)
yf = yf[:, -1]
assert_false(yf.flags['F_CONTIGUOUS'])
assert_false(yf.flags['C_CONTIGUOUS'])
clf.fit(Xf, yf)
assert_array_equal(clf.predict(T), true_result)
    # error for precomputed kernels
clf = svm.SVC(kernel='precomputed')
assert_raises(ValueError, clf.fit, X, Y)
# sample_weight bad dimensions
clf = svm.SVC()
assert_raises(ValueError, clf.fit, X, Y, sample_weight=range(len(X) - 1))
# predict with sparse input when trained with dense
clf = svm.SVC().fit(X, Y)
assert_raises(ValueError, clf.predict, sparse.lil_matrix(X))
Xt = np.array(X).T
clf.fit(np.dot(X, Xt), Y)
assert_raises(ValueError, clf.predict, X)
clf = svm.SVC()
clf.fit(X, Y)
assert_raises(ValueError, clf.predict, Xt)
def test_sparse_precomputed():
clf = svm.SVC(kernel='precomputed')
sparse_gram = sparse.csr_matrix([[1, 0], [0, 1]])
try:
clf.fit(sparse_gram, [0, 1])
assert not "reached"
except TypeError as e:
assert_in("Sparse precomputed", str(e))
def test_linearsvc_parameters():
# Test possible parameter combinations in LinearSVC
# Generate list of possible parameter combinations
losses = ['hinge', 'squared_hinge', 'logistic_regression', 'foo']
penalties, duals = ['l1', 'l2', 'bar'], [True, False]
X, y = make_classification(n_samples=5, n_features=5)
for loss, penalty, dual in itertools.product(losses, penalties, duals):
clf = svm.LinearSVC(penalty=penalty, loss=loss, dual=dual)
if ((loss, penalty) == ('hinge', 'l1') or
(loss, penalty, dual) == ('hinge', 'l2', False) or
(penalty, dual) == ('l1', True) or
loss == 'foo' or penalty == 'bar'):
assert_raises_regexp(ValueError,
"Unsupported set of arguments.*penalty='%s.*"
"loss='%s.*dual=%s"
% (penalty, loss, dual),
clf.fit, X, y)
else:
clf.fit(X, y)
# Incorrect loss value - test if explicit error message is raised
assert_raises_regexp(ValueError, ".*loss='l3' is not supported.*",
svm.LinearSVC(loss="l3").fit, X, y)
# FIXME remove in 1.0
def test_linearsvx_loss_penalty_deprecations():
X, y = [[0.0], [1.0]], [0, 1]
msg = ("loss='%s' has been deprecated in favor of "
"loss='%s' as of 0.16. Backward compatibility"
" for the %s will be removed in %s")
# LinearSVC
# loss l1/L1 --> hinge
assert_warns_message(DeprecationWarning,
msg % ("l1", "hinge", "loss='l1'", "1.0"),
svm.LinearSVC(loss="l1").fit, X, y)
# loss l2/L2 --> squared_hinge
assert_warns_message(DeprecationWarning,
msg % ("L2", "squared_hinge", "loss='L2'", "1.0"),
svm.LinearSVC(loss="L2").fit, X, y)
# LinearSVR
# loss l1/L1 --> epsilon_insensitive
assert_warns_message(DeprecationWarning,
msg % ("L1", "epsilon_insensitive", "loss='L1'",
"1.0"),
svm.LinearSVR(loss="L1").fit, X, y)
# loss l2/L2 --> squared_epsilon_insensitive
assert_warns_message(DeprecationWarning,
msg % ("l2", "squared_epsilon_insensitive",
"loss='l2'", "1.0"),
svm.LinearSVR(loss="l2").fit, X, y)
# FIXME remove in 0.18
def test_linear_svx_uppercase_loss_penalty():
# Check if Upper case notation is supported by _fit_liblinear
# which is called by fit
X, y = [[0.0], [1.0]], [0, 1]
msg = ("loss='%s' has been deprecated in favor of "
"loss='%s' as of 0.16. Backward compatibility"
" for the uppercase notation will be removed in %s")
# loss SQUARED_hinge --> squared_hinge
assert_warns_message(DeprecationWarning,
msg % ("SQUARED_hinge", "squared_hinge", "0.18"),
svm.LinearSVC(loss="SQUARED_hinge").fit, X, y)
# penalty L2 --> l2
assert_warns_message(DeprecationWarning,
msg.replace("loss", "penalty")
% ("L2", "l2", "0.18"),
svm.LinearSVC(penalty="L2").fit, X, y)
# loss EPSILON_INSENSITIVE --> epsilon_insensitive
assert_warns_message(DeprecationWarning,
msg % ("EPSILON_INSENSITIVE", "epsilon_insensitive",
"0.18"),
svm.LinearSVR(loss="EPSILON_INSENSITIVE").fit, X, y)
def test_linearsvc():
# Test basic routines using LinearSVC
clf = svm.LinearSVC(random_state=0).fit(X, Y)
# by default should have intercept
assert_true(clf.fit_intercept)
assert_array_equal(clf.predict(T), true_result)
assert_array_almost_equal(clf.intercept_, [0], decimal=3)
# the same with l1 penalty
clf = svm.LinearSVC(penalty='l1', loss='squared_hinge', dual=False, random_state=0).fit(X, Y)
assert_array_equal(clf.predict(T), true_result)
# l2 penalty with dual formulation
clf = svm.LinearSVC(penalty='l2', dual=True, random_state=0).fit(X, Y)
assert_array_equal(clf.predict(T), true_result)
# l2 penalty, l1 loss
clf = svm.LinearSVC(penalty='l2', loss='hinge', dual=True, random_state=0)
clf.fit(X, Y)
assert_array_equal(clf.predict(T), true_result)
# test also decision function
dec = clf.decision_function(T)
res = (dec > 0).astype(np.int) + 1
assert_array_equal(res, true_result)
def test_linearsvc_crammer_singer():
# Test LinearSVC with crammer_singer multi-class svm
ovr_clf = svm.LinearSVC(random_state=0).fit(iris.data, iris.target)
cs_clf = svm.LinearSVC(multi_class='crammer_singer', random_state=0)
cs_clf.fit(iris.data, iris.target)
# similar prediction for ovr and crammer-singer:
assert_true((ovr_clf.predict(iris.data) ==
cs_clf.predict(iris.data)).mean() > .9)
# classifiers shouldn't be the same
assert_true((ovr_clf.coef_ != cs_clf.coef_).all())
# test decision function
assert_array_equal(cs_clf.predict(iris.data),
np.argmax(cs_clf.decision_function(iris.data), axis=1))
dec_func = np.dot(iris.data, cs_clf.coef_.T) + cs_clf.intercept_
assert_array_almost_equal(dec_func, cs_clf.decision_function(iris.data))
def test_crammer_singer_binary():
# Test Crammer-Singer formulation in the binary case
X, y = make_classification(n_classes=2, random_state=0)
for fit_intercept in (True, False):
acc = svm.LinearSVC(fit_intercept=fit_intercept,
multi_class="crammer_singer",
random_state=0).fit(X, y).score(X, y)
assert_greater(acc, 0.9)
def test_linearsvc_iris():
# Test that LinearSVC gives plausible predictions on the iris dataset
# Also, test symbolic class names (classes_).
target = iris.target_names[iris.target]
clf = svm.LinearSVC(random_state=0).fit(iris.data, target)
assert_equal(set(clf.classes_), set(iris.target_names))
assert_greater(np.mean(clf.predict(iris.data) == target), 0.8)
dec = clf.decision_function(iris.data)
pred = iris.target_names[np.argmax(dec, 1)]
assert_array_equal(pred, clf.predict(iris.data))
def test_dense_liblinear_intercept_handling(classifier=svm.LinearSVC):
# Test that dense liblinear honours intercept_scaling param
X = [[2, 1],
[3, 1],
[1, 3],
[2, 3]]
y = [0, 0, 1, 1]
clf = classifier(fit_intercept=True, penalty='l1', loss='squared_hinge',
dual=False, C=4, tol=1e-7, random_state=0)
assert_true(clf.intercept_scaling == 1, clf.intercept_scaling)
assert_true(clf.fit_intercept)
# when intercept_scaling is low the intercept value is highly "penalized"
# by regularization
clf.intercept_scaling = 1
clf.fit(X, y)
assert_almost_equal(clf.intercept_, 0, decimal=5)
# when intercept_scaling is sufficiently high, the intercept value
# is not affected by regularization
clf.intercept_scaling = 100
clf.fit(X, y)
intercept1 = clf.intercept_
assert_less(intercept1, -1)
# when intercept_scaling is sufficiently high, the intercept value
# doesn't depend on intercept_scaling value
clf.intercept_scaling = 1000
clf.fit(X, y)
intercept2 = clf.intercept_
assert_array_almost_equal(intercept1, intercept2, decimal=2)
def test_liblinear_set_coef():
# multi-class case
clf = svm.LinearSVC().fit(iris.data, iris.target)
values = clf.decision_function(iris.data)
clf.coef_ = clf.coef_.copy()
clf.intercept_ = clf.intercept_.copy()
values2 = clf.decision_function(iris.data)
assert_array_almost_equal(values, values2)
# binary-class case
X = [[2, 1],
[3, 1],
[1, 3],
[2, 3]]
y = [0, 0, 1, 1]
clf = svm.LinearSVC().fit(X, y)
values = clf.decision_function(X)
clf.coef_ = clf.coef_.copy()
clf.intercept_ = clf.intercept_.copy()
values2 = clf.decision_function(X)
assert_array_equal(values, values2)
def test_immutable_coef_property():
# Check that primal coef modification are not silently ignored
svms = [
svm.SVC(kernel='linear').fit(iris.data, iris.target),
svm.NuSVC(kernel='linear').fit(iris.data, iris.target),
svm.SVR(kernel='linear').fit(iris.data, iris.target),
svm.NuSVR(kernel='linear').fit(iris.data, iris.target),
svm.OneClassSVM(kernel='linear').fit(iris.data),
]
for clf in svms:
assert_raises(AttributeError, clf.__setattr__, 'coef_', np.arange(3))
assert_raises((RuntimeError, ValueError),
clf.coef_.__setitem__, (0, 0), 0)
def test_linearsvc_verbose():
# stdout: redirect
import os
stdout = os.dup(1) # save original stdout
os.dup2(os.pipe()[1], 1) # replace it
# actual call
clf = svm.LinearSVC(verbose=1)
clf.fit(X, Y)
# stdout: restore
os.dup2(stdout, 1) # restore original stdout
def test_svc_clone_with_callable_kernel():
# create SVM with callable linear kernel, check that results are the same
# as with built-in linear kernel
svm_callable = svm.SVC(kernel=lambda x, y: np.dot(x, y.T),
probability=True, random_state=0,
decision_function_shape='ovr')
# clone for checking clonability with lambda functions..
svm_cloned = base.clone(svm_callable)
svm_cloned.fit(iris.data, iris.target)
svm_builtin = svm.SVC(kernel='linear', probability=True, random_state=0,
decision_function_shape='ovr')
svm_builtin.fit(iris.data, iris.target)
assert_array_almost_equal(svm_cloned.dual_coef_,
svm_builtin.dual_coef_)
assert_array_almost_equal(svm_cloned.intercept_,
svm_builtin.intercept_)
assert_array_equal(svm_cloned.predict(iris.data),
svm_builtin.predict(iris.data))
assert_array_almost_equal(svm_cloned.predict_proba(iris.data),
svm_builtin.predict_proba(iris.data),
decimal=4)
assert_array_almost_equal(svm_cloned.decision_function(iris.data),
svm_builtin.decision_function(iris.data))
def test_svc_bad_kernel():
svc = svm.SVC(kernel=lambda x, y: x)
assert_raises(ValueError, svc.fit, X, Y)
def test_timeout():
a = svm.SVC(kernel=lambda x, y: np.dot(x, y.T), probability=True,
random_state=0, max_iter=1)
assert_warns(ConvergenceWarning, a.fit, X, Y)
def test_unfitted():
X = "foo!" # input validation not required when SVM not fitted
clf = svm.SVC()
assert_raises_regexp(Exception, r".*\bSVC\b.*\bnot\b.*\bfitted\b",
clf.predict, X)
clf = svm.NuSVR()
assert_raises_regexp(Exception, r".*\bNuSVR\b.*\bnot\b.*\bfitted\b",
clf.predict, X)
# ignore convergence warnings from max_iter=1
@ignore_warnings
def test_consistent_proba():
a = svm.SVC(probability=True, max_iter=1, random_state=0)
proba_1 = a.fit(X, Y).predict_proba(X)
a = svm.SVC(probability=True, max_iter=1, random_state=0)
proba_2 = a.fit(X, Y).predict_proba(X)
assert_array_almost_equal(proba_1, proba_2)
def test_linear_svc_convergence_warnings():
# Test that warnings are raised if model does not converge
lsvc = svm.LinearSVC(max_iter=2, verbose=1)
assert_warns(ConvergenceWarning, lsvc.fit, X, Y)
assert_equal(lsvc.n_iter_, 2)
def test_svr_coef_sign():
# Test that SVR(kernel="linear") has coef_ with the right sign.
# Non-regression test for #2933.
X = np.random.RandomState(21).randn(10, 3)
y = np.random.RandomState(12).randn(10)
for svr in [svm.SVR(kernel='linear'), svm.NuSVR(kernel='linear'),
svm.LinearSVR()]:
svr.fit(X, y)
assert_array_almost_equal(svr.predict(X),
np.dot(X, svr.coef_.ravel()) + svr.intercept_)
def test_linear_svc_intercept_scaling():
# Test that the right error message is thrown when intercept_scaling <= 0
for i in [-1, 0]:
lsvc = svm.LinearSVC(intercept_scaling=i)
msg = ('Intercept scaling is %r but needs to be greater than 0.'
' To disable fitting an intercept,'
' set fit_intercept=False.' % lsvc.intercept_scaling)
assert_raise_message(ValueError, msg, lsvc.fit, X, Y)
def test_lsvc_intercept_scaling_zero():
# Test that intercept_scaling is ignored when fit_intercept is False
lsvc = svm.LinearSVC(fit_intercept=False)
lsvc.fit(X, Y)
assert_equal(lsvc.intercept_, 0.)
def test_hasattr_predict_proba():
# Method must be (un)available before or after fit, switched by
# `probability` param
G = svm.SVC(probability=True)
assert_true(hasattr(G, 'predict_proba'))
G.fit(iris.data, iris.target)
assert_true(hasattr(G, 'predict_proba'))
G = svm.SVC(probability=False)
assert_false(hasattr(G, 'predict_proba'))
G.fit(iris.data, iris.target)
assert_false(hasattr(G, 'predict_proba'))
# Switching to `probability=True` after fitting should make
# predict_proba available, but calling it must not work:
G.probability = True
assert_true(hasattr(G, 'predict_proba'))
msg = "predict_proba is not available when fitted with probability=False"
assert_raise_message(NotFittedError, msg, G.predict_proba, iris.data)
| bsd-3-clause |
devanshdalal/scikit-learn | sklearn/linear_model/tests/test_base.py | 83 | 15089 | # Author: Alexandre Gramfort <[email protected]>
# Fabian Pedregosa <[email protected]>
#
# License: BSD 3 clause
import numpy as np
from scipy import sparse
from scipy import linalg
from itertools import product
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import ignore_warnings
from sklearn.linear_model.base import LinearRegression
from sklearn.linear_model.base import _preprocess_data
from sklearn.linear_model.base import sparse_center_data, center_data
from sklearn.linear_model.base import _rescale_data
from sklearn.utils import check_random_state
from sklearn.utils.testing import assert_greater
from sklearn.datasets.samples_generator import make_sparse_uncorrelated
from sklearn.datasets.samples_generator import make_regression
rng = np.random.RandomState(0)
def test_linear_regression():
# Test LinearRegression on a simple dataset.
# a simple dataset
X = [[1], [2]]
Y = [1, 2]
reg = LinearRegression()
reg.fit(X, Y)
assert_array_almost_equal(reg.coef_, [1])
assert_array_almost_equal(reg.intercept_, [0])
assert_array_almost_equal(reg.predict(X), [1, 2])
# test it also for degenerate input
X = [[1]]
Y = [0]
reg = LinearRegression()
reg.fit(X, Y)
assert_array_almost_equal(reg.coef_, [0])
assert_array_almost_equal(reg.intercept_, [0])
assert_array_almost_equal(reg.predict(X), [0])
def test_linear_regression_sample_weights():
# TODO: loop over sparse data as well
rng = np.random.RandomState(0)
# It would not work with under-determined systems
for n_samples, n_features in ((6, 5), ):
y = rng.randn(n_samples)
X = rng.randn(n_samples, n_features)
sample_weight = 1.0 + rng.rand(n_samples)
for intercept in (True, False):
# LinearRegression with explicit sample_weight
reg = LinearRegression(fit_intercept=intercept)
reg.fit(X, y, sample_weight=sample_weight)
coefs1 = reg.coef_
inter1 = reg.intercept_
assert_equal(reg.coef_.shape, (X.shape[1], )) # sanity checks
assert_greater(reg.score(X, y), 0.5)
# Closed form of the weighted least square
# theta = (X^T W X)^(-1) * X^T W y
W = np.diag(sample_weight)
if intercept is False:
X_aug = X
else:
dummy_column = np.ones(shape=(n_samples, 1))
X_aug = np.concatenate((dummy_column, X), axis=1)
coefs2 = linalg.solve(X_aug.T.dot(W).dot(X_aug),
X_aug.T.dot(W).dot(y))
if intercept is False:
assert_array_almost_equal(coefs1, coefs2)
else:
assert_array_almost_equal(coefs1, coefs2[1:])
assert_almost_equal(inter1, coefs2[0])
def test_raises_value_error_if_sample_weights_greater_than_1d():
# Sample weights must be either scalar or 1D
n_sampless = [2, 3]
n_featuress = [3, 2]
for n_samples, n_features in zip(n_sampless, n_featuress):
X = rng.randn(n_samples, n_features)
y = rng.randn(n_samples)
sample_weights_OK = rng.randn(n_samples) ** 2 + 1
sample_weights_OK_1 = 1.
sample_weights_OK_2 = 2.
reg = LinearRegression()
# make sure the "OK" sample weights actually work
reg.fit(X, y, sample_weights_OK)
reg.fit(X, y, sample_weights_OK_1)
reg.fit(X, y, sample_weights_OK_2)
def test_fit_intercept():
# Test assertions on betas shape.
X2 = np.array([[0.38349978, 0.61650022],
[0.58853682, 0.41146318]])
X3 = np.array([[0.27677969, 0.70693172, 0.01628859],
[0.08385139, 0.20692515, 0.70922346]])
y = np.array([1, 1])
lr2_without_intercept = LinearRegression(fit_intercept=False).fit(X2, y)
lr2_with_intercept = LinearRegression(fit_intercept=True).fit(X2, y)
lr3_without_intercept = LinearRegression(fit_intercept=False).fit(X3, y)
lr3_with_intercept = LinearRegression(fit_intercept=True).fit(X3, y)
assert_equal(lr2_with_intercept.coef_.shape,
lr2_without_intercept.coef_.shape)
assert_equal(lr3_with_intercept.coef_.shape,
lr3_without_intercept.coef_.shape)
assert_equal(lr2_without_intercept.coef_.ndim,
lr3_without_intercept.coef_.ndim)
def test_linear_regression_sparse(random_state=0):
# Test that linear regression also works with sparse data
random_state = check_random_state(random_state)
for i in range(10):
n = 100
X = sparse.eye(n, n)
beta = random_state.rand(n)
y = X * beta[:, np.newaxis]
ols = LinearRegression()
ols.fit(X, y.ravel())
assert_array_almost_equal(beta, ols.coef_ + ols.intercept_)
assert_array_almost_equal(ols.predict(X) - y.ravel(), 0)
def test_linear_regression_multiple_outcome(random_state=0):
# Test multiple-outcome linear regressions
X, y = make_regression(random_state=random_state)
Y = np.vstack((y, y)).T
n_features = X.shape[1]
reg = LinearRegression(fit_intercept=True)
reg.fit((X), Y)
assert_equal(reg.coef_.shape, (2, n_features))
Y_pred = reg.predict(X)
reg.fit(X, y)
y_pred = reg.predict(X)
assert_array_almost_equal(np.vstack((y_pred, y_pred)).T, Y_pred, decimal=3)
def test_linear_regression_sparse_multiple_outcome(random_state=0):
# Test multiple-outcome linear regressions with sparse data
random_state = check_random_state(random_state)
X, y = make_sparse_uncorrelated(random_state=random_state)
X = sparse.coo_matrix(X)
Y = np.vstack((y, y)).T
n_features = X.shape[1]
ols = LinearRegression()
ols.fit(X, Y)
assert_equal(ols.coef_.shape, (2, n_features))
Y_pred = ols.predict(X)
ols.fit(X, y.ravel())
y_pred = ols.predict(X)
assert_array_almost_equal(np.vstack((y_pred, y_pred)).T, Y_pred, decimal=3)
def test_preprocess_data():
n_samples = 200
n_features = 2
X = rng.rand(n_samples, n_features)
y = rng.rand(n_samples)
expected_X_mean = np.mean(X, axis=0)
expected_X_norm = np.std(X, axis=0) * np.sqrt(X.shape[0])
expected_y_mean = np.mean(y, axis=0)
Xt, yt, X_mean, y_mean, X_norm = \
_preprocess_data(X, y, fit_intercept=False, normalize=False)
assert_array_almost_equal(X_mean, np.zeros(n_features))
assert_array_almost_equal(y_mean, 0)
assert_array_almost_equal(X_norm, np.ones(n_features))
assert_array_almost_equal(Xt, X)
assert_array_almost_equal(yt, y)
Xt, yt, X_mean, y_mean, X_norm = \
_preprocess_data(X, y, fit_intercept=True, normalize=False)
assert_array_almost_equal(X_mean, expected_X_mean)
assert_array_almost_equal(y_mean, expected_y_mean)
assert_array_almost_equal(X_norm, np.ones(n_features))
assert_array_almost_equal(Xt, X - expected_X_mean)
assert_array_almost_equal(yt, y - expected_y_mean)
Xt, yt, X_mean, y_mean, X_norm = \
_preprocess_data(X, y, fit_intercept=True, normalize=True)
assert_array_almost_equal(X_mean, expected_X_mean)
assert_array_almost_equal(y_mean, expected_y_mean)
assert_array_almost_equal(X_norm, expected_X_norm)
assert_array_almost_equal(Xt, (X - expected_X_mean) / expected_X_norm)
assert_array_almost_equal(yt, y - expected_y_mean)
def test_preprocess_data_multioutput():
n_samples = 200
n_features = 3
n_outputs = 2
X = rng.rand(n_samples, n_features)
y = rng.rand(n_samples, n_outputs)
expected_y_mean = np.mean(y, axis=0)
args = [X, sparse.csc_matrix(X)]
for X in args:
_, yt, _, y_mean, _ = _preprocess_data(X, y, fit_intercept=False,
normalize=False)
assert_array_almost_equal(y_mean, np.zeros(n_outputs))
assert_array_almost_equal(yt, y)
_, yt, _, y_mean, _ = _preprocess_data(X, y, fit_intercept=True,
normalize=False)
assert_array_almost_equal(y_mean, expected_y_mean)
assert_array_almost_equal(yt, y - y_mean)
_, yt, _, y_mean, _ = _preprocess_data(X, y, fit_intercept=True,
normalize=True)
assert_array_almost_equal(y_mean, expected_y_mean)
assert_array_almost_equal(yt, y - y_mean)
def test_preprocess_data_weighted():
n_samples = 200
n_features = 2
X = rng.rand(n_samples, n_features)
y = rng.rand(n_samples)
sample_weight = rng.rand(n_samples)
expected_X_mean = np.average(X, axis=0, weights=sample_weight)
expected_y_mean = np.average(y, axis=0, weights=sample_weight)
# XXX: if normalize=True, should we expect a weighted standard deviation?
# Currently not weighted, but calculated with respect to weighted mean
expected_X_norm = (np.sqrt(X.shape[0]) *
np.mean((X - expected_X_mean) ** 2, axis=0) ** .5)
Xt, yt, X_mean, y_mean, X_norm = \
_preprocess_data(X, y, fit_intercept=True, normalize=False,
sample_weight=sample_weight)
assert_array_almost_equal(X_mean, expected_X_mean)
assert_array_almost_equal(y_mean, expected_y_mean)
assert_array_almost_equal(X_norm, np.ones(n_features))
assert_array_almost_equal(Xt, X - expected_X_mean)
assert_array_almost_equal(yt, y - expected_y_mean)
Xt, yt, X_mean, y_mean, X_norm = \
_preprocess_data(X, y, fit_intercept=True, normalize=True,
sample_weight=sample_weight)
assert_array_almost_equal(X_mean, expected_X_mean)
assert_array_almost_equal(y_mean, expected_y_mean)
assert_array_almost_equal(X_norm, expected_X_norm)
assert_array_almost_equal(Xt, (X - expected_X_mean) / expected_X_norm)
assert_array_almost_equal(yt, y - expected_y_mean)
def test_sparse_preprocess_data_with_return_mean():
n_samples = 200
n_features = 2
# random_state not supported yet in sparse.rand
X = sparse.rand(n_samples, n_features, density=.5) # , random_state=rng
X = X.tolil()
y = rng.rand(n_samples)
XA = X.toarray()
expected_X_norm = np.std(XA, axis=0) * np.sqrt(X.shape[0])
Xt, yt, X_mean, y_mean, X_norm = \
_preprocess_data(X, y, fit_intercept=False, normalize=False,
return_mean=True)
assert_array_almost_equal(X_mean, np.zeros(n_features))
assert_array_almost_equal(y_mean, 0)
assert_array_almost_equal(X_norm, np.ones(n_features))
assert_array_almost_equal(Xt.A, XA)
assert_array_almost_equal(yt, y)
Xt, yt, X_mean, y_mean, X_norm = \
_preprocess_data(X, y, fit_intercept=True, normalize=False,
return_mean=True)
assert_array_almost_equal(X_mean, np.mean(XA, axis=0))
assert_array_almost_equal(y_mean, np.mean(y, axis=0))
assert_array_almost_equal(X_norm, np.ones(n_features))
assert_array_almost_equal(Xt.A, XA)
assert_array_almost_equal(yt, y - np.mean(y, axis=0))
Xt, yt, X_mean, y_mean, X_norm = \
_preprocess_data(X, y, fit_intercept=True, normalize=True,
return_mean=True)
assert_array_almost_equal(X_mean, np.mean(XA, axis=0))
assert_array_almost_equal(y_mean, np.mean(y, axis=0))
assert_array_almost_equal(X_norm, expected_X_norm)
assert_array_almost_equal(Xt.A, XA / expected_X_norm)
assert_array_almost_equal(yt, y - np.mean(y, axis=0))
def test_csr_preprocess_data():
# Test output format of _preprocess_data, when input is csr
X, y = make_regression()
X[X < 2.5] = 0.0
csr = sparse.csr_matrix(X)
csr_, y, _, _, _ = _preprocess_data(csr, y, True)
assert_equal(csr_.getformat(), 'csr')
def test_rescale_data():
n_samples = 200
n_features = 2
sample_weight = 1.0 + rng.rand(n_samples)
X = rng.rand(n_samples, n_features)
y = rng.rand(n_samples)
rescaled_X, rescaled_y = _rescale_data(X, y, sample_weight)
rescaled_X2 = X * np.sqrt(sample_weight)[:, np.newaxis]
rescaled_y2 = y * np.sqrt(sample_weight)
assert_array_almost_equal(rescaled_X, rescaled_X2)
assert_array_almost_equal(rescaled_y, rescaled_y2)
@ignore_warnings # all deprecation warnings
def test_deprecation_center_data():
n_samples = 200
n_features = 2
w = 1.0 + rng.rand(n_samples)
X = rng.rand(n_samples, n_features)
y = rng.rand(n_samples)
param_grid = product([True, False], [True, False], [True, False],
[None, w])
for (fit_intercept, normalize, copy, sample_weight) in param_grid:
XX = X.copy() # such that we can try copy=False as well
X1, y1, X1_mean, X1_var, y1_mean = \
center_data(XX, y, fit_intercept=fit_intercept,
normalize=normalize, copy=copy,
sample_weight=sample_weight)
XX = X.copy()
X2, y2, X2_mean, X2_var, y2_mean = \
_preprocess_data(XX, y, fit_intercept=fit_intercept,
normalize=normalize, copy=copy,
sample_weight=sample_weight)
assert_array_almost_equal(X1, X2)
assert_array_almost_equal(y1, y2)
assert_array_almost_equal(X1_mean, X2_mean)
assert_array_almost_equal(X1_var, X2_var)
assert_array_almost_equal(y1_mean, y2_mean)
# Sparse cases
X = sparse.csr_matrix(X)
for (fit_intercept, normalize, copy, sample_weight) in param_grid:
X1, y1, X1_mean, X1_var, y1_mean = \
center_data(X, y, fit_intercept=fit_intercept, normalize=normalize,
copy=copy, sample_weight=sample_weight)
X2, y2, X2_mean, X2_var, y2_mean = \
_preprocess_data(X, y, fit_intercept=fit_intercept,
normalize=normalize, copy=copy,
sample_weight=sample_weight, return_mean=False)
assert_array_almost_equal(X1.toarray(), X2.toarray())
assert_array_almost_equal(y1, y2)
assert_array_almost_equal(X1_mean, X2_mean)
assert_array_almost_equal(X1_var, X2_var)
assert_array_almost_equal(y1_mean, y2_mean)
for (fit_intercept, normalize) in product([True, False], [True, False]):
X1, y1, X1_mean, X1_var, y1_mean = \
sparse_center_data(X, y, fit_intercept=fit_intercept,
normalize=normalize)
X2, y2, X2_mean, X2_var, y2_mean = \
_preprocess_data(X, y, fit_intercept=fit_intercept,
normalize=normalize, return_mean=True)
assert_array_almost_equal(X1.toarray(), X2.toarray())
assert_array_almost_equal(y1, y2)
assert_array_almost_equal(X1_mean, X2_mean)
assert_array_almost_equal(X1_var, X2_var)
assert_array_almost_equal(y1_mean, y2_mean)
| bsd-3-clause |
SachinJanani/zeppelin | spark/src/main/resources/python/zeppelin_pyspark.py | 16 | 12106 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os, sys, getopt, traceback, json, re
from py4j.java_gateway import java_import, JavaGateway, GatewayClient
from py4j.protocol import Py4JJavaError
from pyspark.conf import SparkConf
from pyspark.context import SparkContext
import ast
import warnings
# for back compatibility
from pyspark.sql import SQLContext, HiveContext, Row
class Logger(object):
def __init__(self):
pass
def write(self, message):
intp.appendOutput(message)
def reset(self):
pass
def flush(self):
pass
class PyZeppelinContext(dict):
def __init__(self, zc):
self.z = zc
self._displayhook = lambda *args: None
def show(self, obj):
from pyspark.sql import DataFrame
if isinstance(obj, DataFrame):
print(self.z.showData(obj._jdf))
else:
print(str(obj))
# Implementing these special methods makes operating on the context more Pythonic
def __setitem__(self, key, item):
self.z.put(key, item)
def __getitem__(self, key):
return self.z.get(key)
def __delitem__(self, key):
self.z.remove(key)
def __contains__(self, item):
return self.z.containsKey(item)
def add(self, key, value):
self.__setitem__(key, value)
def put(self, key, value):
self.__setitem__(key, value)
def get(self, key):
return self.__getitem__(key)
def getInterpreterContext(self):
return self.z.getInterpreterContext()
def input(self, name, defaultValue=""):
return self.z.input(name, defaultValue)
def select(self, name, options, defaultValue=""):
# auto_convert to ArrayList doesn't match the method signature on JVM side
tuples = list(map(lambda items: self.__tupleToScalaTuple2(items), options))
iterables = gateway.jvm.scala.collection.JavaConversions.collectionAsScalaIterable(tuples)
return self.z.select(name, defaultValue, iterables)
def checkbox(self, name, options, defaultChecked=None):
if defaultChecked is None:
defaultChecked = []
optionTuples = list(map(lambda items: self.__tupleToScalaTuple2(items), options))
optionIterables = gateway.jvm.scala.collection.JavaConversions.collectionAsScalaIterable(optionTuples)
defaultCheckedIterables = gateway.jvm.scala.collection.JavaConversions.collectionAsScalaIterable(defaultChecked)
checkedItems = gateway.jvm.scala.collection.JavaConversions.seqAsJavaList(self.z.checkbox(name, defaultCheckedIterables, optionIterables))
result = []
for checkedItem in checkedItems:
result.append(checkedItem)
return result
def registerHook(self, event, cmd, replName=None):
if replName is None:
self.z.registerHook(event, cmd)
else:
self.z.registerHook(event, cmd, replName)
def unregisterHook(self, event, replName=None):
if replName is None:
self.z.unregisterHook(event)
else:
self.z.unregisterHook(event, replName)
def getHook(self, event, replName=None):
if replName is None:
return self.z.getHook(event)
return self.z.getHook(event, replName)
def _setup_matplotlib(self):
# If we don't have matplotlib installed don't bother continuing
try:
import matplotlib
except ImportError:
return
# Make sure custom backends are available in the PYTHONPATH
rootdir = os.environ.get('ZEPPELIN_HOME', os.getcwd())
mpl_path = os.path.join(rootdir, 'interpreter', 'lib', 'python')
if mpl_path not in sys.path:
sys.path.append(mpl_path)
# Finally check if backend exists, and if so configure as appropriate
try:
matplotlib.use('module://backend_zinline')
import backend_zinline
# Everything looks good so make config assuming that we are using
# an inline backend
self._displayhook = backend_zinline.displayhook
self.configure_mpl(width=600, height=400, dpi=72, fontsize=10,
interactive=True, format='png', context=self.z)
except ImportError:
# Fall back to Agg if no custom backend installed
matplotlib.use('Agg')
warnings.warn("Unable to load inline matplotlib backend, "
"falling back to Agg")
def configure_mpl(self, **kwargs):
import mpl_config
mpl_config.configure(**kwargs)
def __tupleToScalaTuple2(self, tuple):
if (len(tuple) == 2):
return gateway.jvm.scala.Tuple2(tuple[0], tuple[1])
else:
raise IndexError("options must be a list of tuple of 2")
class SparkVersion(object):
SPARK_1_4_0 = 10400
SPARK_1_3_0 = 10300
SPARK_2_0_0 = 20000
def __init__(self, versionNumber):
self.version = versionNumber
def isAutoConvertEnabled(self):
return self.version >= self.SPARK_1_4_0
def isImportAllPackageUnderSparkSql(self):
return self.version >= self.SPARK_1_3_0
def isSpark2(self):
return self.version >= self.SPARK_2_0_0
class PySparkCompletion:
def __init__(self, interpreterObject):
self.interpreterObject = interpreterObject
def getGlobalCompletion(self):
objectDefList = []
try:
for completionItem in list(globals().keys()):
objectDefList.append(completionItem)
except:
return None
else:
return objectDefList
def getMethodCompletion(self, text_value):
execResult = locals()
if text_value == None:
return None
completion_target = text_value
try:
if len(completion_target) <= 0:
return None
if text_value[-1] == ".":
completion_target = text_value[:-1]
exec("{} = dir({})".format("objectDefList", completion_target), globals(), execResult)
except:
return None
else:
return list(execResult['objectDefList'])
def getCompletion(self, text_value):
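# Merge global-scope completions with attribute completions for `text_value`,
# drop dunder names, and hand the JSON-encoded result back to the JVM side via
# setStatementsFinished.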
completionList = set()
globalCompletionList = self.getGlobalCompletion()
if globalCompletionList != None:
for completionItem in list(globalCompletionList):
completionList.add(completionItem)
if text_value != None:
objectCompletionList = self.getMethodCompletion(text_value)
if objectCompletionList != None:
for completionItem in list(objectCompletionList):
completionList.add(completionItem)
if len(completionList) <= 0:
self.interpreterObject.setStatementsFinished("", False)
else:
result = json.dumps(list(filter(lambda x : not re.match("^__.*", x), list(completionList))))
self.interpreterObject.setStatementsFinished(result, False)
client = GatewayClient(port=int(sys.argv[1]))
sparkVersion = SparkVersion(int(sys.argv[2]))
if sparkVersion.isSpark2():
from pyspark.sql import SparkSession
else:
from pyspark.sql import SchemaRDD
if sparkVersion.isAutoConvertEnabled():
gateway = JavaGateway(client, auto_convert = True)
else:
gateway = JavaGateway(client)
java_import(gateway.jvm, "org.apache.spark.SparkEnv")
java_import(gateway.jvm, "org.apache.spark.SparkConf")
java_import(gateway.jvm, "org.apache.spark.api.java.*")
java_import(gateway.jvm, "org.apache.spark.api.python.*")
java_import(gateway.jvm, "org.apache.spark.mllib.api.python.*")
intp = gateway.entry_point
output = Logger()
sys.stdout = output
sys.stderr = output
intp.onPythonScriptInitialized(os.getpid())
jsc = intp.getJavaSparkContext()
if sparkVersion.isImportAllPackageUnderSparkSql():
java_import(gateway.jvm, "org.apache.spark.sql.*")
java_import(gateway.jvm, "org.apache.spark.sql.hive.*")
else:
java_import(gateway.jvm, "org.apache.spark.sql.SQLContext")
java_import(gateway.jvm, "org.apache.spark.sql.hive.HiveContext")
java_import(gateway.jvm, "org.apache.spark.sql.hive.LocalHiveContext")
java_import(gateway.jvm, "org.apache.spark.sql.hive.TestHiveContext")
java_import(gateway.jvm, "scala.Tuple2")
_zcUserQueryNameSpace = {}
jconf = intp.getSparkConf()
conf = SparkConf(_jvm = gateway.jvm, _jconf = jconf)
sc = _zsc_ = SparkContext(jsc=jsc, gateway=gateway, conf=conf)
_zcUserQueryNameSpace["_zsc_"] = _zsc_
_zcUserQueryNameSpace["sc"] = sc
if sparkVersion.isSpark2():
spark = __zSpark__ = SparkSession(sc, intp.getSparkSession())
sqlc = __zSqlc__ = __zSpark__._wrapped
_zcUserQueryNameSpace["sqlc"] = sqlc
_zcUserQueryNameSpace["__zSqlc__"] = __zSqlc__
_zcUserQueryNameSpace["spark"] = spark
_zcUserQueryNameSpace["__zSpark__"] = __zSpark__
else:
sqlc = __zSqlc__ = SQLContext(sparkContext=sc, sqlContext=intp.getSQLContext())
_zcUserQueryNameSpace["sqlc"] = sqlc
_zcUserQueryNameSpace["__zSqlc__"] = sqlc
sqlContext = __zSqlc__
_zcUserQueryNameSpace["sqlContext"] = sqlContext
completion = __zeppelin_completion__ = PySparkCompletion(intp)
_zcUserQueryNameSpace["completion"] = completion
_zcUserQueryNameSpace["__zeppelin_completion__"] = __zeppelin_completion__
z = __zeppelin__ = PyZeppelinContext(intp.getZeppelinContext())
__zeppelin__._setup_matplotlib()
_zcUserQueryNameSpace["z"] = z
_zcUserQueryNameSpace["__zeppelin__"] = __zeppelin__
while True :
req = intp.getStatements()
try:
stmts = req.statements().split("\n")
jobGroup = req.jobGroup()
jobDesc = req.jobDescription()
# Get post-execute hooks
try:
global_hook = intp.getHook('post_exec_dev')
except:
global_hook = None
try:
user_hook = __zeppelin__.getHook('post_exec')
except:
user_hook = None
nhooks = 0
for hook in (global_hook, user_hook):
if hook:
nhooks += 1
if stmts:
# use exec mode to compile the statements except the last statement,
# so that the last statement's evaluation will be printed to stdout
sc.setJobGroup(jobGroup, jobDesc)
code = compile('\n'.join(stmts), '<stdin>', 'exec', ast.PyCF_ONLY_AST, 1)
to_run_hooks = []
if (nhooks > 0):
to_run_hooks = code.body[-nhooks:]
to_run_exec, to_run_single = (code.body[:-(nhooks + 1)],
[code.body[-(nhooks + 1)]])
try:
for node in to_run_exec:
mod = ast.Module([node])
code = compile(mod, '<stdin>', 'exec')
exec(code, _zcUserQueryNameSpace)
for node in to_run_single:
mod = ast.Interactive([node])
code = compile(mod, '<stdin>', 'single')
exec(code, _zcUserQueryNameSpace)
for node in to_run_hooks:
mod = ast.Module([node])
code = compile(mod, '<stdin>', 'exec')
exec(code, _zcUserQueryNameSpace)
intp.setStatementsFinished("", False)
except Py4JJavaError:
# raise it to outside try except
raise
except:
exception = traceback.format_exc()
m = re.search("File \"<stdin>\", line (\d+).*", exception)
if m:
line_no = int(m.group(1))
intp.setStatementsFinished(
"Fail to execute line {}: {}\n".format(line_no, stmts[line_no - 1]) + exception, True)
else:
intp.setStatementsFinished(exception, True)
else:
intp.setStatementsFinished("", False)
except Py4JJavaError:
excInnerError = traceback.format_exc() # format_tb() does not return the inner exception
innerErrorStart = excInnerError.find("Py4JJavaError:")
if innerErrorStart > -1:
excInnerError = excInnerError[innerErrorStart:]
intp.setStatementsFinished(excInnerError + str(sys.exc_info()), True)
except:
intp.setStatementsFinished(traceback.format_exc(), True)
output.reset()
| apache-2.0 |
shangwuhencc/scikit-learn | sklearn/linear_model/tests/test_coordinate_descent.py | 114 | 25281 | # Authors: Olivier Grisel <[email protected]>
# Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
from sys import version_info
import numpy as np
from scipy import interpolate, sparse
from copy import deepcopy
from sklearn.datasets import load_boston
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import SkipTest
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import TempMemmap
from sklearn.linear_model.coordinate_descent import Lasso, \
LassoCV, ElasticNet, ElasticNetCV, MultiTaskLasso, MultiTaskElasticNet, \
MultiTaskElasticNetCV, MultiTaskLassoCV, lasso_path, enet_path
from sklearn.linear_model import LassoLarsCV, lars_path
from sklearn.utils import check_array
def check_warnings():
if version_info < (2, 6):
raise SkipTest("Testing for warnings is not supported in versions \
older than Python 2.6")
def test_lasso_zero():
# Check that the lasso can handle zero data without crashing
X = [[0], [0], [0]]
y = [0, 0, 0]
clf = Lasso(alpha=0.1).fit(X, y)
pred = clf.predict([[1], [2], [3]])
assert_array_almost_equal(clf.coef_, [0])
assert_array_almost_equal(pred, [0, 0, 0])
assert_almost_equal(clf.dual_gap_, 0)
def test_lasso_toy():
# Test Lasso on a toy example for various values of alpha.
# When validating this against glmnet, notice that glmnet divides the
# objective by nobs.
X = [[-1], [0], [1]]
Y = [-1, 0, 1] # just a straight line
T = [[2], [3], [4]] # test sample
clf = Lasso(alpha=1e-8)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [1])
assert_array_almost_equal(pred, [2, 3, 4])
assert_almost_equal(clf.dual_gap_, 0)
clf = Lasso(alpha=0.1)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [.85])
assert_array_almost_equal(pred, [1.7, 2.55, 3.4])
assert_almost_equal(clf.dual_gap_, 0)
clf = Lasso(alpha=0.5)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [.25])
assert_array_almost_equal(pred, [0.5, 0.75, 1.])
assert_almost_equal(clf.dual_gap_, 0)
clf = Lasso(alpha=1)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [.0])
assert_array_almost_equal(pred, [0, 0, 0])
assert_almost_equal(clf.dual_gap_, 0)
def test_enet_toy():
# Test ElasticNet for various parameters of alpha and l1_ratio.
# Actually, the parameters alpha = 0 should not be allowed. However,
# we test it as a border case.
# ElasticNet is tested with and without precomputed Gram matrix
X = np.array([[-1.], [0.], [1.]])
Y = [-1, 0, 1] # just a straight line
T = [[2.], [3.], [4.]] # test sample
# this should be the same as lasso
clf = ElasticNet(alpha=1e-8, l1_ratio=1.0)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [1])
assert_array_almost_equal(pred, [2, 3, 4])
assert_almost_equal(clf.dual_gap_, 0)
clf = ElasticNet(alpha=0.5, l1_ratio=0.3, max_iter=100,
precompute=False)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.50819], decimal=3)
assert_array_almost_equal(pred, [1.0163, 1.5245, 2.0327], decimal=3)
assert_almost_equal(clf.dual_gap_, 0)
clf.set_params(max_iter=100, precompute=True)
clf.fit(X, Y) # with Gram
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.50819], decimal=3)
assert_array_almost_equal(pred, [1.0163, 1.5245, 2.0327], decimal=3)
assert_almost_equal(clf.dual_gap_, 0)
clf.set_params(max_iter=100, precompute=np.dot(X.T, X))
clf.fit(X, Y) # with Gram
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.50819], decimal=3)
assert_array_almost_equal(pred, [1.0163, 1.5245, 2.0327], decimal=3)
assert_almost_equal(clf.dual_gap_, 0)
clf = ElasticNet(alpha=0.5, l1_ratio=0.5)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.45454], 3)
assert_array_almost_equal(pred, [0.9090, 1.3636, 1.8181], 3)
assert_almost_equal(clf.dual_gap_, 0)
def build_dataset(n_samples=50, n_features=200, n_informative_features=10,
n_targets=1):
"""
build an ill-posed linear regression problem with many noisy features and
comparatively few samples
"""
random_state = np.random.RandomState(0)
if n_targets > 1:
w = random_state.randn(n_features, n_targets)
else:
w = random_state.randn(n_features)
w[n_informative_features:] = 0.0
X = random_state.randn(n_samples, n_features)
y = np.dot(X, w)
X_test = random_state.randn(n_samples, n_features)
y_test = np.dot(X_test, w)
return X, y, X_test, y_test
def test_lasso_cv():
X, y, X_test, y_test = build_dataset()
max_iter = 150
clf = LassoCV(n_alphas=10, eps=1e-3, max_iter=max_iter).fit(X, y)
assert_almost_equal(clf.alpha_, 0.056, 2)
clf = LassoCV(n_alphas=10, eps=1e-3, max_iter=max_iter, precompute=True)
clf.fit(X, y)
assert_almost_equal(clf.alpha_, 0.056, 2)
# Check that the lars and the coordinate descent implementation
# select a similar alpha
lars = LassoLarsCV(normalize=False, max_iter=30).fit(X, y)
# for this we check that the selected alphas do not differ by more than
# one position in the grid of clf.alphas_
assert_true(np.abs(
np.searchsorted(clf.alphas_[::-1], lars.alpha_)
- np.searchsorted(clf.alphas_[::-1], clf.alpha_)) <= 1)
# check that they also give a similar MSE
mse_lars = interpolate.interp1d(lars.cv_alphas_, lars.cv_mse_path_.T)
np.testing.assert_approx_equal(mse_lars(clf.alphas_[5]).mean(),
clf.mse_path_[5].mean(), significant=2)
# test set
assert_greater(clf.score(X_test, y_test), 0.99)
def test_lasso_cv_positive_constraint():
X, y, X_test, y_test = build_dataset()
max_iter = 500
# Ensure the unconstrained fit has a negative coefficient
clf_unconstrained = LassoCV(n_alphas=3, eps=1e-1, max_iter=max_iter, cv=2,
n_jobs=1)
clf_unconstrained.fit(X, y)
assert_true(min(clf_unconstrained.coef_) < 0)
# On same data, constrained fit has non-negative coefficients
clf_constrained = LassoCV(n_alphas=3, eps=1e-1, max_iter=max_iter,
positive=True, cv=2, n_jobs=1)
clf_constrained.fit(X, y)
assert_true(min(clf_constrained.coef_) >= 0)
def test_lasso_path_return_models_vs_new_return_gives_same_coefficients():
# Test that lasso_path with lars_path style output gives the
# same result
# Some toy data
X = np.array([[1, 2, 3.1], [2.3, 5.4, 4.3]]).T
y = np.array([1, 2, 3.1])
alphas = [5., 1., .5]
# Use lars_path and lasso_path(new output) with 1D linear interpolation
# to compute the same path
alphas_lars, _, coef_path_lars = lars_path(X, y, method='lasso')
coef_path_cont_lars = interpolate.interp1d(alphas_lars[::-1],
coef_path_lars[:, ::-1])
alphas_lasso2, coef_path_lasso2, _ = lasso_path(X, y, alphas=alphas,
return_models=False)
coef_path_cont_lasso = interpolate.interp1d(alphas_lasso2[::-1],
coef_path_lasso2[:, ::-1])
assert_array_almost_equal(
coef_path_cont_lasso(alphas), coef_path_cont_lars(alphas),
decimal=1)
def test_enet_path():
# We use a large number of samples and of informative features so that
# the l1_ratio selected is more toward ridge than lasso
X, y, X_test, y_test = build_dataset(n_samples=200, n_features=100,
n_informative_features=100)
max_iter = 150
# Here we have a small number of iterations, and thus the
# ElasticNet might not converge. This is to speed up tests
clf = ElasticNetCV(alphas=[0.01, 0.05, 0.1], eps=2e-3,
l1_ratio=[0.5, 0.7], cv=3,
max_iter=max_iter)
ignore_warnings(clf.fit)(X, y)
# Well-conditioned settings, we should have selected our
# smallest penalty
assert_almost_equal(clf.alpha_, min(clf.alphas_))
# Non-sparse ground truth: we should have selected an elastic-net
# that is closer to ridge than to lasso
assert_equal(clf.l1_ratio_, min(clf.l1_ratio))
clf = ElasticNetCV(alphas=[0.01, 0.05, 0.1], eps=2e-3,
l1_ratio=[0.5, 0.7], cv=3,
max_iter=max_iter, precompute=True)
ignore_warnings(clf.fit)(X, y)
# Well-conditioned settings, we should have selected our
# smallest penalty
assert_almost_equal(clf.alpha_, min(clf.alphas_))
# Non-sparse ground truth: we should have selected an elastic-net
# that is closer to ridge than to lasso
assert_equal(clf.l1_ratio_, min(clf.l1_ratio))
# We are in well-conditioned settings with low noise: we should
# have a good test-set performance
assert_greater(clf.score(X_test, y_test), 0.99)
# Multi-output/target case
X, y, X_test, y_test = build_dataset(n_features=10, n_targets=3)
clf = MultiTaskElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7],
cv=3, max_iter=max_iter)
ignore_warnings(clf.fit)(X, y)
# We are in well-conditioned settings with low noise: we should
# have a good test-set performance
assert_greater(clf.score(X_test, y_test), 0.99)
assert_equal(clf.coef_.shape, (3, 10))
# Mono-output should have same cross-validated alpha_ and l1_ratio_
# in both cases.
X, y, _, _ = build_dataset(n_features=10)
clf1 = ElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7])
clf1.fit(X, y)
clf2 = MultiTaskElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7])
clf2.fit(X, y[:, np.newaxis])
assert_almost_equal(clf1.l1_ratio_, clf2.l1_ratio_)
assert_almost_equal(clf1.alpha_, clf2.alpha_)
def test_path_parameters():
X, y, _, _ = build_dataset()
max_iter = 100
clf = ElasticNetCV(n_alphas=50, eps=1e-3, max_iter=max_iter,
l1_ratio=0.5, tol=1e-3)
clf.fit(X, y) # new params
assert_almost_equal(0.5, clf.l1_ratio)
assert_equal(50, clf.n_alphas)
assert_equal(50, len(clf.alphas_))
def test_warm_start():
X, y, _, _ = build_dataset()
clf = ElasticNet(alpha=0.1, max_iter=5, warm_start=True)
ignore_warnings(clf.fit)(X, y)
ignore_warnings(clf.fit)(X, y) # do a second round with 5 iterations
clf2 = ElasticNet(alpha=0.1, max_iter=10)
ignore_warnings(clf2.fit)(X, y)
assert_array_almost_equal(clf2.coef_, clf.coef_)
def test_lasso_alpha_warning():
X = [[-1], [0], [1]]
Y = [-1, 0, 1] # just a straight line
clf = Lasso(alpha=0)
assert_warns(UserWarning, clf.fit, X, Y)
def test_lasso_positive_constraint():
X = [[-1], [0], [1]]
y = [1, 0, -1] # just a straight line with negative slope
lasso = Lasso(alpha=0.1, max_iter=1000, positive=True)
lasso.fit(X, y)
assert_true(min(lasso.coef_) >= 0)
lasso = Lasso(alpha=0.1, max_iter=1000, precompute=True, positive=True)
lasso.fit(X, y)
assert_true(min(lasso.coef_) >= 0)
def test_enet_positive_constraint():
X = [[-1], [0], [1]]
y = [1, 0, -1] # just a straight line with negative slope
enet = ElasticNet(alpha=0.1, max_iter=1000, positive=True)
enet.fit(X, y)
assert_true(min(enet.coef_) >= 0)
def test_enet_cv_positive_constraint():
X, y, X_test, y_test = build_dataset()
max_iter = 500
# Ensure the unconstrained fit has a negative coefficient
enetcv_unconstrained = ElasticNetCV(n_alphas=3, eps=1e-1,
max_iter=max_iter,
cv=2, n_jobs=1)
enetcv_unconstrained.fit(X, y)
assert_true(min(enetcv_unconstrained.coef_) < 0)
# On same data, constrained fit has non-negative coefficients
enetcv_constrained = ElasticNetCV(n_alphas=3, eps=1e-1, max_iter=max_iter,
cv=2, positive=True, n_jobs=1)
enetcv_constrained.fit(X, y)
assert_true(min(enetcv_constrained.coef_) >= 0)
def test_uniform_targets():
enet = ElasticNetCV(fit_intercept=True, n_alphas=3)
m_enet = MultiTaskElasticNetCV(fit_intercept=True, n_alphas=3)
lasso = LassoCV(fit_intercept=True, n_alphas=3)
m_lasso = MultiTaskLassoCV(fit_intercept=True, n_alphas=3)
models_single_task = (enet, lasso)
models_multi_task = (m_enet, m_lasso)
rng = np.random.RandomState(0)
X_train = rng.random_sample(size=(10, 3))
X_test = rng.random_sample(size=(10, 3))
y1 = np.empty(10)
y2 = np.empty((10, 2))
for model in models_single_task:
for y_values in (0, 5):
y1.fill(y_values)
assert_array_equal(model.fit(X_train, y1).predict(X_test), y1)
assert_array_equal(model.alphas_, [np.finfo(float).resolution]*3)
for model in models_multi_task:
for y_values in (0, 5):
y2[:, 0].fill(y_values)
y2[:, 1].fill(2 * y_values)
assert_array_equal(model.fit(X_train, y2).predict(X_test), y2)
assert_array_equal(model.alphas_, [np.finfo(float).resolution]*3)
def test_multi_task_lasso_and_enet():
X, y, X_test, y_test = build_dataset()
Y = np.c_[y, y]
# Y_test = np.c_[y_test, y_test]
clf = MultiTaskLasso(alpha=1, tol=1e-8).fit(X, Y)
assert_true(0 < clf.dual_gap_ < 1e-5)
assert_array_almost_equal(clf.coef_[0], clf.coef_[1])
clf = MultiTaskElasticNet(alpha=1, tol=1e-8).fit(X, Y)
assert_true(0 < clf.dual_gap_ < 1e-5)
assert_array_almost_equal(clf.coef_[0], clf.coef_[1])
def test_lasso_readonly_data():
X = np.array([[-1], [0], [1]])
Y = np.array([-1, 0, 1]) # just a straight line
T = np.array([[2], [3], [4]]) # test sample
with TempMemmap((X, Y)) as (X, Y):
clf = Lasso(alpha=0.5)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [.25])
assert_array_almost_equal(pred, [0.5, 0.75, 1.])
assert_almost_equal(clf.dual_gap_, 0)
def test_multi_task_lasso_readonly_data():
X, y, X_test, y_test = build_dataset()
Y = np.c_[y, y]
with TempMemmap((X, Y)) as (X, Y):
Y = np.c_[y, y]
clf = MultiTaskLasso(alpha=1, tol=1e-8).fit(X, Y)
assert_true(0 < clf.dual_gap_ < 1e-5)
assert_array_almost_equal(clf.coef_[0], clf.coef_[1])
def test_enet_multitarget():
n_targets = 3
X, y, _, _ = build_dataset(n_samples=10, n_features=8,
n_informative_features=10, n_targets=n_targets)
estimator = ElasticNet(alpha=0.01, fit_intercept=True)
estimator.fit(X, y)
coef, intercept, dual_gap = (estimator.coef_, estimator.intercept_,
estimator.dual_gap_)
for k in range(n_targets):
estimator.fit(X, y[:, k])
assert_array_almost_equal(coef[k, :], estimator.coef_)
assert_array_almost_equal(intercept[k], estimator.intercept_)
assert_array_almost_equal(dual_gap[k], estimator.dual_gap_)
def test_multioutput_enetcv_error():
X = np.random.randn(10, 2)
y = np.random.randn(10, 2)
clf = ElasticNetCV()
assert_raises(ValueError, clf.fit, X, y)
def test_multitask_enet_and_lasso_cv():
X, y, _, _ = build_dataset(n_features=100, n_targets=3)
clf = MultiTaskElasticNetCV().fit(X, y)
assert_almost_equal(clf.alpha_, 0.00556, 3)
clf = MultiTaskLassoCV().fit(X, y)
assert_almost_equal(clf.alpha_, 0.00278, 3)
X, y, _, _ = build_dataset(n_targets=3)
clf = MultiTaskElasticNetCV(n_alphas=50, eps=1e-3, max_iter=100,
l1_ratio=[0.3, 0.5], tol=1e-3)
clf.fit(X, y)
assert_equal(0.5, clf.l1_ratio_)
assert_equal((3, X.shape[1]), clf.coef_.shape)
assert_equal((3, ), clf.intercept_.shape)
assert_equal((2, 50, 3), clf.mse_path_.shape)
assert_equal((2, 50), clf.alphas_.shape)
X, y, _, _ = build_dataset(n_targets=3)
clf = MultiTaskLassoCV(n_alphas=50, eps=1e-3, max_iter=100, tol=1e-3)
clf.fit(X, y)
assert_equal((3, X.shape[1]), clf.coef_.shape)
assert_equal((3, ), clf.intercept_.shape)
assert_equal((50, 3), clf.mse_path_.shape)
assert_equal(50, len(clf.alphas_))
def test_1d_multioutput_enet_and_multitask_enet_cv():
X, y, _, _ = build_dataset(n_features=10)
y = y[:, np.newaxis]
clf = ElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7])
clf.fit(X, y[:, 0])
clf1 = MultiTaskElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7])
clf1.fit(X, y)
assert_almost_equal(clf.l1_ratio_, clf1.l1_ratio_)
assert_almost_equal(clf.alpha_, clf1.alpha_)
assert_almost_equal(clf.coef_, clf1.coef_[0])
assert_almost_equal(clf.intercept_, clf1.intercept_[0])
def test_1d_multioutput_lasso_and_multitask_lasso_cv():
X, y, _, _ = build_dataset(n_features=10)
y = y[:, np.newaxis]
clf = LassoCV(n_alphas=5, eps=2e-3)
clf.fit(X, y[:, 0])
clf1 = MultiTaskLassoCV(n_alphas=5, eps=2e-3)
clf1.fit(X, y)
assert_almost_equal(clf.alpha_, clf1.alpha_)
assert_almost_equal(clf.coef_, clf1.coef_[0])
assert_almost_equal(clf.intercept_, clf1.intercept_[0])
def test_sparse_input_dtype_enet_and_lassocv():
X, y, _, _ = build_dataset(n_features=10)
clf = ElasticNetCV(n_alphas=5)
clf.fit(sparse.csr_matrix(X), y)
clf1 = ElasticNetCV(n_alphas=5)
clf1.fit(sparse.csr_matrix(X, dtype=np.float32), y)
assert_almost_equal(clf.alpha_, clf1.alpha_, decimal=6)
assert_almost_equal(clf.coef_, clf1.coef_, decimal=6)
clf = LassoCV(n_alphas=5)
clf.fit(sparse.csr_matrix(X), y)
clf1 = LassoCV(n_alphas=5)
clf1.fit(sparse.csr_matrix(X, dtype=np.float32), y)
assert_almost_equal(clf.alpha_, clf1.alpha_, decimal=6)
assert_almost_equal(clf.coef_, clf1.coef_, decimal=6)
def test_precompute_invalid_argument():
X, y, _, _ = build_dataset()
for clf in [ElasticNetCV(precompute="invalid"),
LassoCV(precompute="invalid")]:
assert_raises(ValueError, clf.fit, X, y)
def test_warm_start_convergence():
X, y, _, _ = build_dataset()
model = ElasticNet(alpha=1e-3, tol=1e-3).fit(X, y)
n_iter_reference = model.n_iter_
# This dataset is not trivial enough for the model to converge in one pass.
assert_greater(n_iter_reference, 2)
# Check that n_iter_ is invariant to multiple calls to fit
# when warm_start=False, all else being equal.
model.fit(X, y)
n_iter_cold_start = model.n_iter_
assert_equal(n_iter_cold_start, n_iter_reference)
# Fit the same model again, using a warm start: the optimizer just performs
# a single pass before checking that it has already converged
model.set_params(warm_start=True)
model.fit(X, y)
n_iter_warm_start = model.n_iter_
assert_equal(n_iter_warm_start, 1)
def test_warm_start_convergence_with_regularizer_decrement():
boston = load_boston()
X, y = boston.data, boston.target
# Train a model to converge on a lightly regularized problem
final_alpha = 1e-5
low_reg_model = ElasticNet(alpha=final_alpha).fit(X, y)
# Fitting a new model on a more regularized version of the same problem.
# Fitting with high regularization is easier, so it should converge faster
# in general.
high_reg_model = ElasticNet(alpha=final_alpha * 10).fit(X, y)
assert_greater(low_reg_model.n_iter_, high_reg_model.n_iter_)
# Fit the solution to the original, less regularized version of the
# problem but from the solution of the highly regularized variant of
# the problem as a better starting point. This should also converge
# faster than the original model that starts from zero.
warm_low_reg_model = deepcopy(high_reg_model)
warm_low_reg_model.set_params(warm_start=True, alpha=final_alpha)
warm_low_reg_model.fit(X, y)
assert_greater(low_reg_model.n_iter_, warm_low_reg_model.n_iter_)
def test_random_descent():
# Test that both random and cyclic selection give the same results.
# Ensure that the test models fully converge and check a wide
# range of conditions.
# This uses the coordinate descent algo using the gram trick.
X, y, _, _ = build_dataset(n_samples=50, n_features=20)
clf_cyclic = ElasticNet(selection='cyclic', tol=1e-8)
clf_cyclic.fit(X, y)
clf_random = ElasticNet(selection='random', tol=1e-8, random_state=42)
clf_random.fit(X, y)
assert_array_almost_equal(clf_cyclic.coef_, clf_random.coef_)
assert_almost_equal(clf_cyclic.intercept_, clf_random.intercept_)
# This uses the descent algo without the gram trick
clf_cyclic = ElasticNet(selection='cyclic', tol=1e-8)
clf_cyclic.fit(X.T, y[:20])
clf_random = ElasticNet(selection='random', tol=1e-8, random_state=42)
clf_random.fit(X.T, y[:20])
assert_array_almost_equal(clf_cyclic.coef_, clf_random.coef_)
assert_almost_equal(clf_cyclic.intercept_, clf_random.intercept_)
# Sparse Case
clf_cyclic = ElasticNet(selection='cyclic', tol=1e-8)
clf_cyclic.fit(sparse.csr_matrix(X), y)
clf_random = ElasticNet(selection='random', tol=1e-8, random_state=42)
clf_random.fit(sparse.csr_matrix(X), y)
assert_array_almost_equal(clf_cyclic.coef_, clf_random.coef_)
assert_almost_equal(clf_cyclic.intercept_, clf_random.intercept_)
# Multioutput case.
new_y = np.hstack((y[:, np.newaxis], y[:, np.newaxis]))
clf_cyclic = MultiTaskElasticNet(selection='cyclic', tol=1e-8)
clf_cyclic.fit(X, new_y)
clf_random = MultiTaskElasticNet(selection='random', tol=1e-8,
random_state=42)
clf_random.fit(X, new_y)
assert_array_almost_equal(clf_cyclic.coef_, clf_random.coef_)
assert_almost_equal(clf_cyclic.intercept_, clf_random.intercept_)
# Raise error when selection is not in cyclic or random.
clf_random = ElasticNet(selection='invalid')
assert_raises(ValueError, clf_random.fit, X, y)
def test_deprecation_precompute_enet():
# Test that setting precompute="auto" gives a Deprecation Warning.
X, y, _, _ = build_dataset(n_samples=20, n_features=10)
clf = ElasticNet(precompute="auto")
assert_warns(DeprecationWarning, clf.fit, X, y)
clf = Lasso(precompute="auto")
assert_warns(DeprecationWarning, clf.fit, X, y)
def test_enet_path_positive():
# Test that the coefs returned by positive=True in enet_path are positive
X, y, _, _ = build_dataset(n_samples=50, n_features=50)
for path in [enet_path, lasso_path]:
pos_path_coef = path(X, y, positive=True)[1]
assert_true(np.all(pos_path_coef >= 0))
def test_sparse_dense_descent_paths():
# Test that dense and sparse input give the same output for descent paths.
X, y, _, _ = build_dataset(n_samples=50, n_features=20)
csr = sparse.csr_matrix(X)
for path in [enet_path, lasso_path]:
_, coefs, _ = path(X, y, fit_intercept=False)
_, sparse_coefs, _ = path(csr, y, fit_intercept=False)
assert_array_almost_equal(coefs, sparse_coefs)
def test_check_input_false():
X, y, _, _ = build_dataset(n_samples=20, n_features=10)
X = check_array(X, order='F', dtype='float64')
y = check_array(X, order='F', dtype='float64')
clf = ElasticNet(selection='cyclic', tol=1e-8)
# Check that no error is raised if data is provided in the right format
clf.fit(X, y, check_input=False)
X = check_array(X, order='F', dtype='float32')
clf.fit(X, y, check_input=True)
# Check that an error is raised if data is provided in the wrong format,
# because of check bypassing
assert_raises(ValueError, clf.fit, X, y, check_input=False)
# With no input checking, providing X in C order should result in incorrect
# computation
X = check_array(X, order='C', dtype='float64')
clf.fit(X, y, check_input=False)
coef_false = clf.coef_
clf.fit(X, y, check_input=True)
coef_true = clf.coef_
assert_raises(AssertionError, assert_array_almost_equal,
coef_true, coef_false)
def test_overrided_gram_matrix():
X, y, _, _ = build_dataset(n_samples=20, n_features=10)
Gram = X.T.dot(X)
clf = ElasticNet(selection='cyclic', tol=1e-8, precompute=Gram,
fit_intercept=True)
assert_warns_message(UserWarning,
"Gram matrix was provided but X was centered"
" to fit intercept, "
"or X was normalized : recomputing Gram matrix.",
clf.fit, X, y)
| bsd-3-clause |
hayd/SimpleCV | SimpleCV/Tracking/SURFTracker.py | 12 | 4623 | from SimpleCV.base import np, itertools
try:
import cv2
except ImportError:
pass
def surfTracker(img, bb, ts, **kwargs):
"""
**DESCRIPTION**
(Dev Zone)
Tracking the object surrounded by the bounding box in the given
image using SURF keypoints.
Warning: Use this if you know what you are doing. Better have a
look at Image.track()
**PARAMETERS**
* *img* - Image - Image to be tracked.
* *bb* - tuple - Bounding Box tuple (x, y, w, h)
* *ts* - TrackSet - SimpleCV.Features.TrackSet.
Optional PARAMETERS:
eps_val - eps for DBSCAN
The maximum distance between two samples for them
to be considered as in the same neighborhood.
min_samples - min number of samples in DBSCAN
The number of samples in a neighborhood for a point
to be considered as a core point.
distance - thresholding KNN distance of each feature
if KNN distance > distance, point is discarded.
**RETURNS**
SimpleCV.Features.Tracking.SURFTracker
**HOW TO USE**
>>> cam = Camera()
>>> ts = []
>>> img = cam.getImage()
>>> bb = (100, 100, 300, 300) # get BB from somewhere
>>> ts = surfTracker(img, bb, ts, eps_val=0.7, distance=150)
>>> while (some_condition_here):
... img = cam.getImage()
... bb = ts[-1].bb
... ts = surfTracker(img, bb, ts, eps_val=0.7, distance=150)
... ts[-1].drawBB()
... img.show()
This is too confusing. Better to use the
Image.track() method.
READ MORE:
SURF based Tracker:
Matches keypoints from the template image and the current frame.
flann based matcher is used to match the keypoints.
Density based clustering is used to classify points as in-region (of the bounding box)
or out-region points. Using the in-region points, a new bounding box is predicted using
k-means.
"""
eps_val = 0.69
min_samples = 5
distance = 100
for key in kwargs:
if key == 'eps_val':
eps_val = kwargs[key]
elif key == 'min_samples':
min_samples = kwargs[key]
elif key == 'dist':
distance = kwargs[key]
from scipy.spatial import distance as Dis
from sklearn.cluster import DBSCAN
if len(ts) == 0:
# Get template keypoints
bb = (int(bb[0]), int(bb[1]), int(bb[2]), int(bb[3]))
templateImg = img
detector = cv2.FeatureDetector_create("SURF")
descriptor = cv2.DescriptorExtractor_create("SURF")
templateImg_cv2 = templateImg.getNumpyCv2()[bb[1]:bb[1]+bb[3], bb[0]:bb[0]+bb[2]]
tkp = detector.detect(templateImg_cv2)
tkp, td = descriptor.compute(templateImg_cv2, tkp)
else:
templateImg = ts[-1].templateImg
tkp = ts[-1].tkp
td = ts[-1].td
detector = ts[-1].detector
descriptor = ts[-1].descriptor
newimg = img.getNumpyCv2()
# Get image keypoints
skp = detector.detect(newimg)
skp, sd = descriptor.compute(newimg, skp)
if td is None:
print "Descriptors are Empty"
return None
if sd is None:
track = SURFTrack(img, skp, detector, descriptor, templateImg, skp, sd, tkp, td)
return track
# flann based matcher
flann_params = dict(algorithm=1, trees=4)
flann = cv2.flann_Index(sd, flann_params)
idx, dist = flann.knnSearch(td, 1, params={})
del flann
# filter points using distance criteria
dist = (dist[:,0]/2500.0).reshape(-1,).tolist()
idx = idx.reshape(-1).tolist()
indices = sorted(range(len(dist)), key=lambda i: dist[i])
dist = [dist[i] for i in indices]
idx = [idx[i] for i in indices]
skp_final = []
skp_final_labelled = []
data_cluster = []
for i, dis in itertools.izip(idx, dist):
if dis < distance:
skp_final.append(skp[i])
data_cluster.append((skp[i].pt[0], skp[i].pt[1]))
# Use Density based clustering to further filter out keypoints
n_data = np.asarray(data_cluster)
D = Dis.squareform(Dis.pdist(n_data))
S = 1 - (D/np.max(D))
db = DBSCAN(eps=eps_val, min_samples=min_samples).fit(S)
core_samples = db.core_sample_indices_
labels = db.labels_
for label, i in zip(labels, range(len(labels))):
if label==0:
skp_final_labelled.append(skp_final[i])
track = SURFTrack(img, skp_final_labelled, detector, descriptor, templateImg, skp, sd, tkp, td)
return track
from SimpleCV.Tracking import SURFTrack
| bsd-3-clause |
arahuja/scikit-learn | examples/decomposition/plot_pca_iris.py | 253 | 1801 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
PCA example with Iris Data-set
=========================================================
Principal Component Analysis applied to the Iris dataset.
See `here <http://en.wikipedia.org/wiki/Iris_flower_data_set>`_ for more
information on this dataset.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from sklearn import decomposition
from sklearn import datasets
np.random.seed(5)
centers = [[1, 1], [-1, -1], [1, -1]]
iris = datasets.load_iris()
X = iris.data
y = iris.target
fig = plt.figure(1, figsize=(4, 3))
plt.clf()
ax = Axes3D(fig, rect=[0, 0, .95, 1], elev=48, azim=134)
plt.cla()
pca = decomposition.PCA(n_components=3)
pca.fit(X)
X = pca.transform(X)
for name, label in [('Setosa', 0), ('Versicolour', 1), ('Virginica', 2)]:
ax.text3D(X[y == label, 0].mean(),
X[y == label, 1].mean() + 1.5,
X[y == label, 2].mean(), name,
horizontalalignment='center',
bbox=dict(alpha=.5, edgecolor='w', facecolor='w'))
# Reorder the labels to have colors matching the cluster results
y = np.choose(y, [1, 2, 0]).astype(np.float)
ax.scatter(X[:, 0], X[:, 1], X[:, 2], c=y, cmap=plt.cm.spectral)
x_surf = [X[:, 0].min(), X[:, 0].max(),
X[:, 0].min(), X[:, 0].max()]
y_surf = [X[:, 0].max(), X[:, 0].max(),
X[:, 0].min(), X[:, 0].min()]
x_surf = np.array(x_surf)
y_surf = np.array(y_surf)
v0 = pca.transform(pca.components_[0])
v0 /= v0[-1]
v1 = pca.transform(pca.components_[1])
v1 /= v1[-1]
ax.w_xaxis.set_ticklabels([])
ax.w_yaxis.set_ticklabels([])
ax.w_zaxis.set_ticklabels([])
plt.show()
| bsd-3-clause |
soravux/deap | examples/ga/kursawefct.py | 12 | 2948 | # This file is part of DEAP.
#
# DEAP is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 3 of
# the License, or (at your option) any later version.
#
# DEAP is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with DEAP. If not, see <http://www.gnu.org/licenses/>.
import array
import logging
import random
import numpy
from deap import algorithms
from deap import base
from deap import benchmarks
from deap import creator
from deap import tools
creator.create("FitnessMin", base.Fitness, weights=(-1.0, -1.0))
creator.create("Individual", array.array, typecode='d', fitness=creator.FitnessMin)
toolbox = base.Toolbox()
# Attribute generator
toolbox.register("attr_float", random.uniform, -5, 5)
# Structure initializers
toolbox.register("individual", tools.initRepeat, creator.Individual, toolbox.attr_float, 3)
toolbox.register("population", tools.initRepeat, list, toolbox.individual)
def checkBounds(min, max):
def decorator(func):
def wrapper(*args, **kwargs):
offspring = func(*args, **kwargs)
for child in offspring:
for i in range(len(child)):
if child[i] > max:
child[i] = max
elif child[i] < min:
child[i] = min
return offspring
return wrapper
return decorator
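# checkBounds is attached to "mate" and "mutate" via toolbox.decorate below, so
# offspring produced by blend crossover or Gaussian mutation are clipped back
# into the [-5, 5] search domain used to initialize the individuals.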
toolbox.register("evaluate", benchmarks.kursawe)
toolbox.register("mate", tools.cxBlend, alpha=1.5)
toolbox.register("mutate", tools.mutGaussian, mu=0, sigma=3, indpb=0.3)
toolbox.register("select", tools.selNSGA2)
toolbox.decorate("mate", checkBounds(-5, 5))
toolbox.decorate("mutate", checkBounds(-5, 5))
def main():
random.seed(64)
MU, LAMBDA = 50, 100
pop = toolbox.population(n=MU)
hof = tools.ParetoFront()
stats = tools.Statistics(lambda ind: ind.fitness.values)
stats.register("avg", numpy.mean, axis=0)
stats.register("std", numpy.std, axis=0)
stats.register("min", numpy.min, axis=0)
stats.register("max", numpy.max, axis=0)
algorithms.eaMuPlusLambda(pop, toolbox, mu=MU, lambda_=LAMBDA,
cxpb=0.5, mutpb=0.2, ngen=150,
stats=stats, halloffame=hof)
return pop, stats, hof
if __name__ == "__main__":
pop, stats, hof = main()
# import matplotlib.pyplot as plt
# import numpy
#
# front = numpy.array([ind.fitness.values for ind in pop])
# plt.scatter(front[:,0], front[:,1], c="b")
# plt.axis("tight")
# plt.show()
| lgpl-3.0 |
Endika/addons-yelizariev | import_custom/wizard/upload.py | 16 | 1822 | from openerp.osv import osv, fields
from openerp.tools.translate import _
from openerp import tools
import logging
_logger = logging.getLogger(__name__)
import base64
import tempfile
try:
import MySQLdb
import MySQLdb.cursors
from pandas import DataFrame
except ImportError:
pass
from ..import_custom import import_custom
import tarfile
import shutil
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
import os
import glob
class import_custom_upload(osv.TransientModel):
_name = "import_custom.upload"
_description = "Upload dumps"
_columns = {
'file': fields.char('file (*.tar.gz)'),
}
def upload_button(self, cr, uid, ids, context=None):
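# Extract the *.tar.gz referenced by the `file` field into a temporary
# directory, run the import_custom machinery over the extracted CSV files
# (with run_import=False), then remove the temporary directory.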
record = self.browse(cr, uid, ids[0])
tmp_dir,files = self.unzip_file(record.file.strip(), pattern='*.csv')
_logger.info('files: %s'%files)
instance = import_custom(self.pool, cr, uid,
'yelizariev', #instance_name
'import_custom', # module_name
run_import=False,
import_dir = '/home/tmp/',
context={'csv_files': files},
)
instance.run()
try:
shutil.rmtree(tmp_dir)
except:
pass
return instance
def unzip_file(self, filename, pattern='*'):
'''
extract a *.tar.gz archive into a temporary directory
returns the temporary directory and the list of extracted file paths matching ``pattern``
'''
tar = tarfile.open(name=filename)
dir = tempfile.mkdtemp(prefix='tmp_import_custom')
tar.extractall(path=dir)
return dir, glob.glob('%s/%s' % (dir, pattern))+glob.glob('%s/*/%s' % (dir, pattern))
| lgpl-3.0 |
cainiaocome/scikit-learn | examples/covariance/plot_lw_vs_oas.py | 248 | 2903 | """
=============================
Ledoit-Wolf vs OAS estimation
=============================
The usual covariance maximum likelihood estimate can be regularized
using shrinkage. Ledoit and Wolf proposed a closed-form formula to compute
the asymptotically optimal shrinkage parameter (minimizing a MSE
criterion), yielding the Ledoit-Wolf covariance estimate.
Chen et al. proposed an improvement of the Ledoit-Wolf shrinkage
parameter, the OAS coefficient, whose convergence is significantly
better under the assumption that the data are Gaussian.
This example, inspired by Chen's publication [1], shows a comparison
of the estimated MSE of the LW and OAS methods, using Gaussian
distributed data.
[1] "Shrinkage Algorithms for MMSE Covariance Estimation"
Chen et al., IEEE Trans. on Sign. Proc., Volume 58, Issue 10, October 2010.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from scipy.linalg import toeplitz, cholesky
from sklearn.covariance import LedoitWolf, OAS
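# Illustration only (not used by the benchmark below): both estimators shrink
# the empirical covariance S toward a scaled identity target,
#     Sigma_hat = (1 - a) * S + a * mu * I, with mu = trace(S) / n_features,
# and differ only in how the shrinkage coefficient `a` is chosen. A minimal
# sketch of that convex combination:
def _shrunk_covariance_sketch(S, a):
    """Return the shrunk estimate of a covariance matrix S for shrinkage `a`."""
    mu = np.trace(S) / S.shape[0]
    return (1. - a) * S + a * mu * np.eye(S.shape[0])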
np.random.seed(0)
###############################################################################
n_features = 100
# simulation covariance matrix (AR(1) process)
r = 0.1
real_cov = toeplitz(r ** np.arange(n_features))
coloring_matrix = cholesky(real_cov)
n_samples_range = np.arange(6, 31, 1)
repeat = 100
lw_mse = np.zeros((n_samples_range.size, repeat))
oa_mse = np.zeros((n_samples_range.size, repeat))
lw_shrinkage = np.zeros((n_samples_range.size, repeat))
oa_shrinkage = np.zeros((n_samples_range.size, repeat))
for i, n_samples in enumerate(n_samples_range):
for j in range(repeat):
X = np.dot(
np.random.normal(size=(n_samples, n_features)), coloring_matrix.T)
lw = LedoitWolf(store_precision=False, assume_centered=True)
lw.fit(X)
lw_mse[i, j] = lw.error_norm(real_cov, scaling=False)
lw_shrinkage[i, j] = lw.shrinkage_
oa = OAS(store_precision=False, assume_centered=True)
oa.fit(X)
oa_mse[i, j] = oa.error_norm(real_cov, scaling=False)
oa_shrinkage[i, j] = oa.shrinkage_
# plot MSE
plt.subplot(2, 1, 1)
plt.errorbar(n_samples_range, lw_mse.mean(1), yerr=lw_mse.std(1),
label='Ledoit-Wolf', color='g')
plt.errorbar(n_samples_range, oa_mse.mean(1), yerr=oa_mse.std(1),
label='OAS', color='r')
plt.ylabel("Squared error")
plt.legend(loc="upper right")
plt.title("Comparison of covariance estimators")
plt.xlim(5, 31)
# plot shrinkage coefficient
plt.subplot(2, 1, 2)
plt.errorbar(n_samples_range, lw_shrinkage.mean(1), yerr=lw_shrinkage.std(1),
label='Ledoit-Wolf', color='g')
plt.errorbar(n_samples_range, oa_shrinkage.mean(1), yerr=oa_shrinkage.std(1),
label='OAS', color='r')
plt.xlabel("n_samples")
plt.ylabel("Shrinkage")
plt.legend(loc="lower right")
plt.ylim(plt.ylim()[0], 1. + (plt.ylim()[1] - plt.ylim()[0]) / 10.)
plt.xlim(5, 31)
plt.show()
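# Illustrative addition (not part of the original example): the shrinkage
# coefficient can also be read off a single fit; `_single_fit_shrinkage` is a
# hypothetical helper included here only for demonstration.
def _single_fit_shrinkage(X):
    """Return the Ledoit-Wolf and OAS shrinkage for one sample matrix."""
    lw = LedoitWolf(assume_centered=True).fit(X)
    oa = OAS(assume_centered=True).fit(X)
    return lw.shrinkage_, oa.shrinkage_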
| bsd-3-clause |
Erotemic/utool | utool/util_graph.py | 1 | 85803 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
try:
import numpy as np
except ImportError:
pass
try:
import networkx as nx
except ImportError:
pass
import collections
import functools
from utool import util_inject
from utool import util_const
from six.moves import reduce, zip, range
import itertools as it
(print, rrr, profile) = util_inject.inject2(__name__)
def nx_topsort_nodes(graph, nodes):
import utool as ut
node_rank = ut.nx_topsort_rank(graph, nodes)
node_idx = ut.rebase_labels(node_rank)
sorted_nodes = ut.take(nodes, node_idx)
return sorted_nodes
def nx_topsort_rank(graph, nodes=None):
"""
graph = inputs.exi_graph.reverse()
nodes = flat_node_order_
"""
import utool as ut
if False:
        # Deterministic version
# Ok, this doesn't work.
dag_ranks = nx_dag_node_rank(graph, nodes)
toprank = ut.argsort(dag_ranks, list(map(str, nodes)))
else:
        # Non-deterministic version
dag_ranks = nx_dag_node_rank(graph, nodes)
topsort = list(nx.topological_sort(graph))
# print('topsort = %r' % (topsort,))
node_to_top_rank = ut.make_index_lookup(topsort)
toprank = ut.dict_take(node_to_top_rank, nodes)
return toprank
def nx_common_descendants(graph, node1, node2):
descendants1 = nx.descendants(graph, node1)
descendants2 = nx.descendants(graph, node2)
common_descendants = set.intersection(descendants1, descendants2)
return common_descendants
def nx_common_ancestors(graph, node1, node2):
ancestors1 = nx.ancestors(graph, node1)
ancestors2 = nx.ancestors(graph, node2)
common_ancestors = set.intersection(ancestors1, ancestors2)
return common_ancestors
def nx_make_adj_matrix(G):
import utool as ut
nodes = list(G.nodes())
node2_idx = ut.make_index_lookup(nodes)
edges = list(G.edges())
edge2_idx = ut.partial(ut.dict_take, node2_idx)
uv_list = ut.lmap(edge2_idx, edges)
A = np.zeros((len(nodes), len(nodes)))
A[tuple(np.array(uv_list).T)] = 1
return A
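# Illustrative usage sketch (added, not part of the original module;
# `_demo_nx_make_adj_matrix` is a hypothetical helper): the dense adjacency
# matrix follows the node order of ``G.nodes()``.
def _demo_nx_make_adj_matrix():
    G = nx.DiGraph([('a', 'b'), ('b', 'c')])
    A = nx_make_adj_matrix(G)
    assert A.shape == (3, 3)
    assert A.sum() == 2  # one nonzero entry per directed edge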
def nx_transitive_reduction(G, mode=1):
"""
References:
https://en.wikipedia.org/wiki/Transitive_reduction#Computing_the_reduction_using_the_closure
http://dept-info.labri.fr/~thibault/tmp/0201008.pdf
http://stackoverflow.com/questions/17078696/transitive-reduction-of-directed-graph-in-python
CommandLine:
python -m utool.util_graph nx_transitive_reduction --show
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_graph import * # NOQA
>>> import utool as ut
>>> G = nx.DiGraph([('a', 'b'), ('a', 'c'), ('a', 'e'),
>>> ('a', 'd'), ('b', 'd'), ('c', 'e'),
>>> ('d', 'e'), ('c', 'e'), ('c', 'd')])
>>> G = testdata_graph()[1]
>>> G_tr = nx_transitive_reduction(G, mode=1)
>>> G_tr2 = nx_transitive_reduction(G, mode=1)
>>> ut.quit_if_noshow()
>>> try:
>>> import plottool_ibeis as pt
>>> except ImportError:
>>> import plottool as pt
>>> G_ = nx.dag.transitive_closure(G)
>>> pt.show_nx(G , pnum=(1, 5, 1), fnum=1)
>>> pt.show_nx(G_tr , pnum=(1, 5, 2), fnum=1)
>>> pt.show_nx(G_tr2 , pnum=(1, 5, 3), fnum=1)
>>> pt.show_nx(G_ , pnum=(1, 5, 4), fnum=1)
>>> pt.show_nx(nx.dag.transitive_closure(G_tr), pnum=(1, 5, 5), fnum=1)
>>> ut.show_if_requested()
"""
import utool as ut
has_cycles = not nx.is_directed_acyclic_graph(G)
if has_cycles:
# FIXME: this does not work for cycle graphs.
# Need to do algorithm on SCCs
G_orig = G
G = nx.condensation(G_orig)
nodes = list(G.nodes())
node2_idx = ut.make_index_lookup(nodes)
# For each node u, perform DFS consider its set of (non-self) children C.
# For each descendant v, of a node in C, remove any edge from u to v.
if mode == 1:
G_tr = G.copy()
for parent in G_tr.nodes():
# Remove self loops
if G_tr.has_edge(parent, parent):
G_tr.remove_edge(parent, parent)
# For each child of the parent
for child in list(G_tr.successors(parent)):
# Preorder nodes includes its argument (no added complexity)
for gchild in list(G_tr.successors(child)):
# Remove all edges from parent to non-child descendants
for descendant in nx.dfs_preorder_nodes(G_tr, gchild):
if G_tr.has_edge(parent, descendant):
G_tr.remove_edge(parent, descendant)
if has_cycles:
# Uncondense graph
uncondensed_G_tr = G.__class__()
mapping = G.graph['mapping']
uncondensed_G_tr.add_nodes_from(mapping.keys())
inv_mapping = ut.invert_dict(mapping, unique_vals=False)
for u, v in G_tr.edges():
u_ = inv_mapping[u][0]
v_ = inv_mapping[v][0]
uncondensed_G_tr.add_edge(u_, v_)
for key, path in inv_mapping.items():
if len(path) > 1:
directed_cycle = list(ut.itertwo(path, wrap=True))
uncondensed_G_tr.add_edges_from(directed_cycle)
G_tr = uncondensed_G_tr
else:
def make_adj_matrix(G):
edges = list(G.edges())
edge2_idx = ut.partial(ut.dict_take, node2_idx)
uv_list = ut.lmap(edge2_idx, edges)
A = np.zeros((len(nodes), len(nodes)))
A[tuple(np.array(uv_list).T)] = 1
return A
G_ = nx.dag.transitive_closure(G)
A = make_adj_matrix(G)
B = make_adj_matrix(G_)
#AB = A * B
#AB = A.T.dot(B)
AB = A.dot(B)
#AB = A.dot(B.T)
A_and_notAB = np.logical_and(A, np.logical_not(AB))
tr_uvs = np.where(A_and_notAB)
#nodes = G.nodes()
edges = list(zip(*ut.unflat_take(nodes, tr_uvs)))
G_tr = G.__class__()
G_tr.add_nodes_from(nodes)
G_tr.add_edges_from(edges)
if has_cycles:
# Uncondense graph
uncondensed_G_tr = G.__class__()
mapping = G.graph['mapping']
uncondensed_G_tr.add_nodes_from(mapping.keys())
inv_mapping = ut.invert_dict(mapping, unique_vals=False)
for u, v in G_tr.edges():
u_ = inv_mapping[u][0]
v_ = inv_mapping[v][0]
uncondensed_G_tr.add_edge(u_, v_)
for key, path in inv_mapping.items():
if len(path) > 1:
directed_cycle = list(ut.itertwo(path, wrap=True))
uncondensed_G_tr.add_edges_from(directed_cycle)
G_tr = uncondensed_G_tr
return G_tr
def nx_source_nodes(graph):
# for node in nx.dag.topological_sort(graph):
for node in graph.nodes():
if graph.in_degree(node) == 0:
yield node
def nx_sink_nodes(graph):
# for node in nx.dag.topological_sort(graph):
for node in graph.nodes():
if graph.out_degree(node) == 0:
yield node
# def nx_sink_nodes(graph):
# topsort_iter = nx.dag.topological_sort(graph)
# sink_iter = (node for node in topsort_iter
# if graph.out_degree(node) == 0)
# return sink_iter
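# Minimal usage sketch (added; `_demo_source_and_sink_nodes` is a hypothetical
# helper, not part of the original module).
def _demo_source_and_sink_nodes():
    G = nx.DiGraph([(1, 2), (2, 3), (1, 3)])
    assert list(nx_source_nodes(G)) == [1]  # no incoming edges
    assert list(nx_sink_nodes(G)) == [3]    # no outgoing edges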
def nx_to_adj_dict(graph):
import utool as ut
adj_dict = ut.ddict(list)
for u, edges in graph.adjacency():
adj_dict[u].extend(list(edges.keys()))
adj_dict = dict(adj_dict)
return adj_dict
def nx_from_adj_dict(adj_dict, cls=None):
if cls is None:
cls = nx.DiGraph
nodes = list(adj_dict.keys())
edges = [(u, v) for u, adj in adj_dict.items() for v in adj]
graph = cls()
graph.add_nodes_from(nodes)
graph.add_edges_from(edges)
return graph
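# Illustrative sketch (added; `_demo_adj_dict_round_trip` is a hypothetical
# helper): nx_to_adj_dict and nx_from_adj_dict invert each other up to
# node/edge ordering.
def _demo_adj_dict_round_trip():
    G = nx.DiGraph([(1, 2), (2, 3), (3, 1)])
    adj = nx_to_adj_dict(G)        # {1: [2], 2: [3], 3: [1]}
    G2 = nx_from_adj_dict(adj, nx.DiGraph)
    assert set(G2.edges()) == set(G.edges())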
def nx_dag_node_rank(graph, nodes=None):
"""
Returns rank of nodes that define the "level" each node is on in a
topological sort. This is the same as the Graphviz dot rank.
Ignore:
simple_graph = ut.simplify_graph(exi_graph)
adj_dict = ut.nx_to_adj_dict(simple_graph)
import plottool as pt
pt.qt4ensure()
pt.show_nx(graph)
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_graph import * # NOQA
>>> import utool as ut
>>> adj_dict = {0: [5], 1: [5], 2: [1], 3: [4], 4: [0], 5: [], 6: [4], 7: [9], 8: [6], 9: [1]}
>>> nodes = [2, 1, 5]
>>> f_graph = ut.nx_from_adj_dict(adj_dict, nx.DiGraph)
>>> graph = f_graph.reverse()
>>> #ranks = ut.nx_dag_node_rank(graph, nodes)
>>> ranks = ut.nx_dag_node_rank(graph, nodes)
>>> result = ('ranks = %r' % (ranks,))
>>> print(result)
ranks = [3, 2, 1]
"""
import utool as ut
source = list(ut.nx_source_nodes(graph))[0]
longest_paths = dict([(target, dag_longest_path(graph, source, target))
for target in graph.nodes()])
node_to_rank = ut.map_dict_vals(len, longest_paths)
if nodes is None:
return node_to_rank
else:
ranks = ut.dict_take(node_to_rank, nodes)
return ranks
def nx_all_nodes_between(graph, source, target, data=False):
"""
    Find all nodes that lie on some simple path between source and target.
"""
import utool as ut
if source is None:
# assume there is a single source
sources = list(ut.nx_source_nodes(graph))
assert len(sources) == 1, (
'specify source if there is not only one')
source = sources[0]
if target is None:
# assume there is a single source
sinks = list(ut.nx_sink_nodes(graph))
assert len(sinks) == 1, (
'specify sink if there is not only one')
target = sinks[0]
all_simple_paths = list(nx.all_simple_paths(graph, source, target))
nodes = sorted(set.union(*map(set, all_simple_paths)))
return nodes
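# Minimal usage sketch (added; `_demo_nx_all_nodes_between` is a hypothetical
# helper): every node lying on any simple path from source to target is
# returned, endpoints included.
def _demo_nx_all_nodes_between():
    G = nx.DiGraph([(1, 2), (2, 4), (1, 3), (3, 4), (5, 6)])
    assert nx_all_nodes_between(G, 1, 4) == [1, 2, 3, 4]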
def nx_all_simple_edge_paths(G, source, target, cutoff=None, keys=False,
data=False):
"""
Returns each path from source to target as a list of edges.
This function is meant to be used with MultiGraphs or MultiDiGraphs.
When ``keys`` is True each edge in the path is returned with its unique key
identifier. In this case it is possible to distinguish between different
paths along different edges between the same two nodes.
Derived from simple_paths.py in networkx
"""
if cutoff is None:
cutoff = len(G) - 1
if cutoff < 1:
return
import utool as ut
import six
visited_nodes = [source]
visited_edges = []
if G.is_multigraph():
get_neighbs = ut.partial(G.edges, keys=keys, data=data)
else:
get_neighbs = ut.partial(G.edges, data=data)
edge_stack = [iter(get_neighbs(source))]
while edge_stack:
children_edges = edge_stack[-1]
child_edge = six.next(children_edges, None)
if child_edge is None:
edge_stack.pop()
visited_nodes.pop()
if len(visited_edges) > 0:
visited_edges.pop()
elif len(visited_nodes) < cutoff:
child_node = child_edge[1]
if child_node == target:
yield visited_edges + [child_edge]
elif child_node not in visited_nodes:
visited_nodes.append(child_node)
visited_edges.append(child_edge)
edge_stack.append(iter(get_neighbs(child_node)))
else:
for edge in [child_edge] + list(children_edges):
if edge[1] == target:
yield visited_edges + [edge]
edge_stack.pop()
visited_nodes.pop()
if len(visited_edges) > 0:
visited_edges.pop()
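# Illustrative sketch (added; `_demo_nx_all_simple_edge_paths` is a
# hypothetical helper): with ``keys=True`` each parallel edge of a MultiGraph
# produces its own distinct path.
def _demo_nx_all_simple_edge_paths():
    G = nx.MultiGraph()
    G.add_edge(1, 2)
    G.add_edge(1, 2)
    G.add_edge(2, 3)
    paths = list(nx_all_simple_edge_paths(G, 1, 3, keys=True))
    assert len(paths) == 2  # one path per parallel (1, 2) edge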
def nx_edges_between(graph, nodes1, nodes2=None, assume_disjoint=False,
assume_sparse=True):
r"""
Get edges between two components or within a single component
Args:
graph (nx.Graph): the graph
nodes1 (set): list of nodes
        nodes2 (set): (default=None) if None it is equivalent to nodes2=nodes1
        assume_disjoint (bool): skips expensive check to ensure edges aren't
returned twice (default=False)
CommandLine:
python -m utool.util_graph --test-nx_edges_between
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_graph import * # NOQA
>>> import utool as ut
>>> edges = [
>>> (1, 2), (2, 3), (3, 4), (4, 1), (4, 3), # cc 1234
>>> (1, 5), (7, 2), (5, 1), # cc 567 / 5678
>>> (7, 5), (5, 6), (8, 7),
>>> ]
>>> digraph = nx.DiGraph(edges)
>>> graph = nx.Graph(edges)
>>> nodes1 = [1, 2, 3, 4]
>>> nodes2 = [5, 6, 7]
>>> n2 = sorted(nx_edges_between(graph, nodes1, nodes2))
>>> n4 = sorted(nx_edges_between(graph, nodes1))
>>> n5 = sorted(nx_edges_between(graph, nodes1, nodes1))
>>> n1 = sorted(nx_edges_between(digraph, nodes1, nodes2))
>>> n3 = sorted(nx_edges_between(digraph, nodes1))
>>> print('n2 == %r' % (n2,))
>>> print('n4 == %r' % (n4,))
>>> print('n5 == %r' % (n5,))
>>> print('n1 == %r' % (n1,))
>>> print('n3 == %r' % (n3,))
>>> assert n2 == ([(1, 5), (2, 7)]), '2'
>>> assert n4 == ([(1, 2), (1, 4), (2, 3), (3, 4)]), '4'
>>> assert n5 == ([(1, 2), (1, 4), (2, 3), (3, 4)]), '5'
>>> assert n1 == ([(1, 5), (5, 1), (7, 2)]), '1'
>>> assert n3 == ([(1, 2), (2, 3), (3, 4), (4, 1), (4, 3)]), '3'
>>> n6 = sorted(nx_edges_between(digraph, nodes1 + [6], nodes2 + [1, 2], assume_sparse=True))
>>> print('n6 = %r' % (n6,))
>>> n6 = sorted(nx_edges_between(digraph, nodes1 + [6], nodes2 + [1, 2], assume_sparse=False))
>>> print('n6 = %r' % (n6,))
>>> assert n6 == ([(1, 2), (1, 5), (2, 3), (4, 1), (5, 1), (5, 6), (7, 2)]), '6'
Timeit:
from utool.util_graph import * # NOQA
# ut.timeit_compare()
import networkx as nx
import utool as ut
graph = nx.fast_gnp_random_graph(1000, .001)
list(nx.connected_components(graph))
rng = np.random.RandomState(0)
nodes1 = set(rng.choice(list(graph.nodes()), 500, replace=False))
nodes2 = set(graph.nodes()) - nodes1
edges_between = ut.nx_edges_between
%timeit list(edges_between(graph, nodes1, nodes2, assume_sparse=False, assume_disjoint=True))
%timeit list(edges_between(graph, nodes1, nodes2, assume_sparse=False, assume_disjoint=False))
%timeit list(edges_between(graph, nodes1, nodes2, assume_sparse=True, assume_disjoint=False))
%timeit list(edges_between(graph, nodes1, nodes2, assume_sparse=True, assume_disjoint=True))
graph = nx.fast_gnp_random_graph(1000, .1)
rng = np.random.RandomState(0)
print(graph.number_of_edges())
nodes1 = set(rng.choice(list(graph.nodes()), 500, replace=False))
nodes2 = set(graph.nodes()) - nodes1
edges_between = ut.nx_edges_between
%timeit list(edges_between(graph, nodes1, nodes2, assume_sparse=True, assume_disjoint=True))
%timeit list(edges_between(graph, nodes1, nodes2, assume_sparse=False, assume_disjoint=True))
Ignore:
graph = nx.DiGraph(edges)
graph = nx.Graph(edges)
nodes1 = [1, 2, 3, 4]
nodes2 = nodes1
"""
if assume_sparse:
# Method 1 is where we check the intersection of existing edges
# and the edges in the second set (faster for sparse graphs)
        # helpers for nx_edges_between
def _node_combo_lower(graph, both):
both_lower = set([])
for u in both:
neighbs = set(graph.adj[u])
neighbsBB_lower = neighbs.intersection(both_lower)
for v in neighbsBB_lower:
yield (u, v)
both_lower.add(u)
def _node_combo_upper(graph, both):
both_upper = both.copy()
for u in both:
neighbs = set(graph.adj[u])
neighbsBB_upper = neighbs.intersection(both_upper)
for v in neighbsBB_upper:
yield (u, v)
both_upper.remove(u)
def _node_product(graph, only1, only2):
for u in only1:
neighbs = set(graph.adj[u])
neighbs12 = neighbs.intersection(only2)
for v in neighbs12:
yield (u, v)
# Test for special cases
if nodes2 is None or nodes2 is nodes1:
# Case where we just are finding internal edges
both = set(nodes1)
if graph.is_directed():
edge_sets = (
_node_combo_upper(graph, both), # B-to-B (upper)
_node_combo_lower(graph, both), # B-to-B (lower)
)
else:
edge_sets = (
_node_combo_upper(graph, both), # B-to-B (upper)
)
elif assume_disjoint:
# Case where we find edges between disjoint sets
only1 = set(nodes1)
only2 = set(nodes2)
if graph.is_directed():
edge_sets = (
_node_product(graph, only1, only2), # 1-to-2
_node_product(graph, only2, only1), # 2-to-1
)
else:
edge_sets = (
_node_product(graph, only1, only2), # 1-to-2
)
else:
# Full general case
nodes1_ = set(nodes1)
if nodes2 is None:
nodes2_ = nodes1_
else:
nodes2_ = set(nodes2)
both = nodes1_.intersection(nodes2_)
only1 = nodes1_ - both
only2 = nodes2_ - both
# This could be made faster by avoiding duplicate
# calls to set(graph.adj[u]) in the helper functions
if graph.is_directed():
edge_sets = (
_node_product(graph, only1, only2), # 1-to-2
_node_product(graph, only1, both), # 1-to-B
_node_combo_upper(graph, both), # B-to-B (u)
_node_combo_lower(graph, both), # B-to-B (l)
_node_product(graph, both, only1), # B-to-1
_node_product(graph, both, only2), # B-to-2
_node_product(graph, only2, both), # 2-to-B
_node_product(graph, only2, only1), # 2-to-1
)
else:
edge_sets = (
_node_product(graph, only1, only2), # 1-to-2
_node_product(graph, only1, both), # 1-to-B
_node_combo_upper(graph, both), # B-to-B (u)
_node_product(graph, only2, both), # 2-to-B
)
for u, v in it.chain.from_iterable(edge_sets):
yield u, v
else:
# Method 2 is where we enumerate all possible edges and just take the
# ones that exist (faster for very dense graphs)
if nodes2 is None or nodes2 is nodes1:
edge_iter = it.combinations(nodes1, 2)
else:
if assume_disjoint:
# We assume len(isect(nodes1, nodes2)) == 0
edge_iter = it.product(nodes1, nodes2)
else:
# make sure a single edge is not returned twice
# in the case where len(isect(nodes1, nodes2)) > 0
nodes1_ = set(nodes1)
nodes2_ = set(nodes2)
nodes_isect = nodes1_.intersection(nodes2_)
nodes_only1 = nodes1_ - nodes_isect
nodes_only2 = nodes2_ - nodes_isect
edge_sets = [it.product(nodes_only1, nodes_only2),
it.product(nodes_only1, nodes_isect),
it.product(nodes_only2, nodes_isect),
it.combinations(nodes_isect, 2)]
edge_iter = it.chain.from_iterable(edge_sets)
if graph.is_directed():
for n1, n2 in edge_iter:
if graph.has_edge(n1, n2):
yield n1, n2
if graph.has_edge(n2, n1):
yield n2, n1
else:
for n1, n2 in edge_iter:
if graph.has_edge(n1, n2):
yield n1, n2
def nx_delete_node_attr(graph, name, nodes=None):
"""
Removes node attributes
Doctest:
>>> from utool.util_graph import * # NOQA
>>> import utool as ut
>>> G = nx.karate_club_graph()
>>> nx.set_node_attributes(G, name='foo', values='bar')
>>> datas = nx.get_node_attributes(G, 'club')
>>> assert len(nx.get_node_attributes(G, 'club')) == 34
>>> assert len(nx.get_node_attributes(G, 'foo')) == 34
>>> ut.nx_delete_node_attr(G, ['club', 'foo'], nodes=[1, 2])
>>> assert len(nx.get_node_attributes(G, 'club')) == 32
>>> assert len(nx.get_node_attributes(G, 'foo')) == 32
>>> ut.nx_delete_node_attr(G, ['club'])
>>> assert len(nx.get_node_attributes(G, 'club')) == 0
>>> assert len(nx.get_node_attributes(G, 'foo')) == 32
"""
if nodes is None:
nodes = list(graph.nodes())
removed = 0
# names = [name] if not isinstance(name, list) else name
node_dict = nx_node_dict(graph)
if isinstance(name, list):
for node in nodes:
for name_ in name:
try:
del node_dict[node][name_]
removed += 1
except KeyError:
pass
else:
for node in nodes:
try:
del node_dict[node][name]
removed += 1
except KeyError:
pass
return removed
@profile
def nx_delete_edge_attr(graph, name, edges=None):
"""
Removes an attributes from specific edges in the graph
Doctest:
>>> from utool.util_graph import * # NOQA
>>> import utool as ut
>>> G = nx.karate_club_graph()
>>> nx.set_edge_attributes(G, name='spam', values='eggs')
>>> nx.set_edge_attributes(G, name='foo', values='bar')
>>> assert len(nx.get_edge_attributes(G, 'spam')) == 78
>>> assert len(nx.get_edge_attributes(G, 'foo')) == 78
>>> ut.nx_delete_edge_attr(G, ['spam', 'foo'], edges=[(1, 2)])
>>> assert len(nx.get_edge_attributes(G, 'spam')) == 77
>>> assert len(nx.get_edge_attributes(G, 'foo')) == 77
>>> ut.nx_delete_edge_attr(G, ['spam'])
>>> assert len(nx.get_edge_attributes(G, 'spam')) == 0
>>> assert len(nx.get_edge_attributes(G, 'foo')) == 77
Doctest:
>>> from utool.util_graph import * # NOQA
>>> import utool as ut
>>> G = nx.MultiGraph()
>>> G.add_edges_from([(1, 2), (2, 3), (3, 4), (4, 5), (4, 5), (1, 2)])
>>> nx.set_edge_attributes(G, name='spam', values='eggs')
>>> nx.set_edge_attributes(G, name='foo', values='bar')
>>> assert len(nx.get_edge_attributes(G, 'spam')) == 6
>>> assert len(nx.get_edge_attributes(G, 'foo')) == 6
>>> ut.nx_delete_edge_attr(G, ['spam', 'foo'], edges=[(1, 2, 0)])
>>> assert len(nx.get_edge_attributes(G, 'spam')) == 5
>>> assert len(nx.get_edge_attributes(G, 'foo')) == 5
>>> ut.nx_delete_edge_attr(G, ['spam'])
>>> assert len(nx.get_edge_attributes(G, 'spam')) == 0
>>> assert len(nx.get_edge_attributes(G, 'foo')) == 5
"""
removed = 0
keys = [name] if not isinstance(name, (list, tuple)) else name
if edges is None:
if graph.is_multigraph():
edges = graph.edges(keys=True)
else:
edges = graph.edges()
if graph.is_multigraph():
for u, v, k in edges:
for key_ in keys:
try:
del graph[u][v][k][key_]
removed += 1
except KeyError:
pass
else:
for u, v in edges:
for key_ in keys:
try:
del graph[u][v][key_]
removed += 1
except KeyError:
pass
return removed
def nx_delete_None_edge_attr(graph, edges=None):
removed = 0
if graph.is_multigraph():
if edges is None:
edges = list(graph.edges(keys=graph.is_multigraph()))
for edge in edges:
u, v, k = edge
data = graph[u][v][k]
for key in list(data.keys()):
try:
if data[key] is None:
del data[key]
removed += 1
except KeyError:
pass
else:
if edges is None:
edges = list(graph.edges())
for edge in graph.edges():
u, v = edge
data = graph[u][v]
for key in list(data.keys()):
try:
if data[key] is None:
del data[key]
removed += 1
except KeyError:
pass
return removed
def nx_delete_None_node_attr(graph, nodes=None):
removed = 0
if nodes is None:
nodes = list(graph.nodes())
for node in graph.nodes():
node_dict = nx_node_dict(graph)
data = node_dict[node]
for key in list(data.keys()):
try:
if data[key] is None:
del data[key]
removed += 1
except KeyError:
pass
return removed
def nx_set_default_node_attributes(graph, key, val):
unset_nodes = [n for n, d in graph.nodes(data=True) if key not in d]
if isinstance(val, dict):
values = {n: val[n] for n in unset_nodes if n in val}
else:
values = {n: val for n in unset_nodes}
nx.set_node_attributes(graph, name=key, values=values)
def nx_set_default_edge_attributes(graph, key, val):
unset_edges = [(u, v) for u, v, d in graph.edges(data=True) if key not in d]
if isinstance(val, dict):
values = {e: val[e] for e in unset_edges if e in val}
else:
values = {e: val for e in unset_edges}
nx.set_edge_attributes(graph, name=key, values=values)
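# Minimal usage sketch (added; `_demo_nx_set_default_edge_attributes` is a
# hypothetical helper): only edges that lack the attribute get the default.
def _demo_nx_set_default_edge_attributes():
    G = nx.Graph([(1, 2), (2, 3)])
    nx.set_edge_attributes(G, name='weight', values={(1, 2): 5.0})
    nx_set_default_edge_attributes(G, 'weight', 1.0)
    assert G[1][2]['weight'] == 5.0  # existing value preserved
    assert G[2][3]['weight'] == 1.0  # missing value filled with default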
def nx_get_default_edge_attributes(graph, key, default=None):
import utool as ut
edge_list = list(graph.edges())
partial_attr_dict = nx.get_edge_attributes(graph, key)
attr_dict = ut.dict_subset(partial_attr_dict, edge_list, default=default)
return attr_dict
def nx_get_default_node_attributes(graph, key, default=None):
import utool as ut
node_list = list(graph.nodes())
partial_attr_dict = nx.get_node_attributes(graph, key)
attr_dict = ut.dict_subset(partial_attr_dict, node_list, default=default)
return attr_dict
def nx_gen_node_values(G, key, nodes, default=util_const.NoParam):
"""
Generates attributes values of specific nodes
"""
node_dict = nx_node_dict(G)
if default is util_const.NoParam:
return (node_dict[n][key] for n in nodes)
else:
return (node_dict[n].get(key, default) for n in nodes)
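# Minimal usage sketch (added; `_demo_nx_gen_node_values` is a hypothetical
# helper): values are generated per requested node with an optional default.
def _demo_nx_gen_node_values():
    G = nx.Graph([(1, 2)])
    nx.set_node_attributes(G, name='rank', values={1: 10})
    assert list(nx_gen_node_values(G, 'rank', [1, 2], default=0)) == [10, 0]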
def nx_gen_node_attrs(G, key, nodes=None, default=util_const.NoParam,
on_missing='error', on_keyerr='default'):
"""
Improved generator version of nx.get_node_attributes
Args:
on_missing (str): Strategy for handling nodes missing from G.
Can be {'error', 'default', 'filter'}. defaults to 'error'.
on_keyerr (str): Strategy for handling keys missing from node dicts.
Can be {'error', 'default', 'filter'}. defaults to 'default'
if default is specified, otherwise defaults to 'error'.
Notes:
strategies are:
error - raises an error if key or node does not exist
default - returns node, but uses value specified by default
filter - skips the node
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_graph import * # NOQA
>>> import utool as ut
>>> G = nx.Graph([(1, 2), (2, 3)])
>>> nx.set_node_attributes(G, name='part', values={1: 'bar', 3: 'baz'})
>>> nodes = [1, 2, 3, 4]
>>> #
>>> assert len(list(ut.nx_gen_node_attrs(G, 'part', default=None, on_missing='error', on_keyerr='default'))) == 3
>>> assert len(list(ut.nx_gen_node_attrs(G, 'part', default=None, on_missing='error', on_keyerr='filter'))) == 2
>>> ut.assert_raises(KeyError, list, ut.nx_gen_node_attrs(G, 'part', on_missing='error', on_keyerr='error'))
>>> #
>>> assert len(list(ut.nx_gen_node_attrs(G, 'part', nodes, default=None, on_missing='filter', on_keyerr='default'))) == 3
>>> assert len(list(ut.nx_gen_node_attrs(G, 'part', nodes, default=None, on_missing='filter', on_keyerr='filter'))) == 2
>>> ut.assert_raises(KeyError, list, ut.nx_gen_node_attrs(G, 'part', nodes, on_missing='filter', on_keyerr='error'))
>>> #
>>> assert len(list(ut.nx_gen_node_attrs(G, 'part', nodes, default=None, on_missing='default', on_keyerr='default'))) == 4
>>> assert len(list(ut.nx_gen_node_attrs(G, 'part', nodes, default=None, on_missing='default', on_keyerr='filter'))) == 2
>>> ut.assert_raises(KeyError, list, ut.nx_gen_node_attrs(G, 'part', nodes, on_missing='default', on_keyerr='error'))
Example:
>>> # DISABLE_DOCTEST
>>> # ALL CASES
>>> from utool.util_graph import * # NOQA
>>> import utool as ut
>>> G = nx.Graph([(1, 2), (2, 3)])
>>> nx.set_node_attributes(G, name='full', values={1: 'A', 2: 'B', 3: 'C'})
>>> nx.set_node_attributes(G, name='part', values={1: 'bar', 3: 'baz'})
>>> nodes = [1, 2, 3, 4]
>>> attrs = dict(ut.nx_gen_node_attrs(G, 'full'))
>>> input_grid = {
>>> 'nodes': [None, (1, 2, 3, 4)],
>>> 'key': ['part', 'full'],
>>> 'default': [util_const.NoParam, None],
>>> }
>>> inputs = ut.all_dict_combinations(input_grid)
>>> kw_grid = {
>>> 'on_missing': ['error', 'default', 'filter'],
>>> 'on_keyerr': ['error', 'default', 'filter'],
>>> }
>>> kws = ut.all_dict_combinations(kw_grid)
>>> for in_ in inputs:
>>> for kw in kws:
>>> kw2 = ut.dict_union(kw, in_)
>>> #print(kw2)
>>> on_missing = kw['on_missing']
>>> on_keyerr = kw['on_keyerr']
>>> if on_keyerr == 'default' and in_['default'] is util_const.NoParam:
>>> on_keyerr = 'error'
>>> will_miss = False
>>> will_keyerr = False
>>> if on_missing == 'error':
>>> if in_['key'] == 'part' and in_['nodes'] is not None:
>>> will_miss = True
>>> if in_['key'] == 'full' and in_['nodes'] is not None:
>>> will_miss = True
>>> if on_keyerr == 'error':
>>> if in_['key'] == 'part':
>>> will_keyerr = True
>>> if on_missing == 'default':
>>> if in_['key'] == 'full' and in_['nodes'] is not None:
>>> will_keyerr = True
>>> want_error = will_miss or will_keyerr
>>> gen = ut.nx_gen_node_attrs(G, **kw2)
>>> try:
>>> attrs = list(gen)
>>> except KeyError:
>>> if not want_error:
>>> raise AssertionError('should not have errored')
>>> else:
>>> if want_error:
>>> raise AssertionError('should have errored')
"""
if on_missing is None:
on_missing = 'error'
if default is util_const.NoParam and on_keyerr == 'default':
on_keyerr = 'error'
if nodes is None:
nodes = G.nodes()
# Generate `node_data` nodes and data dictionary
node_dict = nx_node_dict(G)
if on_missing == 'error':
node_data = ((n, node_dict[n]) for n in nodes)
elif on_missing == 'filter':
node_data = ((n, node_dict[n]) for n in nodes if n in G)
elif on_missing == 'default':
node_data = ((n, node_dict.get(n, {})) for n in nodes)
else:
raise KeyError('on_missing={} must be error, filter or default'.format(
on_missing))
# Get `node_attrs` desired value out of dictionary
if on_keyerr == 'error':
node_attrs = ((n, d[key]) for n, d in node_data)
elif on_keyerr == 'filter':
node_attrs = ((n, d[key]) for n, d in node_data if key in d)
elif on_keyerr == 'default':
node_attrs = ((n, d.get(key, default)) for n, d in node_data)
else:
        raise KeyError('on_keyerr={} must be error, filter or default'.format(on_keyerr))
return node_attrs
def nx_gen_edge_values(G, key, edges=None, default=util_const.NoParam,
on_missing='error', on_keyerr='default'):
"""
Generates attributes values of specific edges
Args:
on_missing (str): Strategy for handling nodes missing from G.
Can be {'error', 'default'}. defaults to 'error'.
on_keyerr (str): Strategy for handling keys missing from node dicts.
Can be {'error', 'default'}. defaults to 'default'
if default is specified, otherwise defaults to 'error'.
"""
if edges is None:
edges = G.edges()
if on_missing is None:
on_missing = 'error'
if on_keyerr is None:
on_keyerr = 'default'
if default is util_const.NoParam and on_keyerr == 'default':
on_keyerr = 'error'
# Generate `data_iter` edges and data dictionary
if on_missing == 'error':
data_iter = (G.adj[u][v] for u, v in edges)
elif on_missing == 'default':
data_iter = (G.adj[u][v] if G.has_edge(u, v) else {}
for u, v in edges)
else:
raise KeyError('on_missing={} must be error, filter or default'.format(
on_missing))
# Get `value_iter` desired value out of dictionary
if on_keyerr == 'error':
value_iter = (d[key] for d in data_iter)
elif on_keyerr == 'default':
value_iter = (d.get(key, default) for d in data_iter)
else:
raise KeyError('on_keyerr={} must be error or default'.format(on_keyerr))
return value_iter
# if default is util_const.NoParam:
# return (G.adj[u][v][key] for u, v in edges)
# else:
# return (G.adj[u][v].get(key, default) for u, v in edges)
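# Minimal usage sketch (added; `_demo_nx_gen_edge_values` is a hypothetical
# helper): edge values are generated in the order the edges are given, with a
# fallback default for edges that lack the attribute.
def _demo_nx_gen_edge_values():
    G = nx.Graph([(1, 2), (2, 3)])
    nx.set_edge_attributes(G, name='weight', values={(1, 2): 2.5})
    vals = list(nx_gen_edge_values(G, 'weight', [(1, 2), (2, 3)], default=0.0))
    assert vals == [2.5, 0.0]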
def nx_gen_edge_attrs(G, key, edges=None, default=util_const.NoParam,
on_missing='error', on_keyerr='default'):
"""
Improved generator version of nx.get_edge_attributes
Args:
on_missing (str): Strategy for handling nodes missing from G.
Can be {'error', 'default', 'filter'}. defaults to 'error'.
            If on_missing is not 'error', then any edge is allowed even if its
            endpoints are not in the graph.
on_keyerr (str): Strategy for handling keys missing from node dicts.
Can be {'error', 'default', 'filter'}. defaults to 'default'
if default is specified, otherwise defaults to 'error'.
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_graph import * # NOQA
>>> import utool as ut
>>> G = nx.Graph([(1, 2), (2, 3), (3, 4)])
>>> nx.set_edge_attributes(G, name='part', values={(1, 2): 'bar', (2, 3): 'baz'})
>>> edges = [(1, 2), (2, 3), (3, 4), (4, 5)]
>>> func = ut.partial(ut.nx_gen_edge_attrs, G, 'part', default=None)
>>> #
>>> assert len(list(func(on_missing='error', on_keyerr='default'))) == 3
>>> assert len(list(func(on_missing='error', on_keyerr='filter'))) == 2
>>> ut.assert_raises(KeyError, list, func(on_missing='error', on_keyerr='error'))
>>> #
>>> assert len(list(func(edges, on_missing='filter', on_keyerr='default'))) == 3
>>> assert len(list(func(edges, on_missing='filter', on_keyerr='filter'))) == 2
>>> ut.assert_raises(KeyError, list, func(edges, on_missing='filter', on_keyerr='error'))
>>> #
>>> assert len(list(func(edges, on_missing='default', on_keyerr='default'))) == 4
>>> assert len(list(func(edges, on_missing='default', on_keyerr='filter'))) == 2
>>> ut.assert_raises(KeyError, list, func(edges, on_missing='default', on_keyerr='error'))
"""
if on_missing is None:
on_missing = 'error'
if default is util_const.NoParam and on_keyerr == 'default':
on_keyerr = 'error'
if edges is None:
if G.is_multigraph():
            raise NotImplementedError('multigraph edge attrs are not yet supported')
# uvk_iter = G.edges(keys=True)
else:
edges = G.edges()
# Generate `edge_data` edges and data dictionary
if on_missing == 'error':
edge_data = (((u, v), G.adj[u][v]) for u, v in edges)
elif on_missing == 'filter':
edge_data = (((u, v), G.adj[u][v]) for u, v in edges if G.has_edge(u, v))
elif on_missing == 'default':
edge_data = (((u, v), G.adj[u][v])
if G.has_edge(u, v) else ((u, v), {})
for u, v in edges)
else:
raise KeyError('on_missing={}'.format(on_missing))
# Get `edge_attrs` desired value out of dictionary
if on_keyerr == 'error':
edge_attrs = ((e, d[key]) for e, d in edge_data)
elif on_keyerr == 'filter':
edge_attrs = ((e, d[key]) for e, d in edge_data if key in d)
elif on_keyerr == 'default':
edge_attrs = ((e, d.get(key, default)) for e, d in edge_data)
else:
raise KeyError('on_keyerr={}'.format(on_keyerr))
return edge_attrs
# if edges is None:
# if G.is_multigraph():
# edges_ = G.edges(keys=True, data=True)
# else:
# edges_ = G.edges(data=True)
# if default is util_const.NoParam:
# return ((x[:-1], x[-1][key]) for x in edges_ if key in x[-1])
# else:
# return ((x[:-1], x[-1].get(key, default)) for x in edges_)
# else:
# if on_missing == 'error':
# uv_iter = edges
# uvd_iter = ((u, v, G.adj[u][v]) for u, v in uv_iter)
# elif on_missing == 'filter':
# # filter edges that don't exist
# uv_iter = (e for e in edges if G.has_edge(*e))
# uvd_iter = ((u, v, G.adj[u][v]) for u, v in uv_iter)
# elif on_missing == 'default':
# # Return default data as if it existed
# uvd_iter = (
# (u, v, G.adj[u][v])
# if G.has_edge(u, v) else
# (u, v, {})
# for u, v in uv_iter
# )
# else:
# raise KeyError('on_missing={}'.format(on_missing))
# if default is util_const.NoParam:
# # return (((u, v), d[key]) for u, v, d in uvd_iter if key in d)
# return (((u, v), d[key]) for u, v, d in uvd_iter)
# else:
# uvd_iter = ((u, v, G.adj[u][v]) for u, v in uv_iter)
# return (((u, v), d.get(key, default)) for u, v, d in uvd_iter)
def nx_from_node_edge(nodes=None, edges=None):
graph = nx.Graph()
if nodes:
graph.add_nodes_from(nodes)
if edges:
graph.add_edges_from(edges)
return graph
def nx_minimum_weight_component(graph, weight='weight'):
""" A minimum weight component is an MST + all negative edges """
mwc = nx.minimum_spanning_tree(graph, weight=weight)
# negative edges only reduce the total weight
neg_edges = (e for e, w in nx_gen_edge_attrs(graph, weight) if w < 0)
mwc.add_edges_from(neg_edges)
return mwc
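# Illustrative sketch (added; `_demo_nx_minimum_weight_component` is a
# hypothetical helper): the MST keeps two of the three negative edges and the
# remaining negative edge is added back, giving the full triangle.
def _demo_nx_minimum_weight_component():
    G = nx.Graph()
    G.add_weighted_edges_from([(1, 2, -1.0), (2, 3, -1.0), (1, 3, -1.0)])
    mwc = nx_minimum_weight_component(G)
    assert mwc.number_of_edges() == 3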
def nx_from_matrix(weight_matrix, nodes=None, remove_self=True):
import utool as ut
import numpy as np
if nodes is None:
nodes = list(range(len(weight_matrix)))
weight_list = weight_matrix.ravel()
flat_idxs_ = np.arange(weight_matrix.size)
multi_idxs_ = np.unravel_index(flat_idxs_, weight_matrix.shape)
# Remove 0 weight edges
flags = np.logical_not(np.isclose(weight_list, 0))
weight_list = ut.compress(weight_list, flags)
multi_idxs = ut.compress(list(zip(*multi_idxs_)), flags)
edge_list = ut.lmap(tuple, ut.unflat_take(nodes, multi_idxs))
if remove_self:
flags = [e1 != e2 for e1, e2 in edge_list]
edge_list = ut.compress(edge_list, flags)
weight_list = ut.compress(weight_list, flags)
graph = nx.Graph()
graph.add_nodes_from(nodes)
graph.add_edges_from(edge_list)
label_list = ['%.2f' % w for w in weight_list]
nx.set_edge_attributes(graph, name='weight', values=dict(zip(edge_list, weight_list)))
nx.set_edge_attributes(graph, name='label', values=dict(zip(edge_list, label_list)))
return graph
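# Minimal usage sketch (added; `_demo_nx_from_matrix` is a hypothetical
# helper): zero weights and self-weights are dropped when building the graph.
def _demo_nx_from_matrix():
    W = np.array([[0.0, 0.5, 0.0],
                  [0.5, 0.0, 0.2],
                  [0.0, 0.2, 0.0]])
    G = nx_from_matrix(W, nodes=['a', 'b', 'c'])
    assert set(G.edges()) == {('a', 'b'), ('b', 'c')}
    assert G['a']['b']['weight'] == 0.5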
def nx_ensure_agraph_color(graph):
""" changes colors to hex strings on graph attrs """
try:
from plottool_ibeis import color_funcs
import plottool_ibeis as pt
except ImportError:
from plottool import color_funcs
import plottool as pt
#import six
def _fix_agraph_color(data):
try:
orig_color = data.get('color', None)
alpha = data.get('alpha', None)
color = orig_color
if color is None and alpha is not None:
color = [0, 0, 0]
if color is not None:
color = pt.ensure_nonhex_color(color)
#if isinstance(color, np.ndarray):
# color = color.tolist()
color = list(color_funcs.ensure_base255(color))
if alpha is not None:
if len(color) == 3:
color += [int(alpha * 255)]
else:
color[3] = int(alpha * 255)
color = tuple(color)
if len(color) == 3:
data['color'] = '#%02x%02x%02x' % color
else:
data['color'] = '#%02x%02x%02x%02x' % color
except Exception as ex:
import utool as ut
ut.printex(ex, keys=['color', 'orig_color', 'data'])
raise
for node, node_data in graph.nodes(data=True):
data = node_data
_fix_agraph_color(data)
for u, v, edge_data in graph.edges(data=True):
data = edge_data
_fix_agraph_color(data)
def nx_edges(graph, keys=False, data=False):
if graph.is_multigraph():
edges = graph.edges(keys=keys, data=data)
else:
edges = graph.edges(data=data)
#if keys:
# edges = [e[0:2] + (0,) + e[:2] for e in edges]
return edges
def dag_longest_path(graph, source, target):
"""
Finds the longest path in a dag between two nodes
"""
if source == target:
return [source]
allpaths = nx.all_simple_paths(graph, source, target)
longest_path = []
for l in allpaths:
if len(l) > len(longest_path):
longest_path = l
return longest_path
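# Illustrative sketch (added; `_demo_dag_longest_path` is a hypothetical
# helper): the longest path is found by enumerating all simple paths, so it
# is only practical for small DAGs.
def _demo_dag_longest_path():
    G = nx.DiGraph([(1, 2), (2, 4), (1, 3), (3, 5), (5, 4)])
    assert dag_longest_path(G, 1, 4) == [1, 3, 5, 4]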
def testdata_graph():
r"""
Returns:
tuple: (graph, G)
CommandLine:
python -m utool.util_graph --exec-testdata_graph --show
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_graph import * # NOQA
>>> import utool as ut
>>> (graph, G) = testdata_graph()
>>> import plottool as pt
>>> ut.ensureqt()
>>> pt.show_nx(G, layout='agraph')
>>> ut.show_if_requested()
"""
import utool as ut
# Define adjacency list
graph = {
'a': ['b'],
'b': ['c', 'f', 'e'],
'c': ['g', 'd'],
'd': ['c', 'h'],
'e': ['a', 'f'],
'f': ['g'],
'g': ['f'],
'h': ['g', 'd'],
'i': ['j'],
'j': [],
}
graph = {
'a': ['b'],
'b': ['c'],
'c': ['d'],
'd': ['a', 'e'],
'e': ['c'],
}
#graph = {'a': ['b'], 'b': ['c'], 'c': ['d'], 'd': ['a']}
#graph = {'a': ['b'], 'b': ['c'], 'c': ['d'], 'd': ['e'], 'e': ['a']}
graph = {'a': ['b'], 'b': ['c'], 'c': ['d'], 'd': ['e'], 'e': ['a'], 'f': ['c']}
#graph = {'a': ['b'], 'b': ['c'], 'c': ['d'], 'd': ['e'], 'e': ['b']}
graph = {'a': ['b', 'c', 'd'], 'e': ['d'], 'f': ['d', 'e'], 'b': [], 'c': [], 'd': []} # double pair in non-scc
graph = {'a': ['b', 'c', 'd'], 'e': ['d'], 'f': ['d', 'e'], 'b': [], 'c': [], 'd': ['e']} # double pair in non-scc
#graph = {'a': ['b', 'c', 'd'], 'e': ['d', 'f'], 'f': ['d', 'e'], 'b': [], 'c': [], 'd': ['e']} # double pair in non-scc
#graph = {'a': ['b', 'c', 'd'], 'e': ['d', 'c'], 'f': ['d', 'e'], 'b': ['e'], 'c': ['e'], 'd': ['e']} # double pair in non-scc
graph = {'a': ['b', 'c', 'd'], 'e': ['d', 'c'], 'f': ['d', 'e'], 'b': ['e'], 'c': ['e', 'b'], 'd': ['e']} # double pair in non-scc
# Extract G = (V, E)
nodes = list(graph.keys())
edges = ut.flatten([[(v1, v2) for v2 in v2s] for v1, v2s in graph.items()])
G = nx.DiGraph()
G.add_nodes_from(nodes)
G.add_edges_from(edges)
if False:
G.remove_node('e')
del graph['e']
for val in graph.values():
try:
val.remove('e')
except ValueError:
pass
return graph, G
def dict_depth(dict_, accum=0):
if not isinstance(dict_, dict):
return accum
return max([dict_depth(val, accum + 1)
for key, val in dict_.items()])
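# Minimal usage sketch (added; `_demo_dict_depth` is a hypothetical helper):
# dict_depth counts how deeply dictionaries are nested.
def _demo_dict_depth():
    assert dict_depth('not a dict') == 0
    assert dict_depth({'a': None}) == 1
    assert dict_depth({'a': {'b': {'c': None}}}) == 3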
def edges_to_adjacency_list(edges):
import utool as ut
children_, parents_ = list(zip(*edges))
parent_to_children = ut.group_items(parents_, children_)
#to_leafs = {tablename: path_to_leafs(tablename, parent_to_children)}
return parent_to_children
def paths_to_root(tablename, root, child_to_parents):
"""
CommandLine:
python -m utool.util_graph --exec-paths_to_root:0
python -m utool.util_graph --exec-paths_to_root:1
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_graph import * # NOQA
>>> import utool as ut
>>> child_to_parents = {
>>> 'chip': ['dummy_annot'],
>>> 'chipmask': ['dummy_annot'],
>>> 'descriptor': ['keypoint'],
>>> 'fgweight': ['keypoint', 'probchip'],
>>> 'keypoint': ['chip'],
>>> 'notch': ['dummy_annot'],
>>> 'probchip': ['dummy_annot'],
>>> 'spam': ['fgweight', 'chip', 'keypoint']
>>> }
>>> root = 'dummy_annot'
>>> tablename = 'fgweight'
>>> to_root = paths_to_root(tablename, root, child_to_parents)
>>> result = ut.repr3(to_root)
>>> print(result)
{
'keypoint': {
'chip': {
'dummy_annot': None,
},
},
'probchip': {
'dummy_annot': None,
},
}
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_graph import * # NOQA
>>> import utool as ut
>>> root = u'annotations'
>>> tablename = u'Notch_Tips'
>>> child_to_parents = {
>>> 'Block_Curvature': [
>>> 'Trailing_Edge',
>>> ],
>>> 'Has_Notch': [
>>> 'annotations',
>>> ],
>>> 'Notch_Tips': [
>>> 'annotations',
>>> ],
>>> 'Trailing_Edge': [
>>> 'Notch_Tips',
>>> ],
>>> }
>>> to_root = paths_to_root(tablename, root, child_to_parents)
>>> result = ut.repr3(to_root)
>>> print(result)
"""
if tablename == root:
return None
parents = child_to_parents[tablename]
return {parent: paths_to_root(parent, root, child_to_parents)
for parent in parents}
def get_allkeys(dict_):
import utool as ut
if not isinstance(dict_, dict):
return []
subkeys = [[key] + get_allkeys(val)
for key, val in dict_.items()]
return ut.unique_ordered(ut.flatten(subkeys))
def traverse_path(start, end, seen_, allkeys, mat):
import utool as ut
if seen_ is None:
seen_ = set([])
index = allkeys.index(start)
sub_indexes = np.where(mat[index])[0]
if len(sub_indexes) > 0:
subkeys = ut.take(allkeys, sub_indexes)
# subkeys_ = ut.take(allkeys, sub_indexes)
# subkeys = [subkey for subkey in subkeys_
# if subkey not in seen_]
# for sk in subkeys:
# seen_.add(sk)
if len(subkeys) > 0:
return {subkey: traverse_path(subkey, end, seen_, allkeys, mat)
for subkey in subkeys}
return None
def reverse_path(dict_, root, child_to_parents):
"""
CommandLine:
python -m utool.util_graph --exec-reverse_path --show
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_graph import * # NOQA
>>> import utool as ut
>>> child_to_parents = {
>>> 'chip': ['dummy_annot'],
>>> 'chipmask': ['dummy_annot'],
>>> 'descriptor': ['keypoint'],
>>> 'fgweight': ['keypoint', 'probchip'],
>>> 'keypoint': ['chip'],
>>> 'notch': ['dummy_annot'],
>>> 'probchip': ['dummy_annot'],
>>> 'spam': ['fgweight', 'chip', 'keypoint']
>>> }
>>> to_root = {
>>> 'fgweight': {
>>> 'keypoint': {
>>> 'chip': {
>>> 'dummy_annot': None,
>>> },
>>> },
>>> 'probchip': {
>>> 'dummy_annot': None,
>>> },
>>> },
>>> }
>>> reversed_ = reverse_path(to_root, 'dummy_annot', child_to_parents)
>>> result = ut.repr3(reversed_)
>>> print(result)
{
'dummy_annot': {
'chip': {
'keypoint': {
'fgweight': None,
},
},
'probchip': {
'fgweight': None,
},
},
}
"""
# Hacky but illustrative
# TODO; implement non-hacky version
allkeys = get_allkeys(dict_)
mat = np.zeros((len(allkeys), len(allkeys)))
for key in allkeys:
if key != root:
for parent in child_to_parents[key]:
rx = allkeys.index(parent)
cx = allkeys.index(key)
mat[rx][cx] = 1
end = None
seen_ = set([])
reversed_ = {root: traverse_path(root, end, seen_, allkeys, mat)}
return reversed_
def get_levels(dict_, n=0, levels=None):
r"""
    DEPRECATED
Args:
dict_ (dict_): a dictionary
n (int): (default = 0)
levels (None): (default = None)
CommandLine:
python -m utool.util_graph --test-get_levels --show
python3 -m utool.util_graph --test-get_levels --show
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_graph import * # NOQA
>>> import utool as ut
>>> from_root = {
>>> 'dummy_annot': {
>>> 'chip': {
>>> 'keypoint': {
>>> 'fgweight': None,
>>> },
>>> },
>>> 'probchip': {
>>> 'fgweight': None,
>>> },
>>> },
>>> }
>>> dict_ = from_root
>>> n = 0
>>> levels = None
>>> levels_ = get_levels(dict_, n, levels)
>>> result = ut.repr2(levels_, nl=1)
>>> print(result)
[
['dummy_annot'],
['chip', 'probchip'],
['keypoint', 'fgweight'],
['fgweight'],
]
"""
if levels is None:
levels_ = [[] for _ in range(dict_depth(dict_))]
else:
levels_ = levels
if dict_ is None:
return []
for key in dict_.keys():
levels_[n].append(key)
for val in dict_.values():
get_levels(val, n + 1, levels_)
return levels_
def longest_levels(levels_):
r"""
Args:
levels_ (list):
CommandLine:
python -m utool.util_graph --exec-longest_levels --show
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_graph import * # NOQA
>>> import utool as ut
>>> levels_ = [
>>> ['dummy_annot'],
>>> ['chip', 'probchip'],
>>> ['keypoint', 'fgweight'],
>>> ['fgweight'],
>>> ]
>>> new_levels = longest_levels(levels_)
>>> result = ('new_levels = %s' % (ut.repr2(new_levels, nl=1),))
>>> print(result)
new_levels = [
['dummy_annot'],
['chip', 'probchip'],
['keypoint'],
['fgweight'],
]
"""
return shortest_levels(levels_[::-1])[::-1]
# seen_ = set([])
# new_levels = []
# for level in levels_[::-1]:
# new_level = [item for item in level if item not in seen_]
# seen_ = seen_.union(set(new_level))
# new_levels.append(new_level)
# new_levels = new_levels[::-1]
# return new_levels
def shortest_levels(levels_):
r"""
Args:
levels_ (list):
CommandLine:
python -m utool.util_graph --exec-shortest_levels --show
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_graph import * # NOQA
>>> import utool as ut
>>> levels_ = [
>>> ['dummy_annot'],
>>> ['chip', 'probchip'],
>>> ['keypoint', 'fgweight'],
>>> ['fgweight'],
>>> ]
>>> new_levels = shortest_levels(levels_)
>>> result = ('new_levels = %s' % (ut.repr2(new_levels, nl=1),))
>>> print(result)
new_levels = [
['dummy_annot'],
['chip', 'probchip'],
['keypoint', 'fgweight'],
]
"""
seen_ = set([])
new_levels = []
for level in levels_:
new_level = [item for item in level if item not in seen_]
seen_ = seen_.union(set(new_level))
if len(new_level) > 0:
new_levels.append(new_level)
return new_levels
def simplify_graph(graph):
"""
strips out everything but connectivity
Args:
graph (nx.Graph):
Returns:
nx.Graph: new_graph
CommandLine:
python3 -m utool.util_graph simplify_graph --show
python2 -m utool.util_graph simplify_graph --show
python2 -c "import networkx as nx; print(nx.__version__)"
python3 -c "import networkx as nx; print(nx.__version__)"
Ignore:
>>> # ENABLE_DOCTEST
>>> from utool.util_graph import * # NOQA
>>> import utool as ut
>>> graph = nx.DiGraph([('a', 'b'), ('a', 'c'), ('a', 'e'),
>>> ('a', 'd'), ('b', 'd'), ('c', 'e'),
>>> ('d', 'e'), ('c', 'e'), ('c', 'd')])
>>> new_graph = simplify_graph(graph)
>>> result = ut.repr2(list(new_graph.edges()))
>>> #adj_list = sorted(list(nx.generate_adjlist(new_graph)))
>>> #result = ut.repr2(adj_list)
>>> print(result)
[(0, 1), (0, 2), (0, 3), (0, 4), (1, 3), (2, 3), (2, 4), (3, 4)]
['0 1 2 3 4', '1 3 4', '2 4', '3', '4 3']
"""
import utool as ut
nodes = sorted(list(graph.nodes()))
node_lookup = ut.make_index_lookup(nodes)
if graph.is_multigraph():
edges = list(graph.edges(keys=True))
else:
edges = list(graph.edges())
new_nodes = ut.take(node_lookup, nodes)
if graph.is_multigraph():
new_edges = [(node_lookup[e[0]], node_lookup[e[1]], e[2], {}) for e in edges]
else:
new_edges = [(node_lookup[e[0]], node_lookup[e[1]]) for e in edges]
cls = graph.__class__
new_graph = cls()
new_graph.add_nodes_from(new_nodes)
new_graph.add_edges_from(new_edges)
return new_graph
def subgraph_from_edges(G, edge_list, ref_back=True):
"""
Creates a networkx graph that is a subgraph of G
defined by the list of edges in edge_list.
Requires G to be a networkx MultiGraph or MultiDiGraph
edge_list is a list of edges in either (u,v) or (u,v,d) form
where u and v are nodes comprising an edge,
and d would be a dictionary of edge attributes
    ref_back determines whether the created subgraph refers back
to the original graph and therefore changes to the subgraph's
attributes also affect the original graph, or if it is to create a
new copy of the original graph.
References:
http://stackoverflow.com/questions/16150557/nx-subgraph-from-edges
"""
# TODO: support multi-di-graph
sub_nodes = list({y for x in edge_list for y in x[0:2]})
#edge_list_no_data = [edge[0:2] for edge in edge_list]
multi_edge_list = [edge[0:3] for edge in edge_list]
if ref_back:
G_sub = G.subgraph(sub_nodes)
for edge in G_sub.edges(keys=True):
if edge not in multi_edge_list:
G_sub.remove_edge(*edge)
else:
G_sub = G.subgraph(sub_nodes).copy()
for edge in G_sub.edges(keys=True):
if edge not in multi_edge_list:
G_sub.remove_edge(*edge)
return G_sub
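# Illustrative sketch (added; `_demo_subgraph_from_edges` is a hypothetical
# helper): only the listed multi-edges survive in the extracted subgraph.
def _demo_subgraph_from_edges():
    G = nx.MultiGraph([(1, 2), (2, 3), (3, 4)])
    sub = subgraph_from_edges(G, [(1, 2, 0), (2, 3, 0)], ref_back=False)
    assert set(sub.nodes()) == {1, 2, 3}
    assert sub.number_of_edges() == 2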
def nx_node_dict(G):
if nx.__version__.startswith('1'):
return getattr(G, 'node')
else:
return G.nodes
def all_multi_paths(graph, source, target, data=False):
r"""
Returns specific paths along multi-edges from the source to this table.
Multipaths are identified by edge keys.
Returns all paths from source to target. This function treats multi-edges
as distinct and returns the key value in each edge tuple that defines a
path.
Example:
>>> # DISABLE_DOCTEST
>>> from dtool.depcache_control import * # NOQA
>>> from utool.util_graph import * # NOQA
>>> from dtool.example_depcache import testdata_depc
>>> depc = testdata_depc()
>>> graph = depc.graph
>>> source = depc.root
>>> target = 'notchpair'
>>> path_list1 = ut.all_multi_paths(graph, depc.root, 'notchpair')
>>> path_list2 = ut.all_multi_paths(graph, depc.root, 'spam')
>>> result1 = ('path_list1 = %s' % ut.repr3(path_list1, nl=1))
>>> result2 = ('path_list2 = %s' % ut.repr3(path_list2, nl=2))
>>> result = '\n'.join([result1, result2])
>>> print(result)
path_list1 = [
[('dummy_annot', 'notch', 0), ('notch', 'notchpair', 0)],
[('dummy_annot', 'notch', 0), ('notch', 'notchpair', 1)],
]
path_list2 = [
[
('dummy_annot', 'chip', 0),
('chip', 'keypoint', 0),
('keypoint', 'fgweight', 0),
('fgweight', 'spam', 0),
],
[
('dummy_annot', 'chip', 0),
('chip', 'keypoint', 0),
('keypoint', 'spam', 0),
],
[
('dummy_annot', 'chip', 0),
('chip', 'spam', 0),
],
[
('dummy_annot', 'probchip', 0),
('probchip', 'fgweight', 0),
('fgweight', 'spam', 0),
],
]
"""
path_multiedges = list(nx_all_simple_edge_paths(graph, source, target,
keys=True, data=data))
return path_multiedges
def reverse_path_edges(edge_list):
return [(edge[1], edge[0],) + tuple(edge[2:]) for edge in edge_list][::-1]
def bfs_multi_edges(G, source, reverse=False, keys=True, data=False):
"""Produce edges in a breadth-first-search starting at source.
    Notes
    -----
Based on http://www.ics.uci.edu/~eppstein/PADS/BFS.py
by D. Eppstein, July 2004.
"""
from collections import deque
from functools import partial
if reverse:
G = G.reverse()
edges_iter = partial(G.edges_iter, keys=keys, data=data)
visited_nodes = set([source])
# visited_edges = set([])
queue = deque([(source, edges_iter(source))])
while queue:
parent, edges = queue[0]
try:
edge = next(edges)
edge_nodata = edge[0:3]
# if edge_nodata not in visited_edges:
yield edge
# visited_edges.add(edge_nodata)
child = edge_nodata[1]
if child not in visited_nodes:
visited_nodes.add(child)
queue.append((child, edges_iter(child)))
except StopIteration:
queue.popleft()
def dfs_conditional(G, source, state, can_cross):
"""
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_graph import *
>>> G = nx.Graph()
>>> G.add_edges_from([(1, 2), (2, 3), (3, 4), (4, 5)])
>>> G.adj[2][3]['lava'] = True
>>> G.adj[3][4]['lava'] = True
>>> def can_cross(G, edge, state):
>>> # can only cross lava once, then your lava protection wears off
>>> data = G.get_edge_data(*edge)
>>> lava = int(data.get('lava', False))
>>> if not lava or state == 0:
>>> return True, state + lava
>>> return False, lava
>>> assert 5 not in dfs_conditional(G, 1, state=0, can_cross=can_cross)
>>> G.adj[3][4]['lava'] = False
>>> assert 5 in dfs_conditional(G, 1, state=0, can_cross=can_cross)
"""
# stack based version
visited = {source}
stack = [(source, iter(G[source]), state)]
while stack:
parent, children, state = stack[-1]
try:
child = next(children)
if child not in visited:
edge = (parent, child)
flag, new_state = can_cross(G, edge, state)
if flag:
yield child
visited.add(child)
stack.append((child, iter(G[child]), new_state))
except StopIteration:
stack.pop()
def bfs_conditional(G, source, reverse=False, keys=True, data=False,
yield_nodes=True, yield_if=None,
continue_if=None, visited_nodes=None,
yield_source=False):
"""
Produce edges in a breadth-first-search starting at source, but only return
    nodes that satisfy a condition, and only iterate past a node if it
satisfies a different condition.
conditions are callables that take (G, child, edge) and return true or false
CommandLine:
python -m utool.util_graph bfs_conditional
Example:
>>> # DISABLE_DOCTEST
>>> import networkx as nx
>>> import utool as ut
>>> G = nx.Graph()
>>> G.add_edges_from([(1, 2), (1, 3), (2, 3), (2, 4)])
>>> continue_if = lambda G, child, edge: True
>>> result = list(ut.bfs_conditional(G, 1, yield_nodes=False))
>>> print(result)
[(1, 2), (1, 3), (2, 1), (2, 3), (2, 4), (3, 1), (3, 2), (4, 2)]
Example:
>>> # ENABLE_DOCTEST
>>> import networkx as nx
>>> import utool as ut
>>> G = nx.Graph()
>>> continue_if = lambda G, child, edge: (child % 2 == 0)
>>> yield_if = lambda G, child, edge: (child % 2 == 1)
>>> G.add_edges_from([(0, 1), (1, 3), (3, 5), (5, 10),
>>> (4, 3), (3, 6),
>>> (0, 2), (2, 4), (4, 6), (6, 10)])
>>> result = list(ut.bfs_conditional(G, 0, continue_if=continue_if,
>>> yield_if=yield_if))
>>> print(result)
[1, 3, 5]
"""
if reverse and hasattr(G, 'reverse'):
G = G.reverse()
if isinstance(G, nx.Graph):
neighbors = functools.partial(G.edges, data=data)
else:
neighbors = functools.partial(G.edges, keys=keys, data=data)
queue = collections.deque([])
if visited_nodes is None:
visited_nodes = set([])
else:
visited_nodes = set(visited_nodes)
if source not in visited_nodes:
if yield_nodes and yield_source:
yield source
visited_nodes.add(source)
new_edges = neighbors(source)
if isinstance(new_edges, list):
new_edges = iter(new_edges)
queue.append((source, new_edges))
while queue:
parent, edges = queue[0]
for edge in edges:
child = edge[1]
if yield_nodes:
if child not in visited_nodes:
if yield_if is None or yield_if(G, child, edge):
yield child
else:
if yield_if is None or yield_if(G, child, edge):
yield edge
if child not in visited_nodes:
visited_nodes.add(child)
# Add new children to queue if the condition is satisfied
if continue_if is None or continue_if(G, child, edge):
new_edges = neighbors(child)
if isinstance(new_edges, list):
new_edges = iter(new_edges)
queue.append((child, new_edges))
queue.popleft()
def color_nodes(graph, labelattr='label', brightness=.878,
outof=None, sat_adjust=None):
""" Colors edges and nodes by nid """
try:
import plottool_ibeis as pt
except ImportError:
import plottool as pt
import utool as ut
node_to_lbl = nx.get_node_attributes(graph, labelattr)
unique_lbls = sorted(set(node_to_lbl.values()))
ncolors = len(unique_lbls)
if outof is None:
if (ncolors) == 1:
unique_colors = [pt.LIGHT_BLUE]
elif (ncolors) == 2:
# https://matplotlib.org/examples/color/named_colors.html
unique_colors = ['royalblue', 'orange']
unique_colors = list(map(pt.color_funcs.ensure_base01, unique_colors))
else:
unique_colors = pt.distinct_colors(ncolors, brightness=brightness)
else:
unique_colors = pt.distinct_colors(outof, brightness=brightness)
if sat_adjust:
unique_colors = [
pt.color_funcs.adjust_hsv_of_rgb(c, sat_adjust=sat_adjust)
for c in unique_colors
]
# Find edges and aids strictly between two nids
if outof is None:
lbl_to_color = ut.dzip(unique_lbls, unique_colors)
else:
gray = pt.color_funcs.ensure_base01('lightgray')
unique_colors = [gray] + unique_colors
offset = max(1, min(unique_lbls)) - 1
node_to_lbl = ut.map_vals(lambda nid: max(0, nid - offset), node_to_lbl)
lbl_to_color = ut.dzip(range(outof + 1), unique_colors)
node_to_color = ut.map_vals(lbl_to_color, node_to_lbl)
nx.set_node_attributes(graph, name='color', values=node_to_color)
ut.nx_ensure_agraph_color(graph)
def graph_info(graph, ignore=None, stats=False, verbose=False):
import utool as ut
node_dict = nx_node_dict(graph)
node_attrs = list(node_dict.values())
edge_attrs = list(ut.take_column(graph.edges(data=True), 2))
if stats:
import utool
with utool.embed_on_exception_context:
import pandas as pd
node_df = pd.DataFrame(node_attrs)
edge_df = pd.DataFrame(edge_attrs)
if ignore is not None:
ut.delete_dict_keys(node_df, ignore)
ut.delete_dict_keys(edge_df, ignore)
# Not really histograms anymore
try:
node_attr_hist = node_df.describe().to_dict()
except ValueError:
                node_attr_hist = {}
try:
edge_attr_hist = edge_df.describe().to_dict()
except ValueError:
edge_attr_hist = {}
key_order = ['count', 'mean', 'std', 'min', '25%', '50%', '75%', 'max']
node_attr_hist = ut.map_dict_vals(lambda x: ut.order_dict_by(x, key_order), node_attr_hist)
edge_attr_hist = ut.map_dict_vals(lambda x: ut.order_dict_by(x, key_order), edge_attr_hist)
else:
node_attr_hist = ut.dict_hist(ut.flatten([attr.keys() for attr in node_attrs]))
edge_attr_hist = ut.dict_hist(ut.flatten([attr.keys() for attr in edge_attrs]))
if ignore is not None:
ut.delete_dict_keys(edge_attr_hist, ignore)
ut.delete_dict_keys(node_attr_hist, ignore)
node_type_hist = ut.dict_hist(list(map(type, graph.nodes())))
info_dict = ut.odict([
('directed', graph.is_directed()),
('multi', graph.is_multigraph()),
('num_nodes', len(graph)),
('num_edges', len(list(graph.edges()))),
('edge_attr_hist', ut.sort_dict(edge_attr_hist)),
('node_attr_hist', ut.sort_dict(node_attr_hist)),
('node_type_hist', ut.sort_dict(node_type_hist)),
('graph_attrs', graph.graph),
('graph_name', graph.name),
])
#unique_attrs = ut.map_dict_vals(ut.unique, ut.dict_accum(*node_attrs))
#ut.dict_isect_combine(*node_attrs))
#[list(attrs.keys())]
if verbose:
print(ut.repr3(info_dict))
return info_dict
def get_graph_bounding_box(graph):
# import utool as ut
try:
import vtool_ibeis as vt
except ImportError:
import vtool as vt
#nx.get_node_attrs = nx.get_node_attributes
nodes = list(graph.nodes())
# pos_list = nx_gen_node_values(graph, 'pos', nodes, default=(0, 0))
# shape_list = nx_gen_node_values(graph, 'size', nodes, default=(1, 1))
shape_list = nx_gen_node_values(graph, 'size', nodes)
pos_list = nx_gen_node_values(graph, 'pos', nodes)
node_extents = np.array([
vt.extent_from_bbox(vt.bbox_from_center_wh(xy, wh))
for xy, wh in zip(pos_list, shape_list)
])
tl_x, br_x, tl_y, br_y = node_extents.T
extent = tl_x.min(), br_x.max(), tl_y.min(), br_y.max()
bbox = vt.bbox_from_extent(extent)
return bbox
def translate_graph(graph, t_xy):
#import utool as ut
import utool as ut
node_pos_attrs = ['pos']
for attr in node_pos_attrs:
attrdict = nx.get_node_attributes(graph, attr)
attrdict = {
node: pos + t_xy
for node, pos in attrdict.items()
}
nx.set_node_attributes(graph, name=attr, values=attrdict)
edge_pos_attrs = ['ctrl_pts', 'end_pt', 'head_lp', 'lp', 'start_pt', 'tail_lp']
ut.nx_delete_None_edge_attr(graph)
for attr in edge_pos_attrs:
attrdict = nx.get_edge_attributes(graph, attr)
attrdict = {
node: pos + t_xy
if pos is not None else pos
for node, pos in attrdict.items()
}
nx.set_edge_attributes(graph, name=attr, values=attrdict)
def translate_graph_to_origin(graph):
x, y, w, h = get_graph_bounding_box(graph)
translate_graph(graph, (-x, -y))
def stack_graphs(graph_list, vert=False, pad=None):
import utool as ut
graph_list_ = [g.copy() for g in graph_list]
for g in graph_list_:
translate_graph_to_origin(g)
bbox_list = [get_graph_bounding_box(g) for g in graph_list_]
if vert:
dim1 = 3
dim2 = 2
else:
dim1 = 2
dim2 = 3
dim1_list = np.array([bbox[dim1] for bbox in bbox_list])
dim2_list = np.array([bbox[dim2] for bbox in bbox_list])
if pad is None:
pad = np.mean(dim1_list) / 2
offset1_list = ut.cumsum([0] + [d + pad for d in dim1_list[:-1]])
max_dim2 = max(dim2_list)
offset2_list = [(max_dim2 - d2) / 2 for d2 in dim2_list]
if vert:
t_xy_list = [(d2, d1) for d1, d2 in zip(offset1_list, offset2_list)]
else:
t_xy_list = [(d1, d2) for d1, d2 in zip(offset1_list, offset2_list)]
for g, t_xy in zip(graph_list_, t_xy_list):
translate_graph(g, t_xy)
nx.set_node_attributes(g, name='pin', values='true')
new_graph = nx.compose_all(graph_list_)
#pt.show_nx(new_graph, layout='custom', node_labels=False, as_directed=False) # NOQA
return new_graph
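# Hedged usage sketch (illustrative, not in the original source): stack_graphs
# lays out already-positioned graphs side by side (or top to bottom when
# vert=True) by translating each one to the origin and then offsetting it,
# pinning node positions so a later layout pass keeps them in place.
#
#   combined = stack_graphs([g1, g2], vert=False, pad=10)
#   # g1 and g2 are assumed to carry 'pos' and 'size' node attributes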
def nx_contracted_nodes(G, u, v, self_loops=True, inplace=False):
"""
copy of networkx function with inplace modification
TODO: commit to networkx
"""
import itertools as it
if G.is_directed():
in_edges = ((w, u, d) for w, x, d in G.in_edges(v, data=True)
if self_loops or w != u)
out_edges = ((u, w, d) for x, w, d in G.out_edges(v, data=True)
if self_loops or w != u)
new_edges = it.chain(in_edges, out_edges)
else:
new_edges = ((u, w, d) for x, w, d in G.edges(v, data=True)
if self_loops or w != u)
if inplace:
H = G
new_edges = list(new_edges)
else:
H = G.copy()
node_dict = nx_node_dict(H)
v_data = node_dict[v]
H.remove_node(v)
H.add_edges_from(new_edges)
if 'contraction' in node_dict[u]:
node_dict[u]['contraction'][v] = v_data
else:
node_dict[u]['contraction'] = {v: v_data}
return H
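# Hedged usage sketch (not part of the original module): unlike
# nx.contracted_nodes, this variant can mutate the input graph when
# inplace=True, avoiding a copy inside tight loops.
#
#   G = nx.Graph([(1, 2), (2, 3)])
#   H = nx_contracted_nodes(G, 1, 3, inplace=True)
#   # H is G itself; node 3 has been merged into node 1 and its data recorded
#   # under the 'contraction' attribute of node 1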
def approx_min_num_components(nodes, negative_edges):
"""
Find approximate minimum number of connected components possible
Each edge represents that two nodes must be separated
This code does not solve the problem exactly. The problem is NP-complete
and reduces to minimum clique cover (MCC), so this is only an approximate
solution; the approximation ratio is not known.
CommandLine:
python -m utool.util_graph approx_min_num_components
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_graph import * # NOQA
>>> import utool as ut
>>> nodes = [1, 2, 3, 4, 5, 6, 7, 8, 9]
>>> edges = [(1, 2), (2, 3), (3, 1),
>>> (4, 5), (5, 6), (6, 4),
>>> (7, 8), (8, 9), (9, 7),
>>> (1, 4), (4, 7), (7, 1),
>>> ]
>>> g_pos = nx.Graph()
>>> g_pos.add_edges_from(edges)
>>> g_neg = nx.complement(g_pos)
>>> #import plottool as pt
>>> #pt.qt4ensure()
>>> #pt.show_nx(g_pos)
>>> #pt.show_nx(g_neg)
>>> negative_edges = g_neg.edges()
>>> nodes = [1, 2, 3, 4, 5, 6, 7]
>>> negative_edges = [(1, 2), (2, 3), (4, 5)]
>>> result = approx_min_num_components(nodes, negative_edges)
>>> print(result)
2
"""
import utool as ut
num = 0
g_neg = nx.Graph()
g_neg.add_nodes_from(nodes)
g_neg.add_edges_from(negative_edges)
# Collapse all nodes with degree 0
if nx.__version__.startswith('2'):
deg0_nodes = [n for n, d in g_neg.degree() if d == 0]
else:
deg0_nodes = [n for n, d in g_neg.degree_iter() if d == 0]
for u, v in ut.itertwo(deg0_nodes):
nx_contracted_nodes(g_neg, v, u, inplace=True)
# g_neg = nx.contracted_nodes(g_neg, v, u, self_loops=False)
# Initialize unused nodes to be everything
unused = list(g_neg.nodes())
# complement of the graph contains all possible positive edges
g_pos = nx.complement(g_neg)
if False:
from networkx.algorithms.approximation import clique
maxiset, cliques = clique.clique_removal(g_pos)
num = len(cliques)
return num
# Iterate until we have used all nodes
while len(unused) > 0:
# Seed a new "minimum component"
num += 1
# Grab a random unused node n1
#idx1 = np.random.randint(0, len(unused))
idx1 = 0
n1 = unused[idx1]
unused.remove(n1)
neigbs = list(g_pos.neighbors(n1))
neigbs = ut.isect(neigbs, unused)
while len(neigbs) > 0:
# Find node n2, that n1 could be connected to
#idx2 = np.random.randint(0, len(neigbs))
idx2 = 0
n2 = neigbs[idx2]
unused.remove(n2)
# Collapse negative information of n1 and n2
g_neg = nx.contracted_nodes(g_neg, n1, n2)
# Compute new possible positive edges
g_pos = nx.complement(g_neg)
# Iterate until n1 has no more possible connections
neigbs = list(g_pos.neighbors(n1))
neigbs = ut.isect(neigbs, unused)
print('num = %r' % (num,))
return num
def nx_mincut_edges_weighted(G, s, t, capacity='weight'):
# http://stackoverflow.com/questions/33332462/minimum-s-t-edge-cut-which-takes-edge-weight-into-consideration
cut_weight, partitions = nx.minimum_cut(G, s, t, capacity=capacity)
edge_cut_list = []
for p1_node in partitions[0]:
for p2_node in partitions[1]:
if G.has_edge(p1_node, p2_node):
edge_cut_list.append((p1_node, p2_node))
# assert edge_cut_list == nx_edges_between(G, partitions[0], partitions[1])
return edge_cut_list
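# Hedged usage sketch (assumed example, not in the original source): the
# returned edges are the ones crossing the two sides of a weighted minimum
# s-t cut.
#
#   G = nx.Graph()
#   G.add_edge('s', 'a', weight=3)
#   G.add_edge('a', 't', weight=1)
#   G.add_edge('s', 't', weight=2)
#   nx_mincut_edges_weighted(G, 's', 't', capacity='weight')
#   # cut consists of ('a', 't') and ('s', 't'), total cut weight 3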
def weighted_diamter(graph, weight=None):
if weight is None:
distances = nx.all_pairs_shortest_path_length(graph)
else:
distances = nx.all_pairs_dijkstra_path_length(graph, weight=weight)
if isinstance(distances, dict):
eccentricities = (max(list(dists.values())) for node, dists in distances.items())
else:
eccentricities = (max(list(dists.values())) for node, dists in distances)
diameter = max(list(eccentricities))
return diameter
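# Hedged usage sketch (illustrative, not in the original source): on an
# unweighted path graph the weighted diameter reduces to the ordinary graph
# diameter (longest shortest path, counted in hops).
#
#   weighted_diamter(nx.path_graph(4))   # -> 3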
def mincost_diameter_augment(graph, max_cost, candidates=None, weight=None, cost=None):
"""
PROBLEM: Bounded Cost Minimum Diameter Edge Addition (BCMD)
Args:
graph (nx.Graph): input graph
max_cost (float): maximum allowed weighted diameter of the graph
weight (str): key of the edge weight attribute
cost (str): key of the edge cost attribute
candidates (list): set of non-edges, optional, defaults
to the complement of the graph
Returns:
None: if no solution exists
list: minimum cost edges if solution exists
Notes:
We are given a graph G = (V, E) with an edge weight function w, an edge
cost function c, and a maximum cost B.
Given a set of candidate non-edges F, the goal is to choose a minimum-cost
subset of F to add so that the weighted diameter does not exceed B.
Let x[e] in {0, 1} denote if a non-edge e is excluded or included.
minimize sum(c(e) * x[e] for e in F)
such that
weighted_diamter(graph.union({e for e in F if x[e]})) <= B
References:
https://www.cse.unsw.edu.au/~sergeg/papers/FratiGGM13isaac.pdf
http://www.cis.upenn.edu/~sanjeev/papers/diameter.pdf
http://dl.acm.org/citation.cfm?id=2953882
Notes:
There is a 4-Approximation of the BCMD problem
Running time is O((3 ** B * B ** 3 + n + log(B * n)) * B * n ** 2)
This algorithm uses a clustering approach to find a set C of B + 1
cluster centers. Then we create a minimum height rooted tree, T = (U
\subseteq V, D) so that C \subseteq U. This tree T approximates an
optimal B-augmentation.
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_graph import * # NOQA
>>> import utool as ut
>>> graph = nx.Graph()
>>> if nx.__version__.startswith('1'):
>>> nx.add_path = nx.Graph.add_path
>>> nx.add_path(graph, range(6))
>>> #cost_func = lambda e: e[0] + e[1]
>>> cost_func = lambda e: 1
>>> weight_func = lambda e: (e[0]) / e[1]
>>> comp_graph = nx.complement(graph)
>>> nx.set_edge_attributes(graph, name='cost', values={e: cost_func(e) for e in graph.edges()})
>>> nx.set_edge_attributes(graph, name='weight', values={e: weight_func(e) for e in graph.edges()})
>>> nx.set_edge_attributes(comp_graph, name='cost', values={e: cost_func(e) for e in comp_graph.edges()})
>>> nx.set_edge_attributes(comp_graph, name='weight', values={e: weight_func(e) for e in comp_graph.edges()})
>>> candidates = list(comp_graph.edges(data=True))
>>> max_cost = 2
>>> cost = 'cost'
>>> weight = 'weight'
>>> best_edges = mincost_diameter_augment(graph, max_cost, candidates, weight, cost)
>>> print('best_edges = %r' % (best_edges,))
>>> soln_edges = greedy_mincost_diameter_augment(graph, max_cost, candidates, weight, cost)
>>> print('soln_edges = %r' % (soln_edges,))
"""
import utool as ut
import operator as op
from functools import reduce  # needed below; reduce is not a builtin in Python 3
if candidates is None:
candidates = list(nx.complement(graph).edges(data=True))
def augment_add(graph, edges):
aug_graph = graph.copy()
aug_graph.add_edges_from(edges)
return aug_graph
def solution_energy(chosen_edges):
if weight is None:
return len(chosen_edges)
else:
return sum(d[weight] for (u, v, d) in chosen_edges)
variable_basis = [(0, 1) for _ in candidates]
best_energy = np.inf
best_soln = None
soln_generator = ut.product(*variable_basis)
length = reduce(op.mul, map(len, variable_basis), 1)
if length > 3000:
# Let the user know that it might take some time to find a solution
soln_generator = ut.ProgIter(soln_generator, label='BruteForce BCMD',
length=length)
# Brute force solution
for x in soln_generator:
chosen_edges = ut.compress(candidates, x)
aug_graph = augment_add(graph, chosen_edges)
total_cost = weighted_diamter(aug_graph, weight=cost)
energy = solution_energy(chosen_edges)
if total_cost <= max_cost:
if energy < best_energy:
best_energy = energy
best_soln = x
best_edges = ut.compress(candidates, best_soln)
return best_edges
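# Hedged note (added, not in the original source): the brute-force search above
# enumerates all 2 ** len(candidates) inclusion patterns, so it is only
# practical for small candidate sets; greedy_mincost_diameter_augment below is
# the scalable heuristic alternative.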
def greedy_mincost_diameter_augment(graph, max_cost, candidates=None, weight=None, cost=None):
# import utool as ut
def solution_cost(graph):
return weighted_diamter(graph, weight=cost)
def solution_energy(chosen_edges):
if weight is None:
return len(chosen_edges)
else:
return sum(d[weight] for (u, v, d) in chosen_edges)
def augment_add(graph, edges):
aug_graph = graph.copy()
aug_graph.add_edges_from(edges)
return aug_graph
def augment_remove(graph, edges):
aug_graph = graph.copy()
aug_graph.remove_edges_from(edges)
return aug_graph
base_cost = solution_cost(graph)
# base_energy = 0
full_graph = augment_add(graph, candidates)
full_cost = solution_cost(full_graph)
# full_energy = solution_energy(candidates)
def greedy_improvement(soln_graph, available_candidates, base_cost=None):
"""
Choose edge that results in the best improvement
"""
best_loss = None
best_cost = None
best_energy = None
best_e = None
best_graph = None
for e in available_candidates:
aug_graph = augment_add(soln_graph, [e])
aug_cost = solution_cost(aug_graph)
aug_energy = solution_energy([e])
# We don't want to go over if possible
aug_loss = max(aug_cost - max_cost, 0)
if best_loss is None or aug_loss <= best_loss:
if best_energy is None or aug_energy < best_energy:
best_loss = aug_loss
best_e = e
best_graph = aug_graph
best_cost = aug_cost
best_energy = aug_energy
if best_e is None:
return None
else:
return best_cost, best_graph, best_energy, best_e
import warnings
if full_cost > max_cost:
warnings.warn('no feasible solution')
else:
soln_graph = graph.copy()
available_candidates = candidates[:]
soln_edges = []
soln_energy = 0
soln_cost = base_cost
# Add edges to the solution until the cost is feasible
while soln_cost > max_cost and len(available_candidates):
tup = greedy_improvement(soln_graph, available_candidates, soln_cost)
if tup is None:
warnings.warn('no improvement found')
break
soln_cost, soln_graph, best_energy, best_e = tup
soln_energy += best_energy
soln_edges.append(best_e)
available_candidates.remove(best_e)
# Check to see we can remove edges while maintaining feasibility
for e in soln_edges[:]:
aug_graph = augment_remove(soln_graph, [e])
aug_cost = solution_cost(aug_graph)
if aug_cost <= soln_cost:
soln_cost = aug_cost
soln_graph = aug_graph
soln_edges.remove(e)
return soln_edges
if __name__ == '__main__':
r"""
CommandLine:
python -m utool.util_graph --allexamples
"""
import multiprocessing
multiprocessing.freeze_support() # for win32
import utool as ut # NOQA
ut.doctest_funcs()
| apache-2.0 |
akionakamura/scikit-learn | sklearn/neighbors/tests/test_kd_tree.py | 129 | 7848 | import numpy as np
from numpy.testing import assert_array_almost_equal
from sklearn.neighbors.kd_tree import (KDTree, NeighborsHeap,
simultaneous_sort, kernel_norm,
nodeheap_sort, DTYPE, ITYPE)
from sklearn.neighbors.dist_metrics import DistanceMetric
from sklearn.utils.testing import SkipTest, assert_allclose
V = np.random.random((3, 3))
V = np.dot(V, V.T)
DIMENSION = 3
METRICS = {'euclidean': {},
'manhattan': {},
'chebyshev': {},
'minkowski': dict(p=3)}
def brute_force_neighbors(X, Y, k, metric, **kwargs):
D = DistanceMetric.get_metric(metric, **kwargs).pairwise(Y, X)
ind = np.argsort(D, axis=1)[:, :k]
dist = D[np.arange(Y.shape[0])[:, None], ind]
return dist, ind
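# Hedged usage sketch (not part of the original test module):
# brute_force_neighbors is the O(n * m) reference the KDTree queries are
# checked against, e.g.
#
#   X, Y = np.random.random((40, 3)), np.random.random((10, 3))
#   dist, ind = brute_force_neighbors(X, Y, k=3, metric='euclidean')
#   # dist[i] holds the 3 smallest distances from Y[i] to rows of X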
def test_kd_tree_query():
np.random.seed(0)
X = np.random.random((40, DIMENSION))
Y = np.random.random((10, DIMENSION))
def check_neighbors(dualtree, breadth_first, k, metric, kwargs):
kdt = KDTree(X, leaf_size=1, metric=metric, **kwargs)
dist1, ind1 = kdt.query(Y, k, dualtree=dualtree,
breadth_first=breadth_first)
dist2, ind2 = brute_force_neighbors(X, Y, k, metric, **kwargs)
# don't check indices here: if there are any duplicate distances,
# the indices may not match. Distances should not have this problem.
assert_array_almost_equal(dist1, dist2)
for (metric, kwargs) in METRICS.items():
for k in (1, 3, 5):
for dualtree in (True, False):
for breadth_first in (True, False):
yield (check_neighbors,
dualtree, breadth_first,
k, metric, kwargs)
def test_kd_tree_query_radius(n_samples=100, n_features=10):
np.random.seed(0)
X = 2 * np.random.random(size=(n_samples, n_features)) - 1
query_pt = np.zeros(n_features, dtype=float)
eps = 1E-15 # roundoff error can cause test to fail
kdt = KDTree(X, leaf_size=5)
rad = np.sqrt(((X - query_pt) ** 2).sum(1))
for r in np.linspace(rad[0], rad[-1], 100):
ind = kdt.query_radius(query_pt, r + eps)[0]
i = np.where(rad <= r + eps)[0]
ind.sort()
i.sort()
assert_array_almost_equal(i, ind)
def test_kd_tree_query_radius_distance(n_samples=100, n_features=10):
np.random.seed(0)
X = 2 * np.random.random(size=(n_samples, n_features)) - 1
query_pt = np.zeros(n_features, dtype=float)
eps = 1E-15 # roundoff error can cause test to fail
kdt = KDTree(X, leaf_size=5)
rad = np.sqrt(((X - query_pt) ** 2).sum(1))
for r in np.linspace(rad[0], rad[-1], 100):
ind, dist = kdt.query_radius(query_pt, r + eps, return_distance=True)
ind = ind[0]
dist = dist[0]
d = np.sqrt(((query_pt - X[ind]) ** 2).sum(1))
assert_array_almost_equal(d, dist)
def compute_kernel_slow(Y, X, kernel, h):
d = np.sqrt(((Y[:, None, :] - X) ** 2).sum(-1))
norm = kernel_norm(h, X.shape[1], kernel)
if kernel == 'gaussian':
return norm * np.exp(-0.5 * (d * d) / (h * h)).sum(-1)
elif kernel == 'tophat':
return norm * (d < h).sum(-1)
elif kernel == 'epanechnikov':
return norm * ((1.0 - (d * d) / (h * h)) * (d < h)).sum(-1)
elif kernel == 'exponential':
return norm * (np.exp(-d / h)).sum(-1)
elif kernel == 'linear':
return norm * ((1 - d / h) * (d < h)).sum(-1)
elif kernel == 'cosine':
return norm * (np.cos(0.5 * np.pi * d / h) * (d < h)).sum(-1)
else:
raise ValueError('kernel not recognized')
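# Hedged note (added, not in the original test module): compute_kernel_slow is
# a dense O(n * m) reference density; `h` is the bandwidth and kernel_norm
# supplies the normalisation constant, mirroring what KDTree.kernel_density is
# expected to return in the tests below.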
def test_kd_tree_kde(n_samples=100, n_features=3):
np.random.seed(0)
X = np.random.random((n_samples, n_features))
Y = np.random.random((n_samples, n_features))
kdt = KDTree(X, leaf_size=10)
for kernel in ['gaussian', 'tophat', 'epanechnikov',
'exponential', 'linear', 'cosine']:
for h in [0.01, 0.1, 1]:
dens_true = compute_kernel_slow(Y, X, kernel, h)
def check_results(kernel, h, atol, rtol, breadth_first):
dens = kdt.kernel_density(Y, h, atol=atol, rtol=rtol,
kernel=kernel,
breadth_first=breadth_first)
assert_allclose(dens, dens_true, atol=atol,
rtol=max(rtol, 1e-7))
for rtol in [0, 1E-5]:
for atol in [1E-6, 1E-2]:
for breadth_first in (True, False):
yield (check_results, kernel, h, atol, rtol,
breadth_first)
def test_gaussian_kde(n_samples=1000):
# Compare gaussian KDE results to scipy.stats.gaussian_kde
from scipy.stats import gaussian_kde
np.random.seed(0)
x_in = np.random.normal(0, 1, n_samples)
x_out = np.linspace(-5, 5, 30)
for h in [0.01, 0.1, 1]:
kdt = KDTree(x_in[:, None])
try:
gkde = gaussian_kde(x_in, bw_method=h / np.std(x_in))
except TypeError:
raise SkipTest("Old scipy, does not accept explicit bandwidth.")
dens_kdt = kdt.kernel_density(x_out[:, None], h) / n_samples
dens_gkde = gkde.evaluate(x_out)
assert_array_almost_equal(dens_kdt, dens_gkde, decimal=3)
def test_kd_tree_two_point(n_samples=100, n_features=3):
np.random.seed(0)
X = np.random.random((n_samples, n_features))
Y = np.random.random((n_samples, n_features))
r = np.linspace(0, 1, 10)
kdt = KDTree(X, leaf_size=10)
D = DistanceMetric.get_metric("euclidean").pairwise(Y, X)
counts_true = [(D <= ri).sum() for ri in r]
def check_two_point(r, dualtree):
counts = kdt.two_point_correlation(Y, r=r, dualtree=dualtree)
assert_array_almost_equal(counts, counts_true)
for dualtree in (True, False):
yield check_two_point, r, dualtree
def test_kd_tree_pickle():
import pickle
np.random.seed(0)
X = np.random.random((10, 3))
kdt1 = KDTree(X, leaf_size=1)
ind1, dist1 = kdt1.query(X)
def check_pickle_protocol(protocol):
s = pickle.dumps(kdt1, protocol=protocol)
kdt2 = pickle.loads(s)
ind2, dist2 = kdt2.query(X)
assert_array_almost_equal(ind1, ind2)
assert_array_almost_equal(dist1, dist2)
for protocol in (0, 1, 2):
yield check_pickle_protocol, protocol
def test_neighbors_heap(n_pts=5, n_nbrs=10):
heap = NeighborsHeap(n_pts, n_nbrs)
for row in range(n_pts):
d_in = np.random.random(2 * n_nbrs).astype(DTYPE)
i_in = np.arange(2 * n_nbrs, dtype=ITYPE)
for d, i in zip(d_in, i_in):
heap.push(row, d, i)
ind = np.argsort(d_in)
d_in = d_in[ind]
i_in = i_in[ind]
d_heap, i_heap = heap.get_arrays(sort=True)
assert_array_almost_equal(d_in[:n_nbrs], d_heap[row])
assert_array_almost_equal(i_in[:n_nbrs], i_heap[row])
def test_node_heap(n_nodes=50):
vals = np.random.random(n_nodes).astype(DTYPE)
i1 = np.argsort(vals)
vals2, i2 = nodeheap_sort(vals)
assert_array_almost_equal(i1, i2)
assert_array_almost_equal(vals[i1], vals2)
def test_simultaneous_sort(n_rows=10, n_pts=201):
dist = np.random.random((n_rows, n_pts)).astype(DTYPE)
ind = (np.arange(n_pts) + np.zeros((n_rows, 1))).astype(ITYPE)
dist2 = dist.copy()
ind2 = ind.copy()
# simultaneous sort rows using function
simultaneous_sort(dist, ind)
# simultaneous sort rows using numpy
i = np.argsort(dist2, axis=1)
row_ind = np.arange(n_rows)[:, None]
dist2 = dist2[row_ind, i]
ind2 = ind2[row_ind, i]
assert_array_almost_equal(dist, dist2)
assert_array_almost_equal(ind, ind2)
| bsd-3-clause |
ammarkhann/FinalSeniorCode | lib/python2.7/site-packages/pandas/tests/plotting/test_hist_method.py | 6 | 15335 | # coding: utf-8
""" Test cases for .hist method """
import pytest
from pandas import Series, DataFrame
import pandas.util.testing as tm
from pandas.util.testing import slow
import numpy as np
from numpy.random import randn
from pandas.plotting._core import grouped_hist
from pandas.tests.plotting.common import (TestPlotBase, _check_plot_works)
tm._skip_module_if_no_mpl()
class TestSeriesPlots(TestPlotBase):
def setup_method(self, method):
TestPlotBase.setup_method(self, method)
import matplotlib as mpl
mpl.rcdefaults()
self.ts = tm.makeTimeSeries()
self.ts.name = 'ts'
@slow
def test_hist_legacy(self):
_check_plot_works(self.ts.hist)
_check_plot_works(self.ts.hist, grid=False)
_check_plot_works(self.ts.hist, figsize=(8, 10))
# _check_plot_works adds an ax so catch warning. see GH #13188
with tm.assert_produces_warning(UserWarning):
_check_plot_works(self.ts.hist, by=self.ts.index.month)
with tm.assert_produces_warning(UserWarning):
_check_plot_works(self.ts.hist, by=self.ts.index.month, bins=5)
fig, ax = self.plt.subplots(1, 1)
_check_plot_works(self.ts.hist, ax=ax)
_check_plot_works(self.ts.hist, ax=ax, figure=fig)
_check_plot_works(self.ts.hist, figure=fig)
tm.close()
fig, (ax1, ax2) = self.plt.subplots(1, 2)
_check_plot_works(self.ts.hist, figure=fig, ax=ax1)
_check_plot_works(self.ts.hist, figure=fig, ax=ax2)
with pytest.raises(ValueError):
self.ts.hist(by=self.ts.index, figure=fig)
@slow
def test_hist_bins_legacy(self):
df = DataFrame(np.random.randn(10, 2))
ax = df.hist(bins=2)[0][0]
assert len(ax.patches) == 2
@slow
def test_hist_layout(self):
df = self.hist_df
with pytest.raises(ValueError):
df.height.hist(layout=(1, 1))
with pytest.raises(ValueError):
df.height.hist(layout=[1, 1])
@slow
def test_hist_layout_with_by(self):
df = self.hist_df
# _check_plot_works adds an `ax` kwarg to the method call
# so we get a warning about an axis being cleared, even
# though we don't explicitly pass one, see GH #13188
with tm.assert_produces_warning(UserWarning):
axes = _check_plot_works(df.height.hist, by=df.gender,
layout=(2, 1))
self._check_axes_shape(axes, axes_num=2, layout=(2, 1))
with tm.assert_produces_warning(UserWarning):
axes = _check_plot_works(df.height.hist, by=df.gender,
layout=(3, -1))
self._check_axes_shape(axes, axes_num=2, layout=(3, 1))
with tm.assert_produces_warning(UserWarning):
axes = _check_plot_works(df.height.hist, by=df.category,
layout=(4, 1))
self._check_axes_shape(axes, axes_num=4, layout=(4, 1))
with tm.assert_produces_warning(UserWarning):
axes = _check_plot_works(
df.height.hist, by=df.category, layout=(2, -1))
self._check_axes_shape(axes, axes_num=4, layout=(2, 2))
with tm.assert_produces_warning(UserWarning):
axes = _check_plot_works(
df.height.hist, by=df.category, layout=(3, -1))
self._check_axes_shape(axes, axes_num=4, layout=(3, 2))
with tm.assert_produces_warning(UserWarning):
axes = _check_plot_works(
df.height.hist, by=df.category, layout=(-1, 4))
self._check_axes_shape(axes, axes_num=4, layout=(1, 4))
with tm.assert_produces_warning(UserWarning):
axes = _check_plot_works(
df.height.hist, by=df.classroom, layout=(2, 2))
self._check_axes_shape(axes, axes_num=3, layout=(2, 2))
axes = df.height.hist(by=df.category, layout=(4, 2), figsize=(12, 7))
self._check_axes_shape(
axes, axes_num=4, layout=(4, 2), figsize=(12, 7))
@slow
def test_hist_no_overlap(self):
from matplotlib.pyplot import subplot, gcf
x = Series(randn(2))
y = Series(randn(2))
subplot(121)
x.hist()
subplot(122)
y.hist()
fig = gcf()
axes = fig.axes if self.mpl_ge_1_5_0 else fig.get_axes()
assert len(axes) == 2
@slow
def test_hist_by_no_extra_plots(self):
df = self.hist_df
axes = df.height.hist(by=df.gender) # noqa
assert len(self.plt.get_fignums()) == 1
@slow
def test_plot_fails_when_ax_differs_from_figure(self):
from pylab import figure
fig1 = figure()
fig2 = figure()
ax1 = fig1.add_subplot(111)
with pytest.raises(AssertionError):
self.ts.hist(ax=ax1, figure=fig2)
class TestDataFramePlots(TestPlotBase):
@slow
def test_hist_df_legacy(self):
from matplotlib.patches import Rectangle
with tm.assert_produces_warning(UserWarning):
_check_plot_works(self.hist_df.hist)
# make sure layout is handled
df = DataFrame(randn(100, 3))
with tm.assert_produces_warning(UserWarning):
axes = _check_plot_works(df.hist, grid=False)
self._check_axes_shape(axes, axes_num=3, layout=(2, 2))
assert not axes[1, 1].get_visible()
df = DataFrame(randn(100, 1))
_check_plot_works(df.hist)
# make sure layout is handled
df = DataFrame(randn(100, 6))
with tm.assert_produces_warning(UserWarning):
axes = _check_plot_works(df.hist, layout=(4, 2))
self._check_axes_shape(axes, axes_num=6, layout=(4, 2))
# make sure sharex, sharey is handled
with tm.assert_produces_warning(UserWarning):
_check_plot_works(df.hist, sharex=True, sharey=True)
# handle figsize arg
with tm.assert_produces_warning(UserWarning):
_check_plot_works(df.hist, figsize=(8, 10))
# check bins argument
with tm.assert_produces_warning(UserWarning):
_check_plot_works(df.hist, bins=5)
# make sure xlabelsize and xrot are handled
ser = df[0]
xf, yf = 20, 18
xrot, yrot = 30, 40
axes = ser.hist(xlabelsize=xf, xrot=xrot, ylabelsize=yf, yrot=yrot)
self._check_ticks_props(axes, xlabelsize=xf, xrot=xrot,
ylabelsize=yf, yrot=yrot)
xf, yf = 20, 18
xrot, yrot = 30, 40
axes = df.hist(xlabelsize=xf, xrot=xrot, ylabelsize=yf, yrot=yrot)
self._check_ticks_props(axes, xlabelsize=xf, xrot=xrot,
ylabelsize=yf, yrot=yrot)
tm.close()
# make sure kwargs to hist are handled
ax = ser.hist(normed=True, cumulative=True, bins=4)
# height of the last (cumulative) bin must be 1.0
rects = [x for x in ax.get_children() if isinstance(x, Rectangle)]
tm.assert_almost_equal(rects[-1].get_height(), 1.0)
tm.close()
ax = ser.hist(log=True)
# scale of y must be 'log'
self._check_ax_scales(ax, yaxis='log')
tm.close()
# propagate attr exception from matplotlib.Axes.hist
with pytest.raises(AttributeError):
ser.hist(foo='bar')
@slow
def test_hist_layout(self):
df = DataFrame(randn(100, 3))
layout_to_expected_size = (
{'layout': None, 'expected_size': (2, 2)}, # default is 2x2
{'layout': (2, 2), 'expected_size': (2, 2)},
{'layout': (4, 1), 'expected_size': (4, 1)},
{'layout': (1, 4), 'expected_size': (1, 4)},
{'layout': (3, 3), 'expected_size': (3, 3)},
{'layout': (-1, 4), 'expected_size': (1, 4)},
{'layout': (4, -1), 'expected_size': (4, 1)},
{'layout': (-1, 2), 'expected_size': (2, 2)},
{'layout': (2, -1), 'expected_size': (2, 2)}
)
for layout_test in layout_to_expected_size:
axes = df.hist(layout=layout_test['layout'])
expected = layout_test['expected_size']
self._check_axes_shape(axes, axes_num=3, layout=expected)
# layout too small for all 4 plots
with pytest.raises(ValueError):
df.hist(layout=(1, 1))
# invalid format for layout
with pytest.raises(ValueError):
df.hist(layout=(1,))
with pytest.raises(ValueError):
df.hist(layout=(-1, -1))
@slow
# GH 9351
def test_tight_layout(self):
if self.mpl_ge_2_0_1:
df = DataFrame(randn(100, 3))
_check_plot_works(df.hist)
self.plt.tight_layout()
tm.close()
class TestDataFrameGroupByPlots(TestPlotBase):
@slow
def test_grouped_hist_legacy(self):
from matplotlib.patches import Rectangle
df = DataFrame(randn(500, 2), columns=['A', 'B'])
df['C'] = np.random.randint(0, 4, 500)
df['D'] = ['X'] * 500
axes = grouped_hist(df.A, by=df.C)
self._check_axes_shape(axes, axes_num=4, layout=(2, 2))
tm.close()
axes = df.hist(by=df.C)
self._check_axes_shape(axes, axes_num=4, layout=(2, 2))
tm.close()
# group by a key with single value
axes = df.hist(by='D', rot=30)
self._check_axes_shape(axes, axes_num=1, layout=(1, 1))
self._check_ticks_props(axes, xrot=30)
tm.close()
# make sure kwargs to hist are handled
xf, yf = 20, 18
xrot, yrot = 30, 40
axes = grouped_hist(df.A, by=df.C, normed=True, cumulative=True,
bins=4, xlabelsize=xf, xrot=xrot,
ylabelsize=yf, yrot=yrot)
# height of the last (cumulative) bin must be 1.0
for ax in axes.ravel():
rects = [x for x in ax.get_children() if isinstance(x, Rectangle)]
height = rects[-1].get_height()
tm.assert_almost_equal(height, 1.0)
self._check_ticks_props(axes, xlabelsize=xf, xrot=xrot,
ylabelsize=yf, yrot=yrot)
tm.close()
axes = grouped_hist(df.A, by=df.C, log=True)
# scale of y must be 'log'
self._check_ax_scales(axes, yaxis='log')
tm.close()
# propagate attr exception from matplotlib.Axes.hist
with pytest.raises(AttributeError):
grouped_hist(df.A, by=df.C, foo='bar')
with tm.assert_produces_warning(FutureWarning):
df.hist(by='C', figsize='default')
@slow
def test_grouped_hist_legacy2(self):
n = 10
weight = Series(np.random.normal(166, 20, size=n))
height = Series(np.random.normal(60, 10, size=n))
with tm.RNGContext(42):
gender_int = np.random.choice([0, 1], size=n)
df_int = DataFrame({'height': height, 'weight': weight,
'gender': gender_int})
gb = df_int.groupby('gender')
axes = gb.hist()
assert len(axes) == 2
assert len(self.plt.get_fignums()) == 2
tm.close()
@slow
def test_grouped_hist_layout(self):
df = self.hist_df
pytest.raises(ValueError, df.hist, column='weight', by=df.gender,
layout=(1, 1))
pytest.raises(ValueError, df.hist, column='height', by=df.category,
layout=(1, 3))
pytest.raises(ValueError, df.hist, column='height', by=df.category,
layout=(-1, -1))
with tm.assert_produces_warning(UserWarning):
axes = _check_plot_works(df.hist, column='height', by=df.gender,
layout=(2, 1))
self._check_axes_shape(axes, axes_num=2, layout=(2, 1))
with tm.assert_produces_warning(UserWarning):
axes = _check_plot_works(df.hist, column='height', by=df.gender,
layout=(2, -1))
self._check_axes_shape(axes, axes_num=2, layout=(2, 1))
axes = df.hist(column='height', by=df.category, layout=(4, 1))
self._check_axes_shape(axes, axes_num=4, layout=(4, 1))
axes = df.hist(column='height', by=df.category, layout=(-1, 1))
self._check_axes_shape(axes, axes_num=4, layout=(4, 1))
axes = df.hist(column='height', by=df.category,
layout=(4, 2), figsize=(12, 8))
self._check_axes_shape(
axes, axes_num=4, layout=(4, 2), figsize=(12, 8))
tm.close()
# GH 6769
with tm.assert_produces_warning(UserWarning):
axes = _check_plot_works(
df.hist, column='height', by='classroom', layout=(2, 2))
self._check_axes_shape(axes, axes_num=3, layout=(2, 2))
# without column
with tm.assert_produces_warning(UserWarning):
axes = _check_plot_works(df.hist, by='classroom')
self._check_axes_shape(axes, axes_num=3, layout=(2, 2))
axes = df.hist(by='gender', layout=(3, 5))
self._check_axes_shape(axes, axes_num=2, layout=(3, 5))
axes = df.hist(column=['height', 'weight', 'category'])
self._check_axes_shape(axes, axes_num=3, layout=(2, 2))
@slow
def test_grouped_hist_multiple_axes(self):
# GH 6970, GH 7069
df = self.hist_df
fig, axes = self.plt.subplots(2, 3)
returned = df.hist(column=['height', 'weight', 'category'], ax=axes[0])
self._check_axes_shape(returned, axes_num=3, layout=(1, 3))
tm.assert_numpy_array_equal(returned, axes[0])
assert returned[0].figure is fig
returned = df.hist(by='classroom', ax=axes[1])
self._check_axes_shape(returned, axes_num=3, layout=(1, 3))
tm.assert_numpy_array_equal(returned, axes[1])
assert returned[0].figure is fig
with pytest.raises(ValueError):
fig, axes = self.plt.subplots(2, 3)
# pass different number of axes from required
axes = df.hist(column='height', ax=axes)
@slow
def test_axis_share_x(self):
df = self.hist_df
# GH4089
ax1, ax2 = df.hist(column='height', by=df.gender, sharex=True)
# share x
assert ax1._shared_x_axes.joined(ax1, ax2)
assert ax2._shared_x_axes.joined(ax1, ax2)
# don't share y
assert not ax1._shared_y_axes.joined(ax1, ax2)
assert not ax2._shared_y_axes.joined(ax1, ax2)
@slow
def test_axis_share_y(self):
df = self.hist_df
ax1, ax2 = df.hist(column='height', by=df.gender, sharey=True)
# share y
assert ax1._shared_y_axes.joined(ax1, ax2)
assert ax2._shared_y_axes.joined(ax1, ax2)
# don't share x
assert not ax1._shared_x_axes.joined(ax1, ax2)
assert not ax2._shared_x_axes.joined(ax1, ax2)
@slow
def test_axis_share_xy(self):
df = self.hist_df
ax1, ax2 = df.hist(column='height', by=df.gender, sharex=True,
sharey=True)
# share both x and y
assert ax1._shared_x_axes.joined(ax1, ax2)
assert ax2._shared_x_axes.joined(ax1, ax2)
assert ax1._shared_y_axes.joined(ax1, ax2)
assert ax2._shared_y_axes.joined(ax1, ax2)
| mit |
shangwuhencc/scikit-learn | sklearn/linear_model/ransac.py | 191 | 14261 | # coding: utf-8
# Author: Johannes Schönberger
#
# License: BSD 3 clause
import numpy as np
from ..base import BaseEstimator, MetaEstimatorMixin, RegressorMixin, clone
from ..utils import check_random_state, check_array, check_consistent_length
from ..utils.random import sample_without_replacement
from ..utils.validation import check_is_fitted
from .base import LinearRegression
_EPSILON = np.spacing(1)
def _dynamic_max_trials(n_inliers, n_samples, min_samples, probability):
"""Determine number trials such that at least one outlier-free subset is
sampled for the given inlier/outlier ratio.
Parameters
----------
n_inliers : int
Number of inliers in the data.
n_samples : int
Total number of samples in the data.
min_samples : int
Minimum number of samples chosen randomly from original data.
probability : float
Probability (confidence) that one outlier-free sample is generated.
Returns
-------
trials : int
Number of trials.
"""
inlier_ratio = n_inliers / float(n_samples)
nom = max(_EPSILON, 1 - probability)
denom = max(_EPSILON, 1 - inlier_ratio ** min_samples)
if nom == 1:
return 0
if denom == 1:
return float('inf')
return abs(float(np.ceil(np.log(nom) / np.log(denom))))
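# Hedged sanity check (illustrative, not part of the original module): with a
# 50% inlier ratio, 2-point samples and 99% confidence this gives
# ceil(log(0.01) / log(1 - 0.5 ** 2)) == 17 trials:
#
#   _dynamic_max_trials(n_inliers=50, n_samples=100, min_samples=2,
#                       probability=0.99)   # -> 17.0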
class RANSACRegressor(BaseEstimator, MetaEstimatorMixin, RegressorMixin):
"""RANSAC (RANdom SAmple Consensus) algorithm.
RANSAC is an iterative algorithm for the robust estimation of parameters
from a subset of inliers from the complete data set. More information can
be found in the general documentation of linear models.
A detailed description of the algorithm can be found in the documentation
of the ``linear_model`` sub-package.
Read more in the :ref:`User Guide <RansacRegression>`.
Parameters
----------
base_estimator : object, optional
Base estimator object which implements the following methods:
* `fit(X, y)`: Fit model to given training data and target values.
* `score(X, y)`: Returns the mean accuracy on the given test data,
which is used for the stop criterion defined by `stop_score`.
Additionally, the score is used to decide which of two equally
large consensus sets is chosen as the better one.
If `base_estimator` is None, then
``base_estimator=sklearn.linear_model.LinearRegression()`` is used for
target values of dtype float.
Note that the current implementation only supports regression
estimators.
min_samples : int (>= 1) or float ([0, 1]), optional
Minimum number of samples chosen randomly from original data. Treated
as an absolute number of samples for `min_samples >= 1`, treated as a
relative number `ceil(min_samples * X.shape[0])` for
`min_samples < 1`. This is typically chosen as the minimal number of
samples necessary to estimate the given `base_estimator`. By default a
``sklearn.linear_model.LinearRegression()`` estimator is assumed and
`min_samples` is chosen as ``X.shape[1] + 1``.
residual_threshold : float, optional
Maximum residual for a data sample to be classified as an inlier.
By default the threshold is chosen as the MAD (median absolute
deviation) of the target values `y`.
is_data_valid : callable, optional
This function is called with the randomly selected data before the
model is fitted to it: `is_data_valid(X, y)`. If its return value is
False the current randomly chosen sub-sample is skipped.
is_model_valid : callable, optional
This function is called with the estimated model and the randomly
selected data: `is_model_valid(model, X, y)`. If its return value is
False the current randomly chosen sub-sample is skipped.
Rejecting samples with this function is computationally costlier than
with `is_data_valid`. `is_model_valid` should therefore only be used if
the estimated model is needed for making the rejection decision.
max_trials : int, optional
Maximum number of iterations for random sample selection.
stop_n_inliers : int, optional
Stop iteration if at least this number of inliers are found.
stop_score : float, optional
Stop iteration if score is greater equal than this threshold.
stop_probability : float in range [0, 1], optional
RANSAC iteration stops if at least one outlier-free set of the training
data is sampled in RANSAC. This requires generating at least N
samples (iterations)::
N >= log(1 - probability) / log(1 - e**m)
where the probability (confidence) is typically set to high value such
as 0.99 (the default) and e is the current fraction of inliers w.r.t.
the total number of samples.
residual_metric : callable, optional
Metric to reduce the dimensionality of the residuals to 1 for
multi-dimensional target values ``y.shape[1] > 1``. By default the sum
of absolute differences is used::
lambda dy: np.sum(np.abs(dy), axis=1)
random_state : integer or numpy.RandomState, optional
The generator used to initialize the centers. If an integer is
given, it fixes the seed. Defaults to the global numpy random
number generator.
Attributes
----------
estimator_ : object
Best fitted model (copy of the `base_estimator` object).
n_trials_ : int
Number of random selection trials until one of the stop criteria is
met. It is always ``<= max_trials``.
inlier_mask_ : bool array of shape [n_samples]
Boolean mask of inliers classified as ``True``.
References
----------
.. [1] http://en.wikipedia.org/wiki/RANSAC
.. [2] http://www.cs.columbia.edu/~belhumeur/courses/compPhoto/ransac.pdf
.. [3] http://www.bmva.org/bmvc/2009/Papers/Paper355/Paper355.pdf
"""
def __init__(self, base_estimator=None, min_samples=None,
residual_threshold=None, is_data_valid=None,
is_model_valid=None, max_trials=100,
stop_n_inliers=np.inf, stop_score=np.inf,
stop_probability=0.99, residual_metric=None,
random_state=None):
self.base_estimator = base_estimator
self.min_samples = min_samples
self.residual_threshold = residual_threshold
self.is_data_valid = is_data_valid
self.is_model_valid = is_model_valid
self.max_trials = max_trials
self.stop_n_inliers = stop_n_inliers
self.stop_score = stop_score
self.stop_probability = stop_probability
self.residual_metric = residual_metric
self.random_state = random_state
def fit(self, X, y):
"""Fit estimator using RANSAC algorithm.
Parameters
----------
X : array-like or sparse matrix, shape [n_samples, n_features]
Training data.
y : array-like, shape = [n_samples] or [n_samples, n_targets]
Target values.
Raises
------
ValueError
If no valid consensus set could be found. This occurs if
`is_data_valid` and `is_model_valid` return False for all
`max_trials` randomly chosen sub-samples.
"""
X = check_array(X, accept_sparse='csr')
y = check_array(y, ensure_2d=False)
check_consistent_length(X, y)
if self.base_estimator is not None:
base_estimator = clone(self.base_estimator)
else:
base_estimator = LinearRegression()
if self.min_samples is None:
# assume linear model by default
min_samples = X.shape[1] + 1
elif 0 < self.min_samples < 1:
min_samples = np.ceil(self.min_samples * X.shape[0])
elif self.min_samples >= 1:
if self.min_samples % 1 != 0:
raise ValueError("Absolute number of samples must be an "
"integer value.")
min_samples = self.min_samples
else:
raise ValueError("Value for `min_samples` must be scalar and "
"positive.")
if min_samples > X.shape[0]:
raise ValueError("`min_samples` may not be larger than number "
"of samples ``X.shape[0]``.")
if self.stop_probability < 0 or self.stop_probability > 1:
raise ValueError("`stop_probability` must be in range [0, 1].")
if self.residual_threshold is None:
# MAD (median absolute deviation)
residual_threshold = np.median(np.abs(y - np.median(y)))
else:
residual_threshold = self.residual_threshold
if self.residual_metric is None:
residual_metric = lambda dy: np.sum(np.abs(dy), axis=1)
else:
residual_metric = self.residual_metric
random_state = check_random_state(self.random_state)
try: # Not all estimator accept a random_state
base_estimator.set_params(random_state=random_state)
except ValueError:
pass
n_inliers_best = 0
score_best = np.inf
inlier_mask_best = None
X_inlier_best = None
y_inlier_best = None
# number of data samples
n_samples = X.shape[0]
sample_idxs = np.arange(n_samples)
n_samples, _ = X.shape
for self.n_trials_ in range(1, self.max_trials + 1):
# choose random sample set
subset_idxs = sample_without_replacement(n_samples, min_samples,
random_state=random_state)
X_subset = X[subset_idxs]
y_subset = y[subset_idxs]
# check if random sample set is valid
if (self.is_data_valid is not None
and not self.is_data_valid(X_subset, y_subset)):
continue
# fit model for current random sample set
base_estimator.fit(X_subset, y_subset)
# check if estimated model is valid
if (self.is_model_valid is not None and not
self.is_model_valid(base_estimator, X_subset, y_subset)):
continue
# residuals of all data for current random sample model
y_pred = base_estimator.predict(X)
diff = y_pred - y
if diff.ndim == 1:
diff = diff.reshape(-1, 1)
residuals_subset = residual_metric(diff)
# classify data into inliers and outliers
inlier_mask_subset = residuals_subset < residual_threshold
n_inliers_subset = np.sum(inlier_mask_subset)
# less inliers -> skip current random sample
if n_inliers_subset < n_inliers_best:
continue
if n_inliers_subset == 0:
raise ValueError("No inliers found, possible cause is "
"setting residual_threshold ({0}) too low.".format(
self.residual_threshold))
# extract inlier data set
inlier_idxs_subset = sample_idxs[inlier_mask_subset]
X_inlier_subset = X[inlier_idxs_subset]
y_inlier_subset = y[inlier_idxs_subset]
# score of inlier data set
score_subset = base_estimator.score(X_inlier_subset,
y_inlier_subset)
# same number of inliers but worse score -> skip current random
# sample
if (n_inliers_subset == n_inliers_best
and score_subset < score_best):
continue
# save current random sample as best sample
n_inliers_best = n_inliers_subset
score_best = score_subset
inlier_mask_best = inlier_mask_subset
X_inlier_best = X_inlier_subset
y_inlier_best = y_inlier_subset
# break if sufficient number of inliers or score is reached
if (n_inliers_best >= self.stop_n_inliers
or score_best >= self.stop_score
or self.n_trials_
>= _dynamic_max_trials(n_inliers_best, n_samples,
min_samples,
self.stop_probability)):
break
# if none of the iterations met the required criteria
if inlier_mask_best is None:
raise ValueError(
"RANSAC could not find valid consensus set, because"
" either the `residual_threshold` rejected all the samples or"
" `is_data_valid` and `is_model_valid` returned False for all"
" `max_trials` randomly ""chosen sub-samples. Consider "
"relaxing the ""constraints.")
# estimate final model using all inliers
base_estimator.fit(X_inlier_best, y_inlier_best)
self.estimator_ = base_estimator
self.inlier_mask_ = inlier_mask_best
return self
def predict(self, X):
"""Predict using the estimated model.
This is a wrapper for `estimator_.predict(X)`.
Parameters
----------
X : numpy array of shape [n_samples, n_features]
Returns
-------
y : array, shape = [n_samples] or [n_samples, n_targets]
Returns predicted values.
"""
check_is_fitted(self, 'estimator_')
return self.estimator_.predict(X)
def score(self, X, y):
"""Returns the score of the prediction.
This is a wrapper for `estimator_.score(X, y)`.
Parameters
----------
X : numpy array or sparse matrix of shape [n_samples, n_features]
Training data.
y : array, shape = [n_samples] or [n_samples, n_targets]
Target values.
Returns
-------
z : float
Score of the prediction.
"""
check_is_fitted(self, 'estimator_')
return self.estimator_.score(X, y)
| bsd-3-clause |
mcgibbon/atmos | atmos/plot.py | 2 | 23842 | """
plot.py: Utilities for plotting meteorological data. Importing this package
gives access to the "skewT" projection.
This file was originally edited from code in MetPy. The MetPy copyright
disclaimer is at the bottom of the source code of this file.
"""
import numpy as np
import matplotlib.transforms as transforms
import matplotlib.axis as maxis
import matplotlib.spines as mspines
from matplotlib.axes import Axes
from matplotlib.collections import LineCollection
from matplotlib.projections import register_projection
from matplotlib.ticker import ScalarFormatter, MultipleLocator
from atmos import calculate
from atmos.constants import g0
from scipy.integrate import odeint
from atmos.util import closest_val
from appdirs import user_cache_dir
import os
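# Hedged overview note (added, not part of the original module): the classes
# below build a skew-T/log-p projection by skewing the x axis in axes
# coordinates; once SkewTAxes is registered, the projection can be requested
# with subplot_kw={'projection': 'skewT'} (see the __main__ demo at the bottom).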
# The sole purpose of this class is to look at the upper, lower, or total
# interval as appropriate and see what parts of the tick to draw, if any.
class SkewXTick(maxis.XTick):
def draw(self, renderer):
if not self.get_visible():
return
renderer.open_group(self.__name__)
lower_interval = self.axes.xaxis.lower_interval
upper_interval = self.axes.xaxis.upper_interval
if self.gridOn and transforms.interval_contains(
self.axes.xaxis.get_view_interval(), self.get_loc()):
self.gridline.draw(renderer)
if transforms.interval_contains(lower_interval, self.get_loc()):
if self.tick1On:
self.tick1line.draw(renderer)
if self.label1On:
self.label1.draw(renderer)
if transforms.interval_contains(upper_interval, self.get_loc()):
if self.tick2On:
self.tick2line.draw(renderer)
if self.label2On:
self.label2.draw(renderer)
renderer.close_group(self.__name__)
# This class exists to provide two separate sets of intervals to the tick,
# as well as create instances of the custom tick
class SkewXAxis(maxis.XAxis):
def __init__(self, *args, **kwargs):
maxis.XAxis.__init__(self, *args, **kwargs)
self.upper_interval = 0.0, 1.0
def _get_tick(self, major):
return SkewXTick(self.axes, 0, '', major=major)
@property
def lower_interval(self):
return self.axes.viewLim.intervalx
def get_view_interval(self):
return self.upper_interval[0], self.axes.viewLim.intervalx[1]
class SkewYAxis(maxis.YAxis):
pass
# This class exists to calculate the separate data range of the
# upper X-axis and draw the spine there. It also provides this range
# to the X-axis artist for ticking and gridlines
class SkewSpine(mspines.Spine):
def _adjust_location(self):
trans = self.axes.transDataToAxes.inverted()
if self.spine_type == 'top':
yloc = 1.0
else:
yloc = 0.0
left = trans.transform_point((0.0, yloc))[0]
right = trans.transform_point((1.0, yloc))[0]
pts = self._path.vertices
pts[0, 0] = left
pts[1, 0] = right
self.axis.upper_interval = (left, right)
# This class handles registration of the skew-xaxes as a projection as well
# as setting up the appropriate transformations. It also overrides standard
# spines and axes instances as appropriate.
class SkewTAxes(Axes):
# The projection must specify a name. This will be used be the
# user to select the projection, i.e. ``subplot(111,
# projection='skewx')``.
name = 'skewT'
default_xlim = (-40, 50)
default_ylim = (1050, 100)
def __init__(self, *args, **kwargs):
# This needs to be popped and set before moving on
self.rot = kwargs.pop('rotation', 45)
# set booleans to keep track of extra axes that are plotted
self._mixing_lines = []
self._dry_adiabats = []
self._moist_adiabats = []
Axes.__init__(self, *args, **kwargs)
def _init_axis(self):
# Taken from Axes and modified to use our modified X-axis
self.xaxis = SkewXAxis(self)
self.spines['top'].register_axis(self.xaxis)
self.spines['bottom'].register_axis(self.xaxis)
self.yaxis = maxis.YAxis(self)
self.yaxis.set_major_formatter(ScalarFormatter())
self.yaxis.set_major_locator(MultipleLocator(100))
self.spines['left'].register_axis(self.yaxis)
self.spines['right'].register_axis(self.yaxis)
def _gen_axes_spines(self, locations=None, offset=0.0, units='inches'):
# pylint: disable=unused-argument
spines = {'top': SkewSpine.linear_spine(self, 'top'),
'bottom': mspines.Spine.linear_spine(self, 'bottom'),
'left': mspines.Spine.linear_spine(self, 'left'),
'right': mspines.Spine.linear_spine(self, 'right')}
return spines
def _set_lim_and_transforms(self):
"""
This is called once when the plot is created to set up all the
transforms for the data, text and grids.
"""
# Get the standard transform setup from the Axes base class
Axes._set_lim_and_transforms(self)
# Need to put the skew in the middle, after the scale and limits,
# but before the transAxes. This way, the skew is done in Axes
# coordinates thus performing the transform around the proper origin
# We keep the pre-transAxes transform around for other users, like the
# spines for finding bounds
self.transDataToAxes = (self.transScale +
(self.transLimits +
transforms.Affine2D().skew_deg(self.rot, 0)))
# Create the full transform from Data to Pixels
self.transData = self.transDataToAxes + self.transAxes
# Blended transforms like this need to have the skewing applied using
# both axes, in axes coords like before.
self._xaxis_transform = (transforms.blended_transform_factory(
self.transScale + self.transLimits,
transforms.IdentityTransform()) +
transforms.Affine2D().skew_deg(self.rot, 0)) + self.transAxes
def cla(self):
Axes.cla(self)
# Disables the log-formatting that comes with semilogy
self.yaxis.set_major_formatter(ScalarFormatter())
self.yaxis.set_major_locator(MultipleLocator(100))
if not self.yaxis_inverted():
self.invert_yaxis()
# Try to make sane default temperature plotting
self.xaxis.set_major_locator(MultipleLocator(5))
self.xaxis.set_major_formatter(ScalarFormatter())
self.set_xlim(*self.default_xlim)
self.set_ylim(*self.default_ylim)
def semilogy(self, p, T, *args, **kwargs):
r'''Plot data.
Simple wrapper around plot so that pressure is the first (independent)
input. This is essentially a wrapper around `semilogy`.
Parameters
----------
p : array_like
pressure values
T : array_like
temperature values, can also be used for things like dew point
args
Other positional arguments to pass to `semilogy`
kwargs
Other keyword arguments to pass to `semilogy`
See Also
--------
`matplotlib.Axes.semilogy`
'''
# We need to replace the overridden plot with the original Axes.plot
# since it is called within Axes.semilogy
no_plot = SkewTAxes.plot
SkewTAxes.plot = Axes.plot
Axes.semilogy(self, T, p, *args, **kwargs)
# Be sure to put back the overridden plot method
SkewTAxes.plot = no_plot
self.yaxis.set_major_formatter(ScalarFormatter())
self.yaxis.set_major_locator(MultipleLocator(100))
labels = self.xaxis.get_ticklabels()
for label in labels:
label.set_rotation(45)
label.set_horizontalalignment('right')
label.set_fontsize(8)
label.set_color('#B31515')
self.grid(True)
self.grid(axis='top', color='#B31515', linestyle='-', linewidth=1,
alpha=0.5, zorder=1.1)
self.grid(axis='x', color='#B31515', linestyle='-', linewidth=1,
alpha=0.5, zorder=1.1)
self.grid(axis='y', color='k', linestyle='-', linewidth=0.5, alpha=0.5,
zorder=1.1)
self.set_xlabel(r'Temperature ($^{\circ} C$)', color='#B31515')
self.set_ylabel('Pressure ($hPa$)')
if len(self._mixing_lines) == 0:
self.plot_mixing_lines()
if len(self._dry_adiabats) == 0:
self.plot_dry_adiabats()
if len(self._moist_adiabats) == 0:
self.plot_moist_adiabats()
def plot(self, *args, **kwargs):
r'''Plot data.
Simple wrapper around plot so that pressure is the first (independent)
input. This is essentially a wrapper around `semilogy`.
Parameters
----------
p : array_like
pressure values
T : array_like
temperature values, can also be used for things like dew point
args
Other positional arguments to pass to `semilogy`
kwargs
Other keyword arguments to pass to `semilogy`
See Also
--------
`matplotlib.Axes.semilogy`
'''
self.semilogy(*args, **kwargs)
def semilogx(self, *args, **kwargs):
r'''Plot data.
Simple wrapper around plot so that pressure is the first (independent)
input. This is essentially a wrapper around `semilogy`.
Parameters
----------
p : array_like
pressure values
T : array_like
temperature values, can also be used for things like dew point
args
Other positional arguments to pass to `semilogy`
kwargs
Other keyword arguments to pass to `semilogy`
See Also
--------
`matplotlib.Axes.semilogy`
'''
self.semilogy(*args, **kwargs)
def plot_barbs(self, p, u, v, xloc=1.0, x_clip_radius=0.08,
y_clip_radius=0.08, **kwargs):
r'''Plot wind barbs.
Adds wind barbs to the skew-T plot. This is a wrapper around the
`barbs` command that adds to appropriate transform to place the
barbs in a vertical line, located as a function of pressure.
Parameters
----------
p : array_like
pressure values
u : array_like
U (East-West) component of wind
v : array_like
V (North-South) component of wind
xloc : float, optional
Position for the barbs, in normalized axes coordinates, where 0.0
denotes far left and 1.0 denotes far right. Defaults to far right.
x_clip_radius : float, optional
Space, in normalized axes coordinates, to leave before clipping
wind barbs in the x-direction. Defaults to 0.08.
y_clip_radius : float, optional
Space, in normalized axes coordinates, to leave above/below plot
before clipping wind barbs in the y-direction. Defaults to 0.08.
kwargs
Other keyword arguments to pass to `barbs`
See Also
--------
`matplotlib.Axes.barbs`
'''
#kwargs.setdefault('length', 7)
# Assemble array of x-locations in axes space
x = np.empty_like(p)
x.fill(xloc)
# Do barbs plot at this location
b = self.barbs(x, p, u, v,
transform=self.get_yaxis_transform(which='tick2'),
clip_on=True, **kwargs)
# Override the default clip box, which is the axes rectangle, so we can
# have barbs that extend outside.
ax_bbox = transforms.Bbox([[xloc-x_clip_radius, -y_clip_radius],
[xloc+x_clip_radius, 1.0 + y_clip_radius]])
b.set_clip_box(transforms.TransformedBbox(ax_bbox, self.transAxes))
def plot_dry_adiabats(self, p=None, theta=None, **kwargs):
r'''Plot dry adiabats.
Adds dry adiabats (lines of constant potential temperature) to the
plot. The default style of these lines is dashed red lines with an
alpha value of 0.5. These can be overridden using keyword arguments.
Parameters
----------
p : array_like, optional
1-dimensional array of pressure values to be included in the dry
adiabats. If not specified, they will be linearly distributed
across the current plotted pressure range.
theta : array_like, optional
1-dimensional array of potential temperature values for dry
adiabats. By default these will be generated based on the current
temperature limits.
kwargs
Other keyword arguments to pass to
`matplotlib.collections.LineCollection`
See Also
--------
plot_moist_adiabats
`matplotlib.collections.LineCollection`
`metpy.calc.dry_lapse`
'''
for artist in self._dry_adiabats:
artist.remove()
self._dry_adiabats = []
# Determine set of starting temps if necessary
if theta is None:
xmin, xmax = self.get_xlim()
theta = np.arange(xmin, xmax + 201, 10)
# Get pressure levels based on ylims if necessary
if p is None:
p = np.linspace(*self.get_ylim())
# Assemble into data for plotting
t = calculate('T', theta=theta[:, None], p=p, p_units='hPa',
T_units='degC', theta_units='degC')
linedata = [np.vstack((ti, p)).T for ti in t]
# Add to plot
kwargs.setdefault('clip_on', True)
kwargs.setdefault('colors', '#A65300')
kwargs.setdefault('linestyles', '-')
kwargs.setdefault('alpha', 1)
kwargs.setdefault('linewidth', 0.5)
kwargs.setdefault('zorder', 1.1)
collection = LineCollection(linedata, **kwargs)
self._dry_adiabats.append(collection)
self.add_collection(collection)
theta = theta.flatten()
T_label = calculate('T', p=140, p_units='hPa', theta=theta,
T_units='degC', theta_units='degC')
for i in range(len(theta)):
text = self.text(
T_label[i], 140, '{:.0f}'.format(theta[i]),
fontsize=8, ha='left', va='center', rotation=-60,
color='#A65300', bbox={
'facecolor': 'w', 'edgecolor': 'w', 'alpha': 0,
}, zorder=1.2)
text.set_clip_on(True)
self._dry_adiabats.append(text)
def plot_moist_adiabats(self, p=None, thetaes=None, **kwargs):
r'''Plot moist adiabats.
Adds saturated pseudo-adiabats (lines of constant equivalent potential
temperature) to the plot. The default style of these lines is dashed
blue lines with an alpha value of 0.5. These can be overridden using
keyword arguments.
Parameters
----------
p : array_like, optional
1-dimensional array of pressure values to be included in the moist
adiabats. If not specified, they will be linearly distributed
across the current plotted pressure range.
thetaes : array_like, optional
1-dimensional array of saturation equivalent potential temperature
values for moist adiabats. By default these will be generated based
on the current temperature limits.
kwargs
Other keyword arguments to pass to
`matplotlib.collections.LineCollection`
See Also
--------
plot_dry_adiabats
`matplotlib.collections.LineCollection`
`metpy.calc.moist_lapse`
'''
for artist in self._moist_adiabats:
artist.remove()
self._moist_adiabats = []
def dT_dp(y, p0):
return calculate('Gammam', T=y, p=p0, RH=100., p_units='hPa',
T_units='degC')/(
g0*calculate('rho', T=y, p=p0, p_units='hPa', T_units='degC',
RH=100.))*100.
if thetaes is None:
xmin, xmax = self.get_xlim()
thetaes = np.concatenate((np.arange(xmin, 0, 5),
np.arange(0, xmax + 51, 5)))
# Get pressure levels based on ylims if necessary
if p is None:
p = np.linspace(self.get_ylim()[0], self.get_ylim()[1])
cache_dir = user_cache_dir('atmos')
if not os.path.isdir(cache_dir):
os.mkdir(cache_dir)
cache_filename = os.path.join(cache_dir, 'moist_adiabat_data.npz')
request_str = 'p:{},\nthetaes:{}'.format(
np.array_str(p), np.array_str(thetaes))
t = None
cached_data = None
if os.path.isfile(cache_filename):
cached_data = np.load(cache_filename)
if request_str in cached_data.keys():
t = cached_data[request_str]
if t is None:
# did not find cached data
# Assemble into data for plotting
thetaes_base = odeint(
dT_dp, thetaes, np.array([1e3, p[0]], dtype=np.float64))[-1, :]
result = odeint(dT_dp, thetaes_base, p)
t = result.T
data_to_cache = {}
if cached_data is not None:
data_to_cache.update(cached_data)
data_to_cache[request_str] = t
np.savez(cache_filename, **data_to_cache)
linedata = [np.vstack((ti, p)).T for ti in t]
# Add to plot
kwargs.setdefault('clip_on', True)
kwargs.setdefault('colors', '#166916')
kwargs.setdefault('linestyles', '-')
kwargs.setdefault('alpha', 1)
kwargs.setdefault('linewidth', 0.5)
kwargs.setdefault('zorder', 1.1)
collection = LineCollection(linedata, **kwargs)
self._moist_adiabats.append(collection)
self.add_collection(collection)
label_index = closest_val(240., p)
T_label = t[:, label_index].flatten()
for i in range(len(thetaes)):
text = self.text(
T_label[i], p[label_index],
'{:.0f}'.format(thetaes[i]),
fontsize=8, ha='left', va='center', rotation=-65,
color='#166916', bbox={
'facecolor': 'w', 'edgecolor': 'w', 'alpha': 0,
}, zorder=1.2)
text.set_clip_on(True)
self._moist_adiabats.append(text)
def plot_mixing_lines(self, p=None, rv=None, **kwargs):
r'''Plot lines of constant mixing ratio.
Adds lines of constant mixing ratio (isohumes) to the
plot. The default style of these lines is dashed green lines with an
alpha value of 0.8. These can be overridden using keyword arguments.
Parameters
----------
rv : array_like, optional
1-dimensional array of unitless mixing ratio values to plot. If
none are given, default values are used.
p : array_like, optional
1-dimensional array of pressure values to be included in the
isohumes. If not specified, they will be linearly distributed
across the current plotted pressure range.
kwargs
Other keyword arguments to pass to
`matplotlib.collections.LineCollection`
See Also
--------
`matplotlib.collections.LineCollection`
'''
for artist in self._mixing_lines:
artist.remove()
self._mixing_lines = []
# Default mixing level values if necessary
if rv is None:
rv = np.array([
0.1e-3, 0.2e-3, 0.5e-3, 1e-3, 1.5e-3, 2e-3, 3e-3, 4e-3, 6e-3,
8e-3, 10e-3, 12e-3, 15e-3, 20e-3, 30e-3, 40e-3,
50e-3]).reshape(-1, 1)
else:
rv = np.asarray(rv).reshape(-1, 1)
# Set pressure range if necessary
if p is None:
p = np.linspace(min(self.get_ylim()), max(self.get_ylim()))
else:
p = np.asarray(p)
# Assemble data for plotting
Td = calculate(
'Td', p=p, rv=rv, p_units='hPa', rv_units='kg/kg',
Td_units='degC')
Td_label = calculate('Td', p=550, p_units='hPa', rv=rv,
Td_units='degC')
linedata = [np.vstack((t, p)).T for t in Td]
# Add to plot
kwargs.setdefault('clip_on', True)
kwargs.setdefault('colors', '#166916')
kwargs.setdefault('linestyles', '--')
kwargs.setdefault('alpha', 1)
kwargs.setdefault('linewidth', 0.5)
kwargs.setdefault('zorder', 1.1)
collection = LineCollection(linedata, **kwargs)
self._mixing_lines.append(collection)
self.add_collection(collection)
rv = rv.flatten() * 1000
for i in range(len(rv)):
if rv[i] < 1:
format_string = '{:.1f}'
else:
format_string = '{:.0f}'
t = self.text(Td_label[i], 550, format_string.format(rv[i]),
fontsize=8, ha='right', va='center', rotation=60,
color='#166916', bbox={
'facecolor': 'w', 'edgecolor': 'w', 'alpha': 0,
}, zorder=1.2)
t.set_clip_on(True)
self._mixing_lines.append(t)
# Now register the projection with matplotlib so the user can select
# it.
register_projection(SkewTAxes)
if __name__ == '__main__':
import matplotlib.pyplot as plt
# fig = plt.figure(figsize=(6, 6))
# ax = fig.add_subplot(1, 1, 1, projection='skewT')
fig, ax = plt.subplots(1, 1, figsize=(6, 6),
subplot_kw={'projection': 'skewT'})
# ax = plt.subplot(projection='skewT')
ax.plot(np.linspace(1e3, 100, 100), np.linspace(0, -50, 100))
ax.plot_barbs(np.linspace(1e3, 100, 10), np.linspace(50, -50, 10),
np.linspace(-50, 50, 10), xloc=0.95)
plt.tight_layout()
plt.show()
# Copyright (c) 2008-2014, MetPy Developers.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# * Neither the name of the MetPy Developers nor the names of any
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
| mit |
jyt109/BDA_py_demos | demos_pystan/pystan_demo.py | 19 | 12220 | """Bayesian Data Analysis, 3rd ed
PyStan demo
Demo for using Stan with Python interface PyStan.
"""
import numpy as np
import pystan
import matplotlib.pyplot as plt
# edit default plot settings (colours from colorbrewer2.org)
plt.rc('font', size=14)
plt.rc('lines', color='#377eb8', linewidth=2)
plt.rc('axes', color_cycle=('#377eb8','#e41a1c','#4daf4a',
'#984ea3','#ff7f00','#ffff33'))
# ====== Bernoulli model =======================================================
bernoulli_code = """
data {
int<lower=0> N;
int<lower=0,upper=1> y[N];
}
parameters {
real<lower=0,upper=1> theta;
}
model {
theta ~ beta(1,1);
for (n in 1:N)
y[n] ~ bernoulli(theta);
}
"""
data = dict(N=10, y=[0,1,0,0,1,1,1,0,1,0])
fit = pystan.stan(model_code=bernoulli_code, data=data)
print(fit)
samples = fit.extract(permuted=True)
plt.hist(samples['theta'], 50)
plt.show()
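# A minimal follow-up sketch (not part of the original demo): a numerical
# posterior summary for theta, using the `samples` extracted above.
theta_samples = samples['theta']
print('posterior mean of theta: {:.3f}'.format(np.mean(theta_samples)))
print('central 95% interval: [{:.3f}, {:.3f}]'.format(
    *np.percentile(theta_samples, [2.5, 97.5])))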
# ====== Vectorized Bernoulli model ============================================
bernoulli_code = """
data {
int<lower=0> N;
int<lower=0,upper=1> y[N];
}
parameters {
real<lower=0,upper=1> theta;
}
model {
theta ~ beta(1,1);
y ~ bernoulli(theta);
}
"""
data = dict(N=10, y=[1,1,1,0,1,1,1,0,1,1])
fit = pystan.stan(model_code=bernoulli_code, data=data)
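# Sketch (not in the original demo): the fit can be inspected the same way as
# in the non-vectorized model above.
print(fit)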
# ====== Binomial model ========================================================
binomial_code = """
data {
int<lower=0> N;
int<lower=0> y;
}
parameters {
real<lower=0,upper=1> theta;
}
model {
theta ~ beta(1,1);
y ~ binomial(N,theta);
}
"""
data = dict(N=10, y=8)
fit = pystan.stan(model_code=binomial_code, data=data)
samples = fit.extract(permuted=True)
plt.hist(samples['theta'], 50)
plt.show()
# ====== Re-running Binomial model with new data ===============================
data = dict(N=10, y=10)
fit = pystan.stan(fit=fit, data=data)
samples = fit.extract(permuted=True)
plt.hist(samples['theta'], 50)
plt.show()
# ====== Comparison of two groups with Binomial ================================
binomial_code = """
data {
int<lower=0> N1;
int<lower=0> y1;
int<lower=0> N2;
int<lower=0> y2;
}
parameters {
real<lower=0,upper=1> theta1;
real<lower=0,upper=1> theta2;
}
transformed parameters {
real oddsratio;
oddsratio <- (theta2/(1-theta2))/(theta1/(1-theta1));
}
model {
theta1 ~ beta(1,1);
theta2 ~ beta(1,1);
y1 ~ binomial(N1,theta1);
y2 ~ binomial(N2,theta2);
}
"""
data = dict(N1=674, y1=39, N2=680, y2=22)
fit = pystan.stan(model_code=binomial_code, data=data)
samples = fit.extract(permuted=True)
plt.hist(samples['oddsratio'], 50)
plt.show()
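# A hedged addition (not in the original demo): the probability that the odds
# ratio exceeds one, computed directly from the posterior draws.
print('Pr(oddsratio > 1) = {}'.format(np.mean(samples['oddsratio'] > 1)))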
# ====== Gaussian linear model =================================================
linear_code = """
data {
int<lower=0> N; // number of data points
vector[N] x; //
vector[N] y; //
}
parameters {
real alpha;
real beta;
real<lower=0> sigma;
}
transformed parameters {
vector[N] mu;
mu <- alpha + beta*x;
}
model {
y ~ normal(mu, sigma);
}
"""
# Data for Stan
d = np.loadtxt('kilpisjarvi-summer-temp.csv', dtype=np.double, delimiter=';',
skiprows=1)
x = np.repeat(d[:,0], 4)
y = d[:,1:5].ravel()
N = len(x)
data = dict(N=N, x=x, y=y)
# Compile and fit the model
fit = pystan.stan(model_code=linear_code, data=data)
# Plot
samples = fit.extract(permuted=True)
plt.figure(figsize=(8,10))
plt.subplot(3,1,1)
plt.plot(x,
np.percentile(samples['mu'], 50, axis=0),
color='#e41a1c',
linewidth=1
)
plt.plot(
x,
np.asarray(np.percentile(samples['mu'], [5, 95], axis=0)).T,
color='#e41a1c',
linestyle='--',
linewidth=1,
)
plt.scatter(x, y, 5, color='#377eb8')
plt.xlabel('Year')
plt.ylabel('Summer temperature at Kilpisjarvi')
plt.xlim((1952,2013))
plt.subplot(3,1,2)
plt.hist(samples['beta'], 50)
plt.xlabel('beta')
print('Pr(beta > 0) = {}'.format(np.mean(samples['beta']>0)))
plt.subplot(3,1,3)
plt.hist(samples['sigma'], 50)
plt.xlabel('sigma')
plt.tight_layout()
plt.show()
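# Hedged sketch (not part of the original demo): since `beta` is the slope in
# temperature units per year, a rough posterior estimate of the change per
# decade is (units assumed to be degC):
print('mean change per decade: {:.2f}'.format(10 * np.mean(samples['beta'])))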
# ====== Gaussian linear model with adjustable priors ==========================
linear_code = """
data {
int<lower=0> N; // number of data points
vector[N] x; //
vector[N] y; //
real pmualpha; // prior mean for alpha
real psalpha; // prior std for alpha
real pmubeta; // prior mean for beta
real psbeta; // prior std for beta
}
parameters {
real alpha;
real beta;
real<lower=0> sigma;
}
transformed parameters {
vector[N] mu;
mu <- alpha + beta*x;
}
model {
alpha ~ normal(pmualpha,psalpha);
beta ~ normal(pmubeta,psbeta);
y ~ normal(mu, sigma);
}
"""
# Data for Stan
d = np.loadtxt('kilpisjarvi-summer-temp.csv', dtype=np.double, delimiter=';',
skiprows=1)
x = np.repeat(d[:,0], 4)
y = d[:,1:5].ravel()
N = len(x)
data = dict(
N = N,
x = x,
y = y,
pmualpha = y.mean(), # Centered
psalpha = (14-4)/6.0, # avg temp between 4-14
    pmubeta = 0, # a priori, increase and decrease equally likely
psbeta = (.1--.1)/6.0 # avg temp probably does not increase more than 1
# degree per 10 years
)
# Compile and fit the model
fit = pystan.stan(model_code=linear_code, data=data)
# Plot
samples = fit.extract(permuted=True)
plt.figure(figsize=(8,10))
plt.subplot(3,1,1)
plt.plot(x,
np.percentile(samples['mu'], 50, axis=0),
color='#e41a1c',
linewidth=1
)
plt.plot(
x,
np.asarray(np.percentile(samples['mu'], [5, 95], axis=0)).T,
color='#e41a1c',
linestyle='--',
linewidth=1,
)
plt.scatter(x, y, 5, color='#377eb8')
plt.xlabel('Year')
plt.ylabel('Summer temperature at Kilpisjarvi')
plt.xlim((1952,2013))
plt.subplot(3,1,2)
plt.hist(samples['beta'], 50)
plt.xlabel('beta')
print('Pr(beta > 0) = {}'.format(np.mean(samples['beta']>0)))
plt.subplot(3,1,3)
plt.hist(samples['sigma'], 50)
plt.xlabel('sigma')
plt.tight_layout()
plt.show()
# ====== Gaussian linear model with standardized data ==========================
linear_code = """
data {
int<lower=0> N; // number of data points
vector[N] x; //
vector[N] y; //
}
transformed data {
vector[N] x_std;
vector[N] y_std;
x_std <- (x - mean(x)) / sd(x);
y_std <- (y - mean(y)) / sd(y);
}
parameters {
real alpha;
real beta;
real<lower=0> sigma_std;
}
transformed parameters {
vector[N] mu_std;
mu_std <- alpha + beta*x_std;
}
model {
alpha ~ normal(0,1);
beta ~ normal(0,1);
y_std ~ normal(mu_std, sigma_std);
}
generated quantities {
vector[N] mu;
real<lower=0> sigma;
mu <- mean(y) + mu_std*sd(y);
sigma <- sigma_std*sd(y);
}
"""
# Data for Stan
data_path = '../utilities_and_data/kilpisjarvi-summer-temp.csv'
d = np.loadtxt(data_path, dtype=np.double, delimiter=';', skiprows=1)
x = np.repeat(d[:,0], 4)
y = d[:,1:5].ravel()
N = len(x)
data = dict(N = N, x = x, y = y)
# Compile and fit the model
fit = pystan.stan(model_code=linear_code, data=data)
# Plot
samples = fit.extract(permuted=True)
plt.figure(figsize=(8,10))
plt.subplot(3,1,1)
plt.plot(x,
np.percentile(samples['mu'], 50, axis=0),
color='#e41a1c',
linewidth=1
)
plt.plot(
x,
np.asarray(np.percentile(samples['mu'], [5, 95], axis=0)).T,
color='#e41a1c',
linestyle='--',
linewidth=1,
)
plt.scatter(x, y, 5, color='#377eb8')
plt.xlabel('Year')
plt.ylabel('Summer temperature at Kilpisjarvi')
plt.xlim((1952,2013))
plt.subplot(3,1,2)
plt.hist(samples['beta'], 50)
plt.xlabel('beta')
print('Pr(beta > 0) = {}'.format(np.mean(samples['beta']>0)))
plt.subplot(3,1,3)
plt.hist(samples['sigma'], 50)
plt.xlabel('sigma')
plt.tight_layout()
plt.show()
# ====== Gaussian linear student-t model =======================================
linear_code = """
data {
int<lower=0> N; // number of data points
vector[N] x; //
vector[N] y; //
}
parameters {
real alpha;
real beta;
real<lower=0> sigma;
real<lower=1,upper=80> nu;
}
transformed parameters {
vector[N] mu;
mu <- alpha + beta*x;
}
model {
nu ~ gamma(2,0.1); // Juarez and Steel (2010)
y ~ student_t(nu, mu, sigma);
}
"""
# Data for Stan
data_path = '../utilities_and_data/kilpisjarvi-summer-temp.csv'
d = np.loadtxt(data_path, dtype=np.double, delimiter=';', skiprows=1)
x = np.repeat(d[:,0], 4)
y = d[:,1:5].ravel()
N = len(x)
data = dict(N = N, x = x, y = y)
# Compile and fit the model
fit = pystan.stan(model_code=linear_code, data=data)
# Plot
samples = fit.extract(permuted=True)
plt.figure(figsize=(8,12))
plt.subplot(4,1,1)
plt.plot(x,
np.percentile(samples['mu'], 50, axis=0),
color='#e41a1c',
linewidth=1
)
plt.plot(
x,
np.asarray(np.percentile(samples['mu'], [5, 95], axis=0)).T,
color='#e41a1c',
linestyle='--',
linewidth=1,
)
plt.scatter(x, y, 5, color='#377eb8')
plt.xlabel('Year')
plt.ylabel('Summer temperature at Kilpisjarvi')
plt.xlim((1952,2013))
plt.subplot(4,1,2)
plt.hist(samples['beta'], 50)
plt.xlabel('beta')
print('Pr(beta > 0) = {}'.format(np.mean(samples['beta']>0)))
plt.subplot(4,1,3)
plt.hist(samples['sigma'], 50)
plt.xlabel('sigma')
plt.subplot(4,1,4)
plt.hist(samples['nu'], 50)
plt.xlabel('nu')
plt.tight_layout()
plt.show()
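# Hedged sketch (not in the original demo): a point summary of the degrees of
# freedom; small values of nu indicate heavier tails than the Gaussian model.
print('posterior median of nu: {:.1f}'.format(np.median(samples['nu'])))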
# ====== Comparison of k groups (ANOVA) ========================================
group_code = """
data {
int<lower=0> N; // number of data points
int<lower=0> K; // number of groups
int<lower=1,upper=K> x[N]; // group indicator
vector[N] y; //
}
parameters {
vector[K] mu; // group means
vector<lower=0>[K] sigma; // group stds
}
model {
for (n in 1:N)
y[n] ~ normal(mu[x[n]], sigma[x[n]]);
}
"""
# Data for Stan
data_path = '../utilities_and_data/kilpisjarvi-summer-temp.csv'
d = np.loadtxt(data_path, dtype=np.double, delimiter=';', skiprows=1)
# Is there a difference between the different summer months?
x = np.tile(np.arange(1,5), d.shape[0]) # summer months are numbered from 1 to 4
y = d[:,1:5].ravel()
N = len(x)
data = dict(
N = N,
K = 4, # 4 groups
x = x, # group indicators
y = y # observations
)
# Compile and fit the model
fit = pystan.stan(model_code=group_code, data=data)
# Analyse results
mu = fit.extract(permuted=True)['mu']
# Matrix of probabilities that one mu is larger than other
ps = np.zeros((4,4))
for k1 in range(4):
for k2 in range(k1+1,4):
ps[k1,k2] = np.mean(mu[:,k1]>mu[:,k2])
ps[k2,k1] = 1 - ps[k1,k2]
print "Matrix of probabilities that one mu is larger than other:"
print ps
# Plot
plt.boxplot(mu)
plt.show()
# ====== Hierarchical prior model for comparison of k groups (ANOVA) ===========
# results do not differ much from the previous model, because there are only a
# few groups and quite a lot of data per group, but this works as an example anyway
hier_code = """
data {
int<lower=0> N; // number of data points
int<lower=0> K; // number of groups
int<lower=1,upper=K> x[N]; // group indicator
vector[N] y; //
}
parameters {
real mu0; // prior mean
real<lower=0> sigma0; // prior std
vector[K] mu; // group means
vector<lower=0>[K] sigma; // group stds
}
model {
mu0 ~ normal(10,10); // weakly informative prior
sigma0 ~ cauchy(0,4); // weakly informative prior
mu ~ normal(mu0, sigma0); // population prior with unknown parameters
for (n in 1:N)
y[n] ~ normal(mu[x[n]], sigma[x[n]]);
}
"""
# Data for Stan
data_path = '../utilities_and_data/kilpisjarvi-summer-temp.csv'
d = np.loadtxt(data_path, dtype=np.double, delimiter=';', skiprows=1)
# Is there a difference between the different summer months?
x = np.tile(np.arange(1,5), d.shape[0]) # summer months are numbered from 1 to 4
y = d[:,1:5].ravel()
N = len(x)
data = dict(
N = N,
K = 4, # 4 groups
x = x, # group indicators
y = y # observations
)
# Compile and fit the model
fit = pystan.stan(model_code=hier_code, data=data)
# Analyse results
samples = fit.extract(permuted=True)
print "std(mu0): {}".format(np.std(samples['mu0']))
mu = samples['mu']
# Matrix of probabilities that one mu is larger than other
ps = np.zeros((4,4))
for k1 in range(4):
for k2 in range(k1+1,4):
ps[k1,k2] = np.mean(mu[:,k1]>mu[:,k2])
ps[k2,k1] = 1 - ps[k1,k2]
print "Matrix of probabilities that one mu is larger than other:"
print ps
# Plot
plt.boxplot(mu)
plt.show()
| gpl-3.0 |
ChanderG/scikit-learn | sklearn/preprocessing/__init__.py | 31 | 1235 | """
The :mod:`sklearn.preprocessing` module includes scaling, centering,
normalization, binarization and imputation methods.
"""
from .data import Binarizer
from .data import KernelCenterer
from .data import MinMaxScaler
from .data import MaxAbsScaler
from .data import Normalizer
from .data import RobustScaler
from .data import StandardScaler
from .data import add_dummy_feature
from .data import binarize
from .data import normalize
from .data import scale
from .data import robust_scale
from .data import maxabs_scale
from .data import minmax_scale
from .data import OneHotEncoder
from .data import PolynomialFeatures
from .label import label_binarize
from .label import LabelBinarizer
from .label import LabelEncoder
from .label import MultiLabelBinarizer
from .imputation import Imputer
__all__ = [
'Binarizer',
'Imputer',
'KernelCenterer',
'LabelBinarizer',
'LabelEncoder',
'MultiLabelBinarizer',
'MinMaxScaler',
'MaxAbsScaler',
'Normalizer',
'OneHotEncoder',
'RobustScaler',
'StandardScaler',
'add_dummy_feature',
'PolynomialFeatures',
'binarize',
'normalize',
'scale',
'robust_scale',
'maxabs_scale',
'minmax_scale',
'label_binarize',
]
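# A hedged usage note (not part of the original module): a typical entry point
# is, for example,
#   from sklearn.preprocessing import StandardScaler
#   X_scaled = StandardScaler().fit_transform(X)
# where `X` is assumed to be a 2-D array of shape (n_samples, n_features).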
| bsd-3-clause |
bdrillard/spark | python/pyspark/sql/functions.py | 1 | 143697 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
A collections of builtin functions
"""
import sys
import functools
import warnings
if sys.version < "3":
from itertools import imap as map
if sys.version >= '3':
basestring = str
from pyspark import since, SparkContext
from pyspark.rdd import ignore_unicode_prefix, PythonEvalType
from pyspark.sql.column import Column, _to_java_column, _to_seq, _create_column_from_literal, \
_create_column_from_name
from pyspark.sql.dataframe import DataFrame
from pyspark.sql.types import StringType, DataType
# Keep UserDefinedFunction import for backwards compatible import; moved in SPARK-22409
from pyspark.sql.udf import UserDefinedFunction, _create_udf
from pyspark.sql.utils import to_str
# Note to developers: all PySpark functions here take strings as column names whenever possible.
# Namely, if columns are referred to as arguments, they can always be either Column or string,
# even though there might be a few exceptions for legacy or unavoidable reasons.
# If you are fixing other language APIs together, please note that the Scala side is not the case,
# since it requires every single overload to be defined explicitly.
def _create_function(name, doc=""):
"""Create a PySpark function by its name"""
def _(col):
sc = SparkContext._active_spark_context
jc = getattr(sc._jvm.functions, name)(col._jc if isinstance(col, Column) else col)
return Column(jc)
_.__name__ = name
_.__doc__ = doc
return _
def _create_function_over_column(name, doc=""):
"""Similar with `_create_function` but creates a PySpark function that takes a column
(as string as well). This is mainly for PySpark functions to take strings as
column names.
"""
def _(col):
sc = SparkContext._active_spark_context
jc = getattr(sc._jvm.functions, name)(_to_java_column(col))
return Column(jc)
_.__name__ = name
_.__doc__ = doc
return _
def _wrap_deprecated_function(func, message):
""" Wrap the deprecated function to print out deprecation warnings"""
def _(col):
warnings.warn(message, DeprecationWarning)
return func(col)
return functools.wraps(func)(_)
def _create_binary_mathfunction(name, doc=""):
""" Create a binary mathfunction by name"""
def _(col1, col2):
sc = SparkContext._active_spark_context
# For legacy reasons, the arguments here can be implicitly converted into floats,
# if they are not columns or strings.
if isinstance(col1, Column):
arg1 = col1._jc
elif isinstance(col1, basestring):
arg1 = _create_column_from_name(col1)
else:
arg1 = float(col1)
if isinstance(col2, Column):
arg2 = col2._jc
elif isinstance(col2, basestring):
arg2 = _create_column_from_name(col2)
else:
arg2 = float(col2)
jc = getattr(sc._jvm.functions, name)(arg1, arg2)
return Column(jc)
_.__name__ = name
_.__doc__ = doc
return _
def _create_window_function(name, doc=''):
""" Create a window function by name """
def _():
sc = SparkContext._active_spark_context
jc = getattr(sc._jvm.functions, name)()
return Column(jc)
_.__name__ = name
_.__doc__ = 'Window function: ' + doc
return _
def _options_to_str(options):
return {key: to_str(value) for (key, value) in options.items()}
_lit_doc = """
Creates a :class:`Column` of literal value.
>>> df.select(lit(5).alias('height')).withColumn('spark_user', lit(True)).take(1)
[Row(height=5, spark_user=True)]
"""
_functions = {
'lit': _lit_doc,
'col': 'Returns a :class:`Column` based on the given column name.',
'column': 'Returns a :class:`Column` based on the given column name.',
'asc': 'Returns a sort expression based on the ascending order of the given column name.',
'desc': 'Returns a sort expression based on the descending order of the given column name.',
}
_functions_over_column = {
'sqrt': 'Computes the square root of the specified float value.',
'abs': 'Computes the absolute value.',
'max': 'Aggregate function: returns the maximum value of the expression in a group.',
'min': 'Aggregate function: returns the minimum value of the expression in a group.',
'count': 'Aggregate function: returns the number of items in a group.',
'sum': 'Aggregate function: returns the sum of all values in the expression.',
'avg': 'Aggregate function: returns the average of the values in a group.',
'mean': 'Aggregate function: returns the average of the values in a group.',
'sumDistinct': 'Aggregate function: returns the sum of distinct values in the expression.',
}
_functions_1_4_over_column = {
# unary math functions
'acos': ':return: inverse cosine of `col`, as if computed by `java.lang.Math.acos()`',
'asin': ':return: inverse sine of `col`, as if computed by `java.lang.Math.asin()`',
'atan': ':return: inverse tangent of `col`, as if computed by `java.lang.Math.atan()`',
'cbrt': 'Computes the cube-root of the given value.',
'ceil': 'Computes the ceiling of the given value.',
'cos': """:param col: angle in radians
:return: cosine of the angle, as if computed by `java.lang.Math.cos()`.""",
'cosh': """:param col: hyperbolic angle
:return: hyperbolic cosine of the angle, as if computed by `java.lang.Math.cosh()`""",
'exp': 'Computes the exponential of the given value.',
'expm1': 'Computes the exponential of the given value minus one.',
'floor': 'Computes the floor of the given value.',
'log': 'Computes the natural logarithm of the given value.',
'log10': 'Computes the logarithm of the given value in Base 10.',
'log1p': 'Computes the natural logarithm of the given value plus one.',
'rint': 'Returns the double value that is closest in value to the argument and' +
' is equal to a mathematical integer.',
'signum': 'Computes the signum of the given value.',
'sin': """:param col: angle in radians
:return: sine of the angle, as if computed by `java.lang.Math.sin()`""",
'sinh': """:param col: hyperbolic angle
:return: hyperbolic sine of the given value,
as if computed by `java.lang.Math.sinh()`""",
'tan': """:param col: angle in radians
:return: tangent of the given value, as if computed by `java.lang.Math.tan()`""",
'tanh': """:param col: hyperbolic angle
:return: hyperbolic tangent of the given value,
as if computed by `java.lang.Math.tanh()`""",
'toDegrees': '.. note:: Deprecated in 2.1, use :func:`degrees` instead.',
'toRadians': '.. note:: Deprecated in 2.1, use :func:`radians` instead.',
'bitwiseNOT': 'Computes bitwise not.',
}
_functions_2_4 = {
    'asc_nulls_first': 'Returns a sort expression based on the ascending order of the given' +
                       ' column name, and null values appear before non-null values.',
    'asc_nulls_last': 'Returns a sort expression based on the ascending order of the given' +
                      ' column name, and null values appear after non-null values.',
    'desc_nulls_first': 'Returns a sort expression based on the descending order of the given' +
                        ' column name, and null values appear before non-null values.',
    'desc_nulls_last': 'Returns a sort expression based on the descending order of the given' +
                       ' column name, and null values appear after non-null values.',
}
_collect_list_doc = """
Aggregate function: returns a list of objects with duplicates.
.. note:: The function is non-deterministic because the order of collected results depends
        on the order of rows, which may be non-deterministic after a shuffle.
>>> df2 = spark.createDataFrame([(2,), (5,), (5,)], ('age',))
>>> df2.agg(collect_list('age')).collect()
[Row(collect_list(age)=[2, 5, 5])]
"""
_collect_set_doc = """
Aggregate function: returns a set of objects with duplicate elements eliminated.
.. note:: The function is non-deterministic because the order of collected results depends
        on the order of rows, which may be non-deterministic after a shuffle.
>>> df2 = spark.createDataFrame([(2,), (5,), (5,)], ('age',))
>>> df2.agg(collect_set('age')).collect()
[Row(collect_set(age)=[5, 2])]
"""
_functions_1_6_over_column = {
    # statistical and aggregate functions
'stddev': 'Aggregate function: alias for stddev_samp.',
'stddev_samp': 'Aggregate function: returns the unbiased sample standard deviation of' +
' the expression in a group.',
'stddev_pop': 'Aggregate function: returns population standard deviation of' +
' the expression in a group.',
'variance': 'Aggregate function: alias for var_samp.',
'var_samp': 'Aggregate function: returns the unbiased sample variance of' +
' the values in a group.',
'var_pop': 'Aggregate function: returns the population variance of the values in a group.',
'skewness': 'Aggregate function: returns the skewness of the values in a group.',
'kurtosis': 'Aggregate function: returns the kurtosis of the values in a group.',
'collect_list': _collect_list_doc,
'collect_set': _collect_set_doc
}
_functions_2_1_over_column = {
# unary math functions
'degrees': """
Converts an angle measured in radians to an approximately equivalent angle
measured in degrees.
:param col: angle in radians
:return: angle in degrees, as if computed by `java.lang.Math.toDegrees()`
""",
'radians': """
Converts an angle measured in degrees to an approximately equivalent angle
measured in radians.
:param col: angle in degrees
:return: angle in radians, as if computed by `java.lang.Math.toRadians()`
""",
}
# math functions that take two arguments as input
_binary_mathfunctions = {
'atan2': """
:param col1: coordinate on y-axis
:param col2: coordinate on x-axis
:return: the `theta` component of the point
(`r`, `theta`)
in polar coordinates that corresponds to the point
(`x`, `y`) in Cartesian coordinates,
as if computed by `java.lang.Math.atan2()`
""",
'hypot': 'Computes ``sqrt(a^2 + b^2)`` without intermediate overflow or underflow.',
'pow': 'Returns the value of the first argument raised to the power of the second argument.',
}
_window_functions = {
'row_number':
"""returns a sequential number starting at 1 within a window partition.""",
'dense_rank':
"""returns the rank of rows within a window partition, without any gaps.
The difference between rank and dense_rank is that dense_rank leaves no gaps in ranking
sequence when there are ties. That is, if you were ranking a competition using dense_rank
and had three people tie for second place, you would say that all three were in second
        place and that the next person came in third. Rank would give sequential numbers, so
        the person who came in third place (after the ties) would register as coming in fifth.
This is equivalent to the DENSE_RANK function in SQL.""",
'rank':
"""returns the rank of rows within a window partition.
The difference between rank and dense_rank is that dense_rank leaves no gaps in ranking
sequence when there are ties. That is, if you were ranking a competition using dense_rank
and had three people tie for second place, you would say that all three were in second
        place and that the next person came in third. Rank would give sequential numbers, so
        the person who came in third place (after the ties) would register as coming in fifth.
This is equivalent to the RANK function in SQL.""",
'cume_dist':
"""returns the cumulative distribution of values within a window partition,
i.e. the fraction of rows that are below the current row.""",
'percent_rank':
"""returns the relative rank (i.e. percentile) of rows within a window partition.""",
}
# Wraps deprecated functions (keys) with the messages (values).
_functions_deprecated = {
}
for _name, _doc in _functions.items():
globals()[_name] = since(1.3)(_create_function(_name, _doc))
for _name, _doc in _functions_over_column.items():
globals()[_name] = since(1.3)(_create_function_over_column(_name, _doc))
for _name, _doc in _functions_1_4_over_column.items():
globals()[_name] = since(1.4)(_create_function_over_column(_name, _doc))
for _name, _doc in _binary_mathfunctions.items():
globals()[_name] = since(1.4)(_create_binary_mathfunction(_name, _doc))
for _name, _doc in _window_functions.items():
globals()[_name] = since(1.6)(_create_window_function(_name, _doc))
for _name, _doc in _functions_1_6_over_column.items():
globals()[_name] = since(1.6)(_create_function_over_column(_name, _doc))
for _name, _doc in _functions_2_1_over_column.items():
globals()[_name] = since(2.1)(_create_function_over_column(_name, _doc))
for _name, _message in _functions_deprecated.items():
globals()[_name] = _wrap_deprecated_function(globals()[_name], _message)
for _name, _doc in _functions_2_4.items():
globals()[_name] = since(2.4)(_create_function(_name, _doc))
del _name, _doc
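# At this point the functions generated from the dictionaries above (e.g. lit,
# col, sqrt, stddev, atan2, rank) exist as module-level attributes.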
@since(2.1)
def approx_count_distinct(col, rsd=None):
"""Aggregate function: returns a new :class:`Column` for approximate distinct count of
column `col`.
:param rsd: maximum estimation error allowed (default = 0.05). For rsd < 0.01, it is more
efficient to use :func:`countDistinct`
>>> df.agg(approx_count_distinct(df.age).alias('distinct_ages')).collect()
[Row(distinct_ages=2)]
"""
sc = SparkContext._active_spark_context
if rsd is None:
jc = sc._jvm.functions.approx_count_distinct(_to_java_column(col))
else:
jc = sc._jvm.functions.approx_count_distinct(_to_java_column(col), rsd)
return Column(jc)
@since(1.6)
def broadcast(df):
"""Marks a DataFrame as small enough for use in broadcast joins."""
sc = SparkContext._active_spark_context
return DataFrame(sc._jvm.functions.broadcast(df._jdf), df.sql_ctx)
@since(1.4)
def coalesce(*cols):
"""Returns the first column that is not null.
>>> cDf = spark.createDataFrame([(None, None), (1, None), (None, 2)], ("a", "b"))
>>> cDf.show()
+----+----+
| a| b|
+----+----+
|null|null|
| 1|null|
|null| 2|
+----+----+
>>> cDf.select(coalesce(cDf["a"], cDf["b"])).show()
+--------------+
|coalesce(a, b)|
+--------------+
| null|
| 1|
| 2|
+--------------+
>>> cDf.select('*', coalesce(cDf["a"], lit(0.0))).show()
+----+----+----------------+
| a| b|coalesce(a, 0.0)|
+----+----+----------------+
|null|null| 0.0|
| 1|null| 1.0|
|null| 2| 0.0|
+----+----+----------------+
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.coalesce(_to_seq(sc, cols, _to_java_column))
return Column(jc)
@since(1.6)
def corr(col1, col2):
"""Returns a new :class:`Column` for the Pearson Correlation Coefficient for ``col1``
and ``col2``.
>>> a = range(20)
>>> b = [2 * x for x in range(20)]
>>> df = spark.createDataFrame(zip(a, b), ["a", "b"])
>>> df.agg(corr("a", "b").alias('c')).collect()
[Row(c=1.0)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.corr(_to_java_column(col1), _to_java_column(col2)))
@since(2.0)
def covar_pop(col1, col2):
"""Returns a new :class:`Column` for the population covariance of ``col1`` and ``col2``.
>>> a = [1] * 10
>>> b = [1] * 10
>>> df = spark.createDataFrame(zip(a, b), ["a", "b"])
>>> df.agg(covar_pop("a", "b").alias('c')).collect()
[Row(c=0.0)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.covar_pop(_to_java_column(col1), _to_java_column(col2)))
@since(2.0)
def covar_samp(col1, col2):
"""Returns a new :class:`Column` for the sample covariance of ``col1`` and ``col2``.
>>> a = [1] * 10
>>> b = [1] * 10
>>> df = spark.createDataFrame(zip(a, b), ["a", "b"])
>>> df.agg(covar_samp("a", "b").alias('c')).collect()
[Row(c=0.0)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.covar_samp(_to_java_column(col1), _to_java_column(col2)))
@since(1.3)
def countDistinct(col, *cols):
"""Returns a new :class:`Column` for distinct count of ``col`` or ``cols``.
>>> df.agg(countDistinct(df.age, df.name).alias('c')).collect()
[Row(c=2)]
>>> df.agg(countDistinct("age", "name").alias('c')).collect()
[Row(c=2)]
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.countDistinct(_to_java_column(col), _to_seq(sc, cols, _to_java_column))
return Column(jc)
@since(1.3)
def first(col, ignorenulls=False):
"""Aggregate function: returns the first value in a group.
The function by default returns the first values it sees. It will return the first non-null
value it sees when ignoreNulls is set to true. If all values are null, then null is returned.
    .. note:: The function is non-deterministic because its result depends on the order of rows,
        which may be non-deterministic after a shuffle.
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.first(_to_java_column(col), ignorenulls)
return Column(jc)
@since(2.0)
def grouping(col):
"""
Aggregate function: indicates whether a specified column in a GROUP BY list is aggregated
or not, returns 1 for aggregated or 0 for not aggregated in the result set.
>>> df.cube("name").agg(grouping("name"), sum("age")).orderBy("name").show()
+-----+--------------+--------+
| name|grouping(name)|sum(age)|
+-----+--------------+--------+
| null| 1| 7|
|Alice| 0| 2|
| Bob| 0| 5|
+-----+--------------+--------+
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.grouping(_to_java_column(col))
return Column(jc)
@since(2.0)
def grouping_id(*cols):
"""
Aggregate function: returns the level of grouping, equals to
(grouping(c1) << (n-1)) + (grouping(c2) << (n-2)) + ... + grouping(cn)
    .. note:: The list of columns should match the grouping columns exactly, or be empty (meaning
        all the grouping columns).
>>> df.cube("name").agg(grouping_id(), sum("age")).orderBy("name").show()
+-----+-------------+--------+
| name|grouping_id()|sum(age)|
+-----+-------------+--------+
| null| 1| 7|
|Alice| 0| 2|
| Bob| 0| 5|
+-----+-------------+--------+
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.grouping_id(_to_seq(sc, cols, _to_java_column))
return Column(jc)
@since(1.6)
def input_file_name():
"""Creates a string column for the file name of the current Spark task.
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.input_file_name())
@since(1.6)
def isnan(col):
"""An expression that returns true iff the column is NaN.
>>> df = spark.createDataFrame([(1.0, float('nan')), (float('nan'), 2.0)], ("a", "b"))
>>> df.select(isnan("a").alias("r1"), isnan(df.a).alias("r2")).collect()
[Row(r1=False, r2=False), Row(r1=True, r2=True)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.isnan(_to_java_column(col)))
@since(1.6)
def isnull(col):
"""An expression that returns true iff the column is null.
>>> df = spark.createDataFrame([(1, None), (None, 2)], ("a", "b"))
>>> df.select(isnull("a").alias("r1"), isnull(df.a).alias("r2")).collect()
[Row(r1=False, r2=False), Row(r1=True, r2=True)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.isnull(_to_java_column(col)))
@since(1.3)
def last(col, ignorenulls=False):
"""Aggregate function: returns the last value in a group.
The function by default returns the last values it sees. It will return the last non-null
value it sees when ignoreNulls is set to true. If all values are null, then null is returned.
    .. note:: The function is non-deterministic because its result depends on the order of rows,
        which may be non-deterministic after a shuffle.
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.last(_to_java_column(col), ignorenulls)
return Column(jc)
@since(1.6)
def monotonically_increasing_id():
"""A column that generates monotonically increasing 64-bit integers.
The generated ID is guaranteed to be monotonically increasing and unique, but not consecutive.
The current implementation puts the partition ID in the upper 31 bits, and the record number
within each partition in the lower 33 bits. The assumption is that the data frame has
less than 1 billion partitions, and each partition has less than 8 billion records.
.. note:: The function is non-deterministic because its result depends on partition IDs.
As an example, consider a :class:`DataFrame` with two partitions, each with 3 records.
This expression would return the following IDs:
0, 1, 2, 8589934592 (1L << 33), 8589934593, 8589934594.
>>> df0 = sc.parallelize(range(2), 2).mapPartitions(lambda x: [(1,), (2,), (3,)]).toDF(['col1'])
>>> df0.select(monotonically_increasing_id().alias('id')).collect()
[Row(id=0), Row(id=1), Row(id=2), Row(id=8589934592), Row(id=8589934593), Row(id=8589934594)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.monotonically_increasing_id())
@since(1.6)
def nanvl(col1, col2):
"""Returns col1 if it is not NaN, or col2 if col1 is NaN.
Both inputs should be floating point columns (:class:`DoubleType` or :class:`FloatType`).
>>> df = spark.createDataFrame([(1.0, float('nan')), (float('nan'), 2.0)], ("a", "b"))
>>> df.select(nanvl("a", "b").alias("r1"), nanvl(df.a, df.b).alias("r2")).collect()
[Row(r1=1.0, r2=1.0), Row(r1=2.0, r2=2.0)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.nanvl(_to_java_column(col1), _to_java_column(col2)))
@ignore_unicode_prefix
@since(1.4)
def rand(seed=None):
"""Generates a random column with independent and identically distributed (i.i.d.) samples
from U[0.0, 1.0].
.. note:: The function is non-deterministic in general case.
>>> df.withColumn('rand', rand(seed=42) * 3).collect()
[Row(age=2, name=u'Alice', rand=2.4052597283576684),
Row(age=5, name=u'Bob', rand=2.3913904055683974)]
"""
sc = SparkContext._active_spark_context
if seed is not None:
jc = sc._jvm.functions.rand(seed)
else:
jc = sc._jvm.functions.rand()
return Column(jc)
@ignore_unicode_prefix
@since(1.4)
def randn(seed=None):
"""Generates a column with independent and identically distributed (i.i.d.) samples from
the standard normal distribution.
.. note:: The function is non-deterministic in general case.
>>> df.withColumn('randn', randn(seed=42)).collect()
[Row(age=2, name=u'Alice', randn=1.1027054481455365),
Row(age=5, name=u'Bob', randn=0.7400395449950132)]
"""
sc = SparkContext._active_spark_context
if seed is not None:
jc = sc._jvm.functions.randn(seed)
else:
jc = sc._jvm.functions.randn()
return Column(jc)
@since(1.5)
def round(col, scale=0):
"""
Round the given value to `scale` decimal places using HALF_UP rounding mode if `scale` >= 0
or at integral part when `scale` < 0.
>>> spark.createDataFrame([(2.5,)], ['a']).select(round('a', 0).alias('r')).collect()
[Row(r=3.0)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.round(_to_java_column(col), scale))
@since(2.0)
def bround(col, scale=0):
"""
Round the given value to `scale` decimal places using HALF_EVEN rounding mode if `scale` >= 0
or at integral part when `scale` < 0.
>>> spark.createDataFrame([(2.5,)], ['a']).select(bround('a', 0).alias('r')).collect()
[Row(r=2.0)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.bround(_to_java_column(col), scale))
@since(1.5)
def shiftLeft(col, numBits):
"""Shift the given value numBits left.
>>> spark.createDataFrame([(21,)], ['a']).select(shiftLeft('a', 1).alias('r')).collect()
[Row(r=42)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.shiftLeft(_to_java_column(col), numBits))
@since(1.5)
def shiftRight(col, numBits):
"""(Signed) shift the given value numBits right.
>>> spark.createDataFrame([(42,)], ['a']).select(shiftRight('a', 1).alias('r')).collect()
[Row(r=21)]
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.shiftRight(_to_java_column(col), numBits)
return Column(jc)
@since(1.5)
def shiftRightUnsigned(col, numBits):
"""Unsigned shift the given value numBits right.
>>> df = spark.createDataFrame([(-42,)], ['a'])
>>> df.select(shiftRightUnsigned('a', 1).alias('r')).collect()
[Row(r=9223372036854775787)]
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.shiftRightUnsigned(_to_java_column(col), numBits)
return Column(jc)
@since(1.6)
def spark_partition_id():
"""A column for partition ID.
    .. note:: This is non-deterministic because it depends on data partitioning and task scheduling.
>>> df.repartition(1).select(spark_partition_id().alias("pid")).collect()
[Row(pid=0), Row(pid=0)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.spark_partition_id())
@since(1.5)
def expr(str):
"""Parses the expression string into the column that it represents
>>> df.select(expr("length(name)")).collect()
[Row(length(name)=5), Row(length(name)=3)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.expr(str))
@ignore_unicode_prefix
@since(1.4)
def struct(*cols):
"""Creates a new struct column.
:param cols: list of column names (string) or list of :class:`Column` expressions
>>> df.select(struct('age', 'name').alias("struct")).collect()
[Row(struct=Row(age=2, name=u'Alice')), Row(struct=Row(age=5, name=u'Bob'))]
>>> df.select(struct([df.age, df.name]).alias("struct")).collect()
[Row(struct=Row(age=2, name=u'Alice')), Row(struct=Row(age=5, name=u'Bob'))]
"""
sc = SparkContext._active_spark_context
if len(cols) == 1 and isinstance(cols[0], (list, set)):
cols = cols[0]
jc = sc._jvm.functions.struct(_to_seq(sc, cols, _to_java_column))
return Column(jc)
@since(1.5)
def greatest(*cols):
"""
Returns the greatest value of the list of column names, skipping null values.
This function takes at least 2 parameters. It will return null iff all parameters are null.
>>> df = spark.createDataFrame([(1, 4, 3)], ['a', 'b', 'c'])
>>> df.select(greatest(df.a, df.b, df.c).alias("greatest")).collect()
[Row(greatest=4)]
"""
if len(cols) < 2:
raise ValueError("greatest should take at least two columns")
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.greatest(_to_seq(sc, cols, _to_java_column)))
@since(1.5)
def least(*cols):
"""
Returns the least value of the list of column names, skipping null values.
This function takes at least 2 parameters. It will return null iff all parameters are null.
>>> df = spark.createDataFrame([(1, 4, 3)], ['a', 'b', 'c'])
>>> df.select(least(df.a, df.b, df.c).alias("least")).collect()
[Row(least=1)]
"""
if len(cols) < 2:
raise ValueError("least should take at least two columns")
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.least(_to_seq(sc, cols, _to_java_column)))
@since(1.4)
def when(condition, value):
"""Evaluates a list of conditions and returns one of multiple possible result expressions.
If :func:`Column.otherwise` is not invoked, None is returned for unmatched conditions.
:param condition: a boolean :class:`Column` expression.
:param value: a literal value, or a :class:`Column` expression.
>>> df.select(when(df['age'] == 2, 3).otherwise(4).alias("age")).collect()
[Row(age=3), Row(age=4)]
>>> df.select(when(df.age == 2, df.age + 1).alias("age")).collect()
[Row(age=3), Row(age=None)]
"""
sc = SparkContext._active_spark_context
if not isinstance(condition, Column):
raise TypeError("condition should be a Column")
v = value._jc if isinstance(value, Column) else value
jc = sc._jvm.functions.when(condition._jc, v)
return Column(jc)
@since(1.5)
def log(arg1, arg2=None):
"""Returns the first argument-based logarithm of the second argument.
If there is only one argument, then this takes the natural logarithm of the argument.
>>> df.select(log(10.0, df.age).alias('ten')).rdd.map(lambda l: str(l.ten)[:7]).collect()
['0.30102', '0.69897']
>>> df.select(log(df.age).alias('e')).rdd.map(lambda l: str(l.e)[:7]).collect()
['0.69314', '1.60943']
"""
sc = SparkContext._active_spark_context
if arg2 is None:
jc = sc._jvm.functions.log(_to_java_column(arg1))
else:
jc = sc._jvm.functions.log(arg1, _to_java_column(arg2))
return Column(jc)
@since(1.5)
def log2(col):
"""Returns the base-2 logarithm of the argument.
>>> spark.createDataFrame([(4,)], ['a']).select(log2('a').alias('log2')).collect()
[Row(log2=2.0)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.log2(_to_java_column(col)))
@since(1.5)
@ignore_unicode_prefix
def conv(col, fromBase, toBase):
"""
Convert a number in a string column from one base to another.
>>> df = spark.createDataFrame([("010101",)], ['n'])
>>> df.select(conv(df.n, 2, 16).alias('hex')).collect()
[Row(hex=u'15')]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.conv(_to_java_column(col), fromBase, toBase))
@since(1.5)
def factorial(col):
"""
Computes the factorial of the given value.
>>> df = spark.createDataFrame([(5,)], ['n'])
>>> df.select(factorial(df.n).alias('f')).collect()
[Row(f=120)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.factorial(_to_java_column(col)))
# --------------- Window functions ------------------------
@since(1.4)
def lag(col, offset=1, default=None):
"""
Window function: returns the value that is `offset` rows before the current row, and
    `defaultValue` if there are fewer than `offset` rows before the current row. For example,
an `offset` of one will return the previous row at any given point in the window partition.
This is equivalent to the LAG function in SQL.
:param col: name of column or expression
:param offset: number of row to extend
:param default: default value
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.lag(_to_java_column(col), offset, default))
@since(1.4)
def lead(col, offset=1, default=None):
"""
Window function: returns the value that is `offset` rows after the current row, and
    `defaultValue` if there are fewer than `offset` rows after the current row. For example,
an `offset` of one will return the next row at any given point in the window partition.
This is equivalent to the LEAD function in SQL.
:param col: name of column or expression
:param offset: number of row to extend
:param default: default value
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.lead(_to_java_column(col), offset, default))
@since(1.4)
def ntile(n):
"""
Window function: returns the ntile group id (from 1 to `n` inclusive)
in an ordered window partition. For example, if `n` is 4, the first
quarter of the rows will get value 1, the second quarter will get 2,
the third quarter will get 3, and the last quarter will get 4.
This is equivalent to the NTILE function in SQL.
:param n: an integer
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.ntile(int(n)))
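# Hedged usage sketch (not part of this module): lag, lead and ntile are window
# functions, so they must be evaluated over a window spec, for example:
#
#   from pyspark.sql import Window
#   w = Window.partitionBy("name").orderBy("age")
#   df.select("name", "age",
#             lag("age", 1).over(w).alias("prev_age"),
#             ntile(2).over(w).alias("bucket"))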
# ---------------------- Date/Timestamp functions ------------------------------
@since(1.5)
def current_date():
"""
Returns the current date as a :class:`DateType` column.
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.current_date())
def current_timestamp():
"""
Returns the current timestamp as a :class:`TimestampType` column.
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.current_timestamp())
@ignore_unicode_prefix
@since(1.5)
def date_format(date, format):
"""
Converts a date/timestamp/string to a value of string in the format specified by the date
format given by the second argument.
A pattern could be for instance `dd.MM.yyyy` and could return a string like '18.03.1993'. All
pattern letters of the Java class `java.time.format.DateTimeFormatter` can be used.
    .. note:: Whenever possible, use specialized functions like `year`. These benefit from a
        specialized implementation.
>>> df = spark.createDataFrame([('2015-04-08',)], ['dt'])
>>> df.select(date_format('dt', 'MM/dd/yyy').alias('date')).collect()
[Row(date=u'04/08/2015')]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.date_format(_to_java_column(date), format))
@since(1.5)
def year(col):
"""
Extract the year of a given date as integer.
>>> df = spark.createDataFrame([('2015-04-08',)], ['dt'])
>>> df.select(year('dt').alias('year')).collect()
[Row(year=2015)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.year(_to_java_column(col)))
@since(1.5)
def quarter(col):
"""
Extract the quarter of a given date as integer.
>>> df = spark.createDataFrame([('2015-04-08',)], ['dt'])
>>> df.select(quarter('dt').alias('quarter')).collect()
[Row(quarter=2)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.quarter(_to_java_column(col)))
@since(1.5)
def month(col):
"""
Extract the month of a given date as integer.
>>> df = spark.createDataFrame([('2015-04-08',)], ['dt'])
>>> df.select(month('dt').alias('month')).collect()
[Row(month=4)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.month(_to_java_column(col)))
@since(2.3)
def dayofweek(col):
"""
Extract the day of the week of a given date as integer.
>>> df = spark.createDataFrame([('2015-04-08',)], ['dt'])
>>> df.select(dayofweek('dt').alias('day')).collect()
[Row(day=4)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.dayofweek(_to_java_column(col)))
@since(1.5)
def dayofmonth(col):
"""
Extract the day of the month of a given date as integer.
>>> df = spark.createDataFrame([('2015-04-08',)], ['dt'])
>>> df.select(dayofmonth('dt').alias('day')).collect()
[Row(day=8)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.dayofmonth(_to_java_column(col)))
@since(1.5)
def dayofyear(col):
"""
Extract the day of the year of a given date as integer.
>>> df = spark.createDataFrame([('2015-04-08',)], ['dt'])
>>> df.select(dayofyear('dt').alias('day')).collect()
[Row(day=98)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.dayofyear(_to_java_column(col)))
@since(1.5)
def hour(col):
"""
Extract the hours of a given date as integer.
>>> df = spark.createDataFrame([('2015-04-08 13:08:15',)], ['ts'])
>>> df.select(hour('ts').alias('hour')).collect()
[Row(hour=13)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.hour(_to_java_column(col)))
@since(1.5)
def minute(col):
"""
Extract the minutes of a given date as integer.
>>> df = spark.createDataFrame([('2015-04-08 13:08:15',)], ['ts'])
>>> df.select(minute('ts').alias('minute')).collect()
[Row(minute=8)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.minute(_to_java_column(col)))
@since(1.5)
def second(col):
"""
Extract the seconds of a given date as integer.
>>> df = spark.createDataFrame([('2015-04-08 13:08:15',)], ['ts'])
>>> df.select(second('ts').alias('second')).collect()
[Row(second=15)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.second(_to_java_column(col)))
@since(1.5)
def weekofyear(col):
"""
Extract the week number of a given date as integer.
>>> df = spark.createDataFrame([('2015-04-08',)], ['dt'])
>>> df.select(weekofyear(df.dt).alias('week')).collect()
[Row(week=15)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.weekofyear(_to_java_column(col)))
@since(1.5)
def date_add(start, days):
"""
Returns the date that is `days` days after `start`
>>> df = spark.createDataFrame([('2015-04-08',)], ['dt'])
>>> df.select(date_add(df.dt, 1).alias('next_date')).collect()
[Row(next_date=datetime.date(2015, 4, 9))]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.date_add(_to_java_column(start), days))
@since(1.5)
def date_sub(start, days):
"""
Returns the date that is `days` days before `start`
>>> df = spark.createDataFrame([('2015-04-08',)], ['dt'])
>>> df.select(date_sub(df.dt, 1).alias('prev_date')).collect()
[Row(prev_date=datetime.date(2015, 4, 7))]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.date_sub(_to_java_column(start), days))
@since(1.5)
def datediff(end, start):
"""
Returns the number of days from `start` to `end`.
>>> df = spark.createDataFrame([('2015-04-08','2015-05-10')], ['d1', 'd2'])
>>> df.select(datediff(df.d2, df.d1).alias('diff')).collect()
[Row(diff=32)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.datediff(_to_java_column(end), _to_java_column(start)))
@since(1.5)
def add_months(start, months):
"""
Returns the date that is `months` months after `start`
>>> df = spark.createDataFrame([('2015-04-08',)], ['dt'])
>>> df.select(add_months(df.dt, 1).alias('next_month')).collect()
[Row(next_month=datetime.date(2015, 5, 8))]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.add_months(_to_java_column(start), months))
@since(1.5)
def months_between(date1, date2, roundOff=True):
"""
Returns number of months between dates date1 and date2.
If date1 is later than date2, then the result is positive.
If date1 and date2 are on the same day of month, or both are the last day of month,
returns an integer (time of day will be ignored).
The result is rounded off to 8 digits unless `roundOff` is set to `False`.
>>> df = spark.createDataFrame([('1997-02-28 10:30:00', '1996-10-30')], ['date1', 'date2'])
>>> df.select(months_between(df.date1, df.date2).alias('months')).collect()
[Row(months=3.94959677)]
>>> df.select(months_between(df.date1, df.date2, False).alias('months')).collect()
[Row(months=3.9495967741935485)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.months_between(
_to_java_column(date1), _to_java_column(date2), roundOff))
@since(2.2)
def to_date(col, format=None):
"""Converts a :class:`Column` of :class:`pyspark.sql.types.StringType` or
:class:`pyspark.sql.types.TimestampType` into :class:`pyspark.sql.types.DateType`
using the optionally specified format. Specify formats according to
`DateTimeFormatter <https://docs.oracle.com/javase/8/docs/api/java/time/format/DateTimeFormatter.html>`_. # noqa
By default, it follows casting rules to :class:`pyspark.sql.types.DateType` if the format
is omitted (equivalent to ``col.cast("date")``).
>>> df = spark.createDataFrame([('1997-02-28 10:30:00',)], ['t'])
>>> df.select(to_date(df.t).alias('date')).collect()
[Row(date=datetime.date(1997, 2, 28))]
>>> df = spark.createDataFrame([('1997-02-28 10:30:00',)], ['t'])
>>> df.select(to_date(df.t, 'yyyy-MM-dd HH:mm:ss').alias('date')).collect()
[Row(date=datetime.date(1997, 2, 28))]
"""
sc = SparkContext._active_spark_context
if format is None:
jc = sc._jvm.functions.to_date(_to_java_column(col))
else:
jc = sc._jvm.functions.to_date(_to_java_column(col), format)
return Column(jc)
@since(2.2)
def to_timestamp(col, format=None):
"""Converts a :class:`Column` of :class:`pyspark.sql.types.StringType` or
:class:`pyspark.sql.types.TimestampType` into :class:`pyspark.sql.types.DateType`
using the optionally specified format. Specify formats according to
`DateTimeFormatter <https://docs.oracle.com/javase/8/docs/api/java/time/format/DateTimeFormatter.html>`_. # noqa
By default, it follows casting rules to :class:`pyspark.sql.types.TimestampType` if the format
is omitted (equivalent to ``col.cast("timestamp")``).
>>> df = spark.createDataFrame([('1997-02-28 10:30:00',)], ['t'])
>>> df.select(to_timestamp(df.t).alias('dt')).collect()
[Row(dt=datetime.datetime(1997, 2, 28, 10, 30))]
>>> df = spark.createDataFrame([('1997-02-28 10:30:00',)], ['t'])
>>> df.select(to_timestamp(df.t, 'yyyy-MM-dd HH:mm:ss').alias('dt')).collect()
[Row(dt=datetime.datetime(1997, 2, 28, 10, 30))]
"""
sc = SparkContext._active_spark_context
if format is None:
jc = sc._jvm.functions.to_timestamp(_to_java_column(col))
else:
jc = sc._jvm.functions.to_timestamp(_to_java_column(col), format)
return Column(jc)
@since(1.5)
def trunc(date, format):
"""
Returns date truncated to the unit specified by the format.
:param format: 'year', 'yyyy', 'yy' or 'month', 'mon', 'mm'
>>> df = spark.createDataFrame([('1997-02-28',)], ['d'])
>>> df.select(trunc(df.d, 'year').alias('year')).collect()
[Row(year=datetime.date(1997, 1, 1))]
>>> df.select(trunc(df.d, 'mon').alias('month')).collect()
[Row(month=datetime.date(1997, 2, 1))]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.trunc(_to_java_column(date), format))
@since(2.3)
def date_trunc(format, timestamp):
"""
Returns timestamp truncated to the unit specified by the format.
:param format: 'year', 'yyyy', 'yy', 'month', 'mon', 'mm',
'day', 'dd', 'hour', 'minute', 'second', 'week', 'quarter'
>>> df = spark.createDataFrame([('1997-02-28 05:02:11',)], ['t'])
>>> df.select(date_trunc('year', df.t).alias('year')).collect()
[Row(year=datetime.datetime(1997, 1, 1, 0, 0))]
>>> df.select(date_trunc('mon', df.t).alias('month')).collect()
[Row(month=datetime.datetime(1997, 2, 1, 0, 0))]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.date_trunc(format, _to_java_column(timestamp)))
@since(1.5)
def next_day(date, dayOfWeek):
"""
Returns the first date which is later than the value of the date column.
Day of the week parameter is case insensitive, and accepts:
"Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun".
>>> df = spark.createDataFrame([('2015-07-27',)], ['d'])
>>> df.select(next_day(df.d, 'Sun').alias('date')).collect()
[Row(date=datetime.date(2015, 8, 2))]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.next_day(_to_java_column(date), dayOfWeek))
@since(1.5)
def last_day(date):
"""
Returns the last day of the month which the given date belongs to.
>>> df = spark.createDataFrame([('1997-02-10',)], ['d'])
>>> df.select(last_day(df.d).alias('date')).collect()
[Row(date=datetime.date(1997, 2, 28))]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.last_day(_to_java_column(date)))
@ignore_unicode_prefix
@since(1.5)
def from_unixtime(timestamp, format="uuuu-MM-dd HH:mm:ss"):
"""
Converts the number of seconds from unix epoch (1970-01-01 00:00:00 UTC) to a string
representing the timestamp of that moment in the current system time zone in the given
format.
>>> spark.conf.set("spark.sql.session.timeZone", "America/Los_Angeles")
>>> time_df = spark.createDataFrame([(1428476400,)], ['unix_time'])
>>> time_df.select(from_unixtime('unix_time').alias('ts')).collect()
[Row(ts=u'2015-04-08 00:00:00')]
>>> spark.conf.unset("spark.sql.session.timeZone")
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.from_unixtime(_to_java_column(timestamp), format))
@since(1.5)
def unix_timestamp(timestamp=None, format='uuuu-MM-dd HH:mm:ss'):
"""
Convert a time string with the given pattern ('uuuu-MM-dd HH:mm:ss', by default)
to a Unix timestamp (in seconds), using the default timezone and the default
locale. Returns null if it fails.
If `timestamp` is None, then it returns the current timestamp.
>>> spark.conf.set("spark.sql.session.timeZone", "America/Los_Angeles")
>>> time_df = spark.createDataFrame([('2015-04-08',)], ['dt'])
>>> time_df.select(unix_timestamp('dt', 'yyyy-MM-dd').alias('unix_time')).collect()
[Row(unix_time=1428476400)]
>>> spark.conf.unset("spark.sql.session.timeZone")
"""
sc = SparkContext._active_spark_context
if timestamp is None:
return Column(sc._jvm.functions.unix_timestamp())
return Column(sc._jvm.functions.unix_timestamp(_to_java_column(timestamp), format))
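# Hedged usage sketch (not part of the original module): round-trips a date
# string through ``unix_timestamp`` and back through ``from_unixtime`` with an
# explicit pattern. It assumes an active SparkSession bound to ``spark`` and a
# fixed session time zone, as the doctests above do; the helper name
# ``_example_unix_roundtrip`` is hypothetical.
def _example_unix_roundtrip(spark):
    spark.conf.set("spark.sql.session.timeZone", "America/Los_Angeles")
    df = spark.createDataFrame([('2015-04-08',)], ['dt'])
    # expected: [Row(ts=u'2015-04-08')]
    result = df.select(
        from_unixtime(unix_timestamp('dt', 'yyyy-MM-dd'), 'yyyy-MM-dd').alias('ts')).collect()
    spark.conf.unset("spark.sql.session.timeZone")
    return result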
@since(1.5)
def from_utc_timestamp(timestamp, tz):
"""
This is a common function for databases supporting TIMESTAMP WITHOUT TIMEZONE. This function
takes a timestamp which is timezone-agnostic, and interprets it as a timestamp in UTC, and
renders that timestamp as a timestamp in the given time zone.
However, a timestamp in Spark represents the number of microseconds from the Unix epoch, which is
not timezone-agnostic. So in Spark this function just shifts the timestamp value from the UTC
timezone to the given timezone.
This function may return confusing results if the input is a string with a timezone, e.g.
'2018-03-13T06:18:23+00:00'. The reason is that Spark first casts the string to a timestamp
according to the timezone in the string, and finally displays the result by converting the
timestamp to a string according to the session local timezone.
:param timestamp: the column that contains timestamps
:param tz: a string that has the ID of timezone, e.g. "GMT", "America/Los_Angeles", etc
.. versionchanged:: 2.4
`tz` can take a :class:`Column` containing timezone ID strings.
>>> df = spark.createDataFrame([('1997-02-28 10:30:00', 'JST')], ['ts', 'tz'])
>>> df.select(from_utc_timestamp(df.ts, "PST").alias('local_time')).collect()
[Row(local_time=datetime.datetime(1997, 2, 28, 2, 30))]
>>> df.select(from_utc_timestamp(df.ts, df.tz).alias('local_time')).collect()
[Row(local_time=datetime.datetime(1997, 2, 28, 19, 30))]
.. note:: Deprecated in 3.0. See SPARK-25496
"""
warnings.warn("Deprecated in 3.0. See SPARK-25496", DeprecationWarning)
sc = SparkContext._active_spark_context
if isinstance(tz, Column):
tz = _to_java_column(tz)
return Column(sc._jvm.functions.from_utc_timestamp(_to_java_column(timestamp), tz))
@since(1.5)
def to_utc_timestamp(timestamp, tz):
"""
This is a common function for databases supporting TIMESTAMP WITHOUT TIMEZONE. This function
takes a timestamp which is timezone-agnostic, and interprets it as a timestamp in the given
timezone, and renders that timestamp as a timestamp in UTC.
However, a timestamp in Spark represents the number of microseconds from the Unix epoch, which is
not timezone-agnostic. So in Spark this function just shifts the timestamp value from the given
timezone to the UTC timezone.
This function may return confusing results if the input is a string with a timezone, e.g.
'2018-03-13T06:18:23+00:00'. The reason is that Spark first casts the string to a timestamp
according to the timezone in the string, and finally displays the result by converting the
timestamp to a string according to the session local timezone.
:param timestamp: the column that contains timestamps
:param tz: a string that has the ID of timezone, e.g. "GMT", "America/Los_Angeles", etc
.. versionchanged:: 2.4
`tz` can take a :class:`Column` containing timezone ID strings.
>>> df = spark.createDataFrame([('1997-02-28 10:30:00', 'JST')], ['ts', 'tz'])
>>> df.select(to_utc_timestamp(df.ts, "PST").alias('utc_time')).collect()
[Row(utc_time=datetime.datetime(1997, 2, 28, 18, 30))]
>>> df.select(to_utc_timestamp(df.ts, df.tz).alias('utc_time')).collect()
[Row(utc_time=datetime.datetime(1997, 2, 28, 1, 30))]
.. note:: Deprecated in 3.0. See SPARK-25496
"""
warnings.warn("Deprecated in 3.0. See SPARK-25496", DeprecationWarning)
sc = SparkContext._active_spark_context
if isinstance(tz, Column):
tz = _to_java_column(tz)
return Column(sc._jvm.functions.to_utc_timestamp(_to_java_column(timestamp), tz))
@since(2.0)
@ignore_unicode_prefix
def window(timeColumn, windowDuration, slideDuration=None, startTime=None):
"""Bucketize rows into one or more time windows given a timestamp specifying column. Window
starts are inclusive but the window ends are exclusive, e.g. 12:05 will be in the window
[12:05,12:10) but not in [12:00,12:05). Windows can support microsecond precision. Windows in
the order of months are not supported.
The time column must be of :class:`pyspark.sql.types.TimestampType`.
Durations are provided as strings, e.g. '1 second', '1 day 12 hours', '2 minutes'. Valid
interval strings are 'week', 'day', 'hour', 'minute', 'second', 'millisecond', 'microsecond'.
If the ``slideDuration`` is not provided, the windows will be tumbling windows.
The startTime is the offset with respect to 1970-01-01 00:00:00 UTC with which to start
window intervals. For example, in order to have hourly tumbling windows that start 15 minutes
past the hour, e.g. 12:15-13:15, 13:15-14:15... provide `startTime` as `15 minutes`.
The output column will be a struct called 'window' by default with the nested columns 'start'
and 'end', where 'start' and 'end' will be of :class:`pyspark.sql.types.TimestampType`.
>>> df = spark.createDataFrame([("2016-03-11 09:00:07", 1)]).toDF("date", "val")
>>> w = df.groupBy(window("date", "5 seconds")).agg(sum("val").alias("sum"))
>>> w.select(w.window.start.cast("string").alias("start"),
... w.window.end.cast("string").alias("end"), "sum").collect()
[Row(start=u'2016-03-11 09:00:05', end=u'2016-03-11 09:00:10', sum=1)]
"""
def check_string_field(field, fieldName):
if not field or type(field) is not str:
raise TypeError("%s should be provided as a string" % fieldName)
sc = SparkContext._active_spark_context
time_col = _to_java_column(timeColumn)
check_string_field(windowDuration, "windowDuration")
if slideDuration and startTime:
check_string_field(slideDuration, "slideDuration")
check_string_field(startTime, "startTime")
res = sc._jvm.functions.window(time_col, windowDuration, slideDuration, startTime)
elif slideDuration:
check_string_field(slideDuration, "slideDuration")
res = sc._jvm.functions.window(time_col, windowDuration, slideDuration)
elif startTime:
check_string_field(startTime, "startTime")
res = sc._jvm.functions.window(time_col, windowDuration, windowDuration, startTime)
else:
res = sc._jvm.functions.window(time_col, windowDuration)
return Column(res)
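# Hedged usage sketch (not part of the original module): demonstrates a sliding
# window by also passing ``slideDuration``, so a row can fall into overlapping
# windows. Assumes an active SparkSession bound to ``spark``; the helper name
# ``_example_sliding_window`` is hypothetical.
def _example_sliding_window(spark):
    df = spark.createDataFrame([("2016-03-11 09:00:07", 1)]).toDF("date", "val")
    # The single event lands in two overlapping 10-second windows that slide
    # every 5 seconds: [09:00:00, 09:00:10) and [09:00:05, 09:00:15).
    return df.groupBy(window("date", "10 seconds", "5 seconds")).count().collect()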
# ---------------------------- misc functions ----------------------------------
@since(1.5)
@ignore_unicode_prefix
def crc32(col):
"""
Calculates the cyclic redundancy check value (CRC32) of a binary column and
returns the value as a bigint.
>>> spark.createDataFrame([('ABC',)], ['a']).select(crc32('a').alias('crc32')).collect()
[Row(crc32=2743272264)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.crc32(_to_java_column(col)))
@ignore_unicode_prefix
@since(1.5)
def md5(col):
"""Calculates the MD5 digest and returns the value as a 32 character hex string.
>>> spark.createDataFrame([('ABC',)], ['a']).select(md5('a').alias('hash')).collect()
[Row(hash=u'902fbdd2b1df0c4f70b4a5d23525e932')]
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.md5(_to_java_column(col))
return Column(jc)
@ignore_unicode_prefix
@since(1.5)
def sha1(col):
"""Returns the hex string result of SHA-1.
>>> spark.createDataFrame([('ABC',)], ['a']).select(sha1('a').alias('hash')).collect()
[Row(hash=u'3c01bdbb26f358bab27f267924aa2c9a03fcfdb8')]
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.sha1(_to_java_column(col))
return Column(jc)
@ignore_unicode_prefix
@since(1.5)
def sha2(col, numBits):
"""Returns the hex string result of SHA-2 family of hash functions (SHA-224, SHA-256, SHA-384,
and SHA-512). The numBits indicates the desired bit length of the result, which must have a
value of 224, 256, 384, 512, or 0 (which is equivalent to 256).
>>> digests = df.select(sha2(df.name, 256).alias('s')).collect()
>>> digests[0]
Row(s=u'3bc51062973c458d5a6f2d8d64a023246354ad7e064b1e4e009ec8a0699a3043')
>>> digests[1]
Row(s=u'cd9fb1e148ccd8442e5aa74904cc73bf6fb54d1d54d333bd596aa9bb4bb4e961')
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.sha2(_to_java_column(col), numBits)
return Column(jc)
@since(2.0)
def hash(*cols):
"""Calculates the hash code of given columns, and returns the result as an int column.
>>> spark.createDataFrame([('ABC',)], ['a']).select(hash('a').alias('hash')).collect()
[Row(hash=-757602832)]
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.hash(_to_seq(sc, cols, _to_java_column))
return Column(jc)
@since(3.0)
def xxhash64(*cols):
"""Calculates the hash code of given columns using the 64-bit variant of the xxHash algorithm,
and returns the result as a long column.
>>> spark.createDataFrame([('ABC',)], ['a']).select(xxhash64('a').alias('hash')).collect()
[Row(hash=4105715581806190027)]
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.xxhash64(_to_seq(sc, cols, _to_java_column))
return Column(jc)
# ---------------------- String/Binary functions ------------------------------
_string_functions = {
'upper': 'Converts a string expression to upper case.',
'lower': 'Converts a string expression to lower case.',
'ascii': 'Computes the numeric value of the first character of the string column.',
'base64': 'Computes the BASE64 encoding of a binary column and returns it as a string column.',
'unbase64': 'Decodes a BASE64 encoded string column and returns it as a binary column.',
'ltrim': 'Trim the spaces from left end for the specified string value.',
'rtrim': 'Trim the spaces from right end for the specified string value.',
'trim': 'Trim the spaces from both ends for the specified string column.',
}
for _name, _doc in _string_functions.items():
globals()[_name] = since(1.5)(_create_function_over_column(_name, _doc))
del _name, _doc
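# Hedged usage sketch (not part of the original module): the string helpers
# generated above (``upper``, ``lower``, ``trim``, ``ltrim``, ``rtrim``,
# ``ascii``, ``base64``, ``unbase64``) are plain one-column functions. Assumes
# an active SparkSession bound to ``spark``; the helper name is hypothetical.
def _example_generated_string_functions(spark):
    df = spark.createDataFrame([('  Spark SQL  ',)], ['s'])
    # expected: [Row(trimmed=u'Spark SQL', upper=u'  SPARK SQL  ')]
    return df.select(trim('s').alias('trimmed'), upper('s').alias('upper')).collect()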
@since(1.5)
@ignore_unicode_prefix
def concat_ws(sep, *cols):
"""
Concatenates multiple input string columns together into a single string column,
using the given separator.
>>> df = spark.createDataFrame([('abcd','123')], ['s', 'd'])
>>> df.select(concat_ws('-', df.s, df.d).alias('s')).collect()
[Row(s=u'abcd-123')]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.concat_ws(sep, _to_seq(sc, cols, _to_java_column)))
@since(1.5)
def decode(col, charset):
"""
Computes the first argument into a string from a binary using the provided character set
(one of 'US-ASCII', 'ISO-8859-1', 'UTF-8', 'UTF-16BE', 'UTF-16LE', 'UTF-16').
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.decode(_to_java_column(col), charset))
@since(1.5)
def encode(col, charset):
"""
Computes the first argument into a binary from a string using the provided character set
(one of 'US-ASCII', 'ISO-8859-1', 'UTF-8', 'UTF-16BE', 'UTF-16LE', 'UTF-16').
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.encode(_to_java_column(col), charset))
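# Hedged usage sketch (not part of the original module): ``decode`` and
# ``encode`` above have no doctests, so this shows a simple round trip from a
# string column to binary and back. Assumes an active SparkSession bound to
# ``spark``; the helper name is hypothetical.
def _example_encode_decode_roundtrip(spark):
    df = spark.createDataFrame([('ABC',)], ['s'])
    # expected: [Row(b=bytearray(b'ABC'), s2=u'ABC')]
    return df.select(
        encode('s', 'UTF-8').alias('b'),
        decode(encode('s', 'UTF-8'), 'UTF-8').alias('s2')).collect()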
@ignore_unicode_prefix
@since(1.5)
def format_number(col, d):
"""
Formats the number X to a format like '#,###,###.##', rounded to d decimal places
with HALF_EVEN round mode, and returns the result as a string.
:param col: the column name of the numeric value to be formatted
:param d: the number of decimal places to round to
>>> spark.createDataFrame([(5,)], ['a']).select(format_number('a', 4).alias('v')).collect()
[Row(v=u'5.0000')]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.format_number(_to_java_column(col), d))
@ignore_unicode_prefix
@since(1.5)
def format_string(format, *cols):
"""
Formats the arguments in printf-style and returns the result as a string column.
:param format: string that can contain embedded format tags and used as result column's value
:param cols: list of column names (string) or list of :class:`Column` expressions to
be used in formatting
>>> df = spark.createDataFrame([(5, "hello")], ['a', 'b'])
>>> df.select(format_string('%d %s', df.a, df.b).alias('v')).collect()
[Row(v=u'5 hello')]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.format_string(format, _to_seq(sc, cols, _to_java_column)))
@since(1.5)
def instr(str, substr):
"""
Locate the position of the first occurrence of substr column in the given string.
Returns null if either of the arguments is null.
.. note:: The position is not zero based, but 1 based index. Returns 0 if substr
could not be found in str.
>>> df = spark.createDataFrame([('abcd',)], ['s',])
>>> df.select(instr(df.s, 'b').alias('s')).collect()
[Row(s=2)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.instr(_to_java_column(str), substr))
@since(1.5)
@ignore_unicode_prefix
def substring(str, pos, len):
"""
Substring starts at `pos` and is of length `len` when str is String type or
returns the slice of byte array that starts at `pos` in byte and is of length `len`
when str is Binary type.
.. note:: The position is not zero based, but 1 based index.
>>> df = spark.createDataFrame([('abcd',)], ['s',])
>>> df.select(substring(df.s, 1, 2).alias('s')).collect()
[Row(s=u'ab')]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.substring(_to_java_column(str), pos, len))
@since(1.5)
@ignore_unicode_prefix
def substring_index(str, delim, count):
"""
Returns the substring from string str before count occurrences of the delimiter delim.
If count is positive, everything to the left of the final delimiter (counting from the left) is
returned. If count is negative, everything to the right of the final delimiter (counting from the
right) is returned. substring_index performs a case-sensitive match when searching for delim.
>>> df = spark.createDataFrame([('a.b.c.d',)], ['s'])
>>> df.select(substring_index(df.s, '.', 2).alias('s')).collect()
[Row(s=u'a.b')]
>>> df.select(substring_index(df.s, '.', -3).alias('s')).collect()
[Row(s=u'b.c.d')]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.substring_index(_to_java_column(str), delim, count))
@ignore_unicode_prefix
@since(1.5)
def levenshtein(left, right):
"""Computes the Levenshtein distance of the two given strings.
>>> df0 = spark.createDataFrame([('kitten', 'sitting',)], ['l', 'r'])
>>> df0.select(levenshtein('l', 'r').alias('d')).collect()
[Row(d=3)]
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.levenshtein(_to_java_column(left), _to_java_column(right))
return Column(jc)
@since(1.5)
def locate(substr, str, pos=1):
"""
Locate the position of the first occurrence of substr in a string column, after position pos.
.. note:: The position is not zero based, but 1 based index. Returns 0 if substr
could not be found in str.
:param substr: a string
:param str: a Column of :class:`pyspark.sql.types.StringType`
:param pos: start position (1 based)
>>> df = spark.createDataFrame([('abcd',)], ['s',])
>>> df.select(locate('b', df.s, 1).alias('s')).collect()
[Row(s=2)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.locate(substr, _to_java_column(str), pos))
@since(1.5)
@ignore_unicode_prefix
def lpad(col, len, pad):
"""
Left-pad the string column to width `len` with `pad`.
>>> df = spark.createDataFrame([('abcd',)], ['s',])
>>> df.select(lpad(df.s, 6, '#').alias('s')).collect()
[Row(s=u'##abcd')]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.lpad(_to_java_column(col), len, pad))
@since(1.5)
@ignore_unicode_prefix
def rpad(col, len, pad):
"""
Right-pad the string column to width `len` with `pad`.
>>> df = spark.createDataFrame([('abcd',)], ['s',])
>>> df.select(rpad(df.s, 6, '#').alias('s')).collect()
[Row(s=u'abcd##')]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.rpad(_to_java_column(col), len, pad))
@since(1.5)
@ignore_unicode_prefix
def repeat(col, n):
"""
Repeats a string column n times, and returns it as a new string column.
>>> df = spark.createDataFrame([('ab',)], ['s',])
>>> df.select(repeat(df.s, 3).alias('s')).collect()
[Row(s=u'ababab')]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.repeat(_to_java_column(col), n))
@since(1.5)
@ignore_unicode_prefix
def split(str, pattern, limit=-1):
"""
Splits str around matches of the given pattern.
:param str: a string expression to split
:param pattern: a string representing a regular expression. The regex string should be
a Java regular expression.
:param limit: an integer which controls the number of times `pattern` is applied.
* ``limit > 0``: The resulting array's length will not be more than `limit`, and the
resulting array's last entry will contain all input beyond the last
matched pattern.
* ``limit <= 0``: `pattern` will be applied as many times as possible, and the resulting
array can be of any size.
.. versionchanged:: 3.0
`split` now takes an optional `limit` field. If not provided, default limit value is -1.
>>> df = spark.createDataFrame([('oneAtwoBthreeC',)], ['s',])
>>> df.select(split(df.s, '[ABC]', 2).alias('s')).collect()
[Row(s=[u'one', u'twoBthreeC'])]
>>> df.select(split(df.s, '[ABC]', -1).alias('s')).collect()
[Row(s=[u'one', u'two', u'three', u''])]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.split(_to_java_column(str), pattern, limit))
@ignore_unicode_prefix
@since(1.5)
def regexp_extract(str, pattern, idx):
r"""Extract a specific group matched by a Java regex, from the specified string column.
If the regex did not match, or the specified group did not match, an empty string is returned.
>>> df = spark.createDataFrame([('100-200',)], ['str'])
>>> df.select(regexp_extract('str', r'(\d+)-(\d+)', 1).alias('d')).collect()
[Row(d=u'100')]
>>> df = spark.createDataFrame([('foo',)], ['str'])
>>> df.select(regexp_extract('str', r'(\d+)', 1).alias('d')).collect()
[Row(d=u'')]
>>> df = spark.createDataFrame([('aaaac',)], ['str'])
>>> df.select(regexp_extract('str', '(a+)(b)?(c)', 2).alias('d')).collect()
[Row(d=u'')]
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.regexp_extract(_to_java_column(str), pattern, idx)
return Column(jc)
@ignore_unicode_prefix
@since(1.5)
def regexp_replace(str, pattern, replacement):
r"""Replace all substrings of the specified string value that match regexp with rep.
>>> df = spark.createDataFrame([('100-200',)], ['str'])
>>> df.select(regexp_replace('str', r'(\d+)', '--').alias('d')).collect()
[Row(d=u'-----')]
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.regexp_replace(_to_java_column(str), pattern, replacement)
return Column(jc)
@ignore_unicode_prefix
@since(1.5)
def initcap(col):
"""Translate the first letter of each word to upper case in the sentence.
>>> spark.createDataFrame([('ab cd',)], ['a']).select(initcap("a").alias('v')).collect()
[Row(v=u'Ab Cd')]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.initcap(_to_java_column(col)))
@since(1.5)
@ignore_unicode_prefix
def soundex(col):
"""
Returns the SoundEx encoding for a string.
>>> df = spark.createDataFrame([("Peters",),("Uhrbach",)], ['name'])
>>> df.select(soundex(df.name).alias("soundex")).collect()
[Row(soundex=u'P362'), Row(soundex=u'U612')]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.soundex(_to_java_column(col)))
@ignore_unicode_prefix
@since(1.5)
def bin(col):
"""Returns the string representation of the binary value of the given column.
>>> df.select(bin(df.age).alias('c')).collect()
[Row(c=u'10'), Row(c=u'101')]
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.bin(_to_java_column(col))
return Column(jc)
@ignore_unicode_prefix
@since(1.5)
def hex(col):
"""Computes hex value of the given column, which could be :class:`pyspark.sql.types.StringType`,
:class:`pyspark.sql.types.BinaryType`, :class:`pyspark.sql.types.IntegerType` or
:class:`pyspark.sql.types.LongType`.
>>> spark.createDataFrame([('ABC', 3)], ['a', 'b']).select(hex('a'), hex('b')).collect()
[Row(hex(a)=u'414243', hex(b)=u'3')]
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.hex(_to_java_column(col))
return Column(jc)
@ignore_unicode_prefix
@since(1.5)
def unhex(col):
"""Inverse of hex. Interprets each pair of characters as a hexadecimal number
and converts to the byte representation of the number.
>>> spark.createDataFrame([('414243',)], ['a']).select(unhex('a')).collect()
[Row(unhex(a)=bytearray(b'ABC'))]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.unhex(_to_java_column(col)))
@ignore_unicode_prefix
@since(1.5)
def length(col):
"""Computes the character length of string data or number of bytes of binary data.
The length of character data includes the trailing spaces. The length of binary data
includes binary zeros.
>>> spark.createDataFrame([('ABC ',)], ['a']).select(length('a').alias('length')).collect()
[Row(length=4)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.length(_to_java_column(col)))
@ignore_unicode_prefix
@since(1.5)
def translate(srcCol, matching, replace):
A function that translates any character in `srcCol` to the character at the same position
in `replace`, based on where it appears in `matching`. The translation happens whenever a
character in the string matches a character in `matching`; when `replace` is shorter than
`matching`, unmatched characters are removed (as with 't' in the example below).
>>> spark.createDataFrame([('translate',)], ['a']).select(translate('a', "rnlt", "123") \\
... .alias('r')).collect()
[Row(r=u'1a2s3ae')]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.translate(_to_java_column(srcCol), matching, replace))
# ---------------------- Collection functions ------------------------------
@ignore_unicode_prefix
@since(2.0)
def create_map(*cols):
"""Creates a new map column.
:param cols: list of column names (string) or list of :class:`Column` expressions that are
grouped as key-value pairs, e.g. (key1, value1, key2, value2, ...).
>>> df.select(create_map('name', 'age').alias("map")).collect()
[Row(map={u'Alice': 2}), Row(map={u'Bob': 5})]
>>> df.select(create_map([df.name, df.age]).alias("map")).collect()
[Row(map={u'Alice': 2}), Row(map={u'Bob': 5})]
"""
sc = SparkContext._active_spark_context
if len(cols) == 1 and isinstance(cols[0], (list, set)):
cols = cols[0]
jc = sc._jvm.functions.map(_to_seq(sc, cols, _to_java_column))
return Column(jc)
@since(2.4)
def map_from_arrays(col1, col2):
"""Creates a new map from two arrays.
:param col1: name of column containing a set of keys. All elements should not be null
:param col2: name of column containing a set of values
>>> df = spark.createDataFrame([([2, 5], ['a', 'b'])], ['k', 'v'])
>>> df.select(map_from_arrays(df.k, df.v).alias("map")).show()
+----------------+
| map|
+----------------+
|[2 -> a, 5 -> b]|
+----------------+
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.map_from_arrays(_to_java_column(col1), _to_java_column(col2)))
@since(1.4)
def array(*cols):
"""Creates a new array column.
:param cols: list of column names (string) or list of :class:`Column` expressions that have
the same data type.
>>> df.select(array('age', 'age').alias("arr")).collect()
[Row(arr=[2, 2]), Row(arr=[5, 5])]
>>> df.select(array([df.age, df.age]).alias("arr")).collect()
[Row(arr=[2, 2]), Row(arr=[5, 5])]
"""
sc = SparkContext._active_spark_context
if len(cols) == 1 and isinstance(cols[0], (list, set)):
cols = cols[0]
jc = sc._jvm.functions.array(_to_seq(sc, cols, _to_java_column))
return Column(jc)
@since(1.5)
def array_contains(col, value):
"""
Collection function: returns null if the array is null, true if the array contains the
given value, and false otherwise.
:param col: name of column containing array
:param value: value to check for in array
>>> df = spark.createDataFrame([(["a", "b", "c"],), ([],)], ['data'])
>>> df.select(array_contains(df.data, "a")).collect()
[Row(array_contains(data, a)=True), Row(array_contains(data, a)=False)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.array_contains(_to_java_column(col), value))
@since(2.4)
def arrays_overlap(a1, a2):
"""
Collection function: returns true if the arrays contain any common non-null element; if not,
returns null if both the arrays are non-empty and any of them contains a null element; returns
false otherwise.
>>> df = spark.createDataFrame([(["a", "b"], ["b", "c"]), (["a"], ["b", "c"])], ['x', 'y'])
>>> df.select(arrays_overlap(df.x, df.y).alias("overlap")).collect()
[Row(overlap=True), Row(overlap=False)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.arrays_overlap(_to_java_column(a1), _to_java_column(a2)))
@since(2.4)
def slice(x, start, length):
"""
Collection function: returns an array containing all the elements in `x` from index `start`
(array indices start at 1, or from the end if `start` is negative) with the specified `length`.
>>> df = spark.createDataFrame([([1, 2, 3],), ([4, 5],)], ['x'])
>>> df.select(slice(df.x, 2, 2).alias("sliced")).collect()
[Row(sliced=[2, 3]), Row(sliced=[5])]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.slice(_to_java_column(x), start, length))
@ignore_unicode_prefix
@since(2.4)
def array_join(col, delimiter, null_replacement=None):
"""
Concatenates the elements of `col` using the `delimiter`. Null values are replaced with
`null_replacement` if set, otherwise they are ignored.
>>> df = spark.createDataFrame([(["a", "b", "c"],), (["a", None],)], ['data'])
>>> df.select(array_join(df.data, ",").alias("joined")).collect()
[Row(joined=u'a,b,c'), Row(joined=u'a')]
>>> df.select(array_join(df.data, ",", "NULL").alias("joined")).collect()
[Row(joined=u'a,b,c'), Row(joined=u'a,NULL')]
"""
sc = SparkContext._active_spark_context
if null_replacement is None:
return Column(sc._jvm.functions.array_join(_to_java_column(col), delimiter))
else:
return Column(sc._jvm.functions.array_join(
_to_java_column(col), delimiter, null_replacement))
@since(1.5)
@ignore_unicode_prefix
def concat(*cols):
"""
Concatenates multiple input columns together into a single column.
The function works with strings, binary and compatible array columns.
>>> df = spark.createDataFrame([('abcd','123')], ['s', 'd'])
>>> df.select(concat(df.s, df.d).alias('s')).collect()
[Row(s=u'abcd123')]
>>> df = spark.createDataFrame([([1, 2], [3, 4], [5]), ([1, 2], None, [3])], ['a', 'b', 'c'])
>>> df.select(concat(df.a, df.b, df.c).alias("arr")).collect()
[Row(arr=[1, 2, 3, 4, 5]), Row(arr=None)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.concat(_to_seq(sc, cols, _to_java_column)))
@since(2.4)
def array_position(col, value):
"""
Collection function: Locates the position of the first occurrence of the given value
in the given array. Returns null if either of the arguments is null.
.. note:: The position is not zero based, but 1 based index. Returns 0 if the given
value could not be found in the array.
>>> df = spark.createDataFrame([(["c", "b", "a"],), ([],)], ['data'])
>>> df.select(array_position(df.data, "a")).collect()
[Row(array_position(data, a)=3), Row(array_position(data, a)=0)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.array_position(_to_java_column(col), value))
@ignore_unicode_prefix
@since(2.4)
def element_at(col, extraction):
"""
Collection function: Returns element of array at given index in extraction if col is array.
Returns value for the given key in extraction if col is map.
:param col: name of column containing array or map
:param extraction: index to check for in array or key to check for in map
.. note:: The position is not zero based, but 1 based index.
>>> df = spark.createDataFrame([(["a", "b", "c"],), ([],)], ['data'])
>>> df.select(element_at(df.data, 1)).collect()
[Row(element_at(data, 1)=u'a'), Row(element_at(data, 1)=None)]
>>> df = spark.createDataFrame([({"a": 1.0, "b": 2.0},), ({},)], ['data'])
>>> df.select(element_at(df.data, "a")).collect()
[Row(element_at(data, a)=1.0), Row(element_at(data, a)=None)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.element_at(_to_java_column(col), extraction))
@since(2.4)
def array_remove(col, element):
"""
Collection function: Removes all elements equal to `element` from the given array.
:param col: name of column containing array
:param element: element to be removed from the array
>>> df = spark.createDataFrame([([1, 2, 3, 1, 1],), ([],)], ['data'])
>>> df.select(array_remove(df.data, 1)).collect()
[Row(array_remove(data, 1)=[2, 3]), Row(array_remove(data, 1)=[])]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.array_remove(_to_java_column(col), element))
@since(2.4)
def array_distinct(col):
"""
Collection function: removes duplicate values from the array.
:param col: name of column or expression
>>> df = spark.createDataFrame([([1, 2, 3, 2],), ([4, 5, 5, 4],)], ['data'])
>>> df.select(array_distinct(df.data)).collect()
[Row(array_distinct(data)=[1, 2, 3]), Row(array_distinct(data)=[4, 5])]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.array_distinct(_to_java_column(col)))
@ignore_unicode_prefix
@since(2.4)
def array_intersect(col1, col2):
"""
Collection function: returns an array of the elements in the intersection of col1 and col2,
without duplicates.
:param col1: name of column containing array
:param col2: name of column containing array
>>> from pyspark.sql import Row
>>> df = spark.createDataFrame([Row(c1=["b", "a", "c"], c2=["c", "d", "a", "f"])])
>>> df.select(array_intersect(df.c1, df.c2)).collect()
[Row(array_intersect(c1, c2)=[u'a', u'c'])]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.array_intersect(_to_java_column(col1), _to_java_column(col2)))
@ignore_unicode_prefix
@since(2.4)
def array_union(col1, col2):
"""
Collection function: returns an array of the elements in the union of col1 and col2,
without duplicates.
:param col1: name of column containing array
:param col2: name of column containing array
>>> from pyspark.sql import Row
>>> df = spark.createDataFrame([Row(c1=["b", "a", "c"], c2=["c", "d", "a", "f"])])
>>> df.select(array_union(df.c1, df.c2)).collect()
[Row(array_union(c1, c2)=[u'b', u'a', u'c', u'd', u'f'])]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.array_union(_to_java_column(col1), _to_java_column(col2)))
@ignore_unicode_prefix
@since(2.4)
def array_except(col1, col2):
"""
Collection function: returns an array of the elements in col1 but not in col2,
without duplicates.
:param col1: name of column containing array
:param col2: name of column containing array
>>> from pyspark.sql import Row
>>> df = spark.createDataFrame([Row(c1=["b", "a", "c"], c2=["c", "d", "a", "f"])])
>>> df.select(array_except(df.c1, df.c2)).collect()
[Row(array_except(c1, c2)=[u'b'])]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.array_except(_to_java_column(col1), _to_java_column(col2)))
@since(1.4)
def explode(col):
"""
Returns a new row for each element in the given array or map.
Uses the default column name `col` for elements in the array and
`key` and `value` for elements in the map unless specified otherwise.
>>> from pyspark.sql import Row
>>> eDF = spark.createDataFrame([Row(a=1, intlist=[1,2,3], mapfield={"a": "b"})])
>>> eDF.select(explode(eDF.intlist).alias("anInt")).collect()
[Row(anInt=1), Row(anInt=2), Row(anInt=3)]
>>> eDF.select(explode(eDF.mapfield).alias("key", "value")).show()
+---+-----+
|key|value|
+---+-----+
| a| b|
+---+-----+
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.explode(_to_java_column(col))
return Column(jc)
@since(2.1)
def posexplode(col):
"""
Returns a new row for each element with position in the given array or map.
Uses the default column name `pos` for position, and `col` for elements in the
array and `key` and `value` for elements in the map unless specified otherwise.
>>> from pyspark.sql import Row
>>> eDF = spark.createDataFrame([Row(a=1, intlist=[1,2,3], mapfield={"a": "b"})])
>>> eDF.select(posexplode(eDF.intlist)).collect()
[Row(pos=0, col=1), Row(pos=1, col=2), Row(pos=2, col=3)]
>>> eDF.select(posexplode(eDF.mapfield)).show()
+---+---+-----+
|pos|key|value|
+---+---+-----+
| 0| a| b|
+---+---+-----+
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.posexplode(_to_java_column(col))
return Column(jc)
@since(2.3)
def explode_outer(col):
"""
Returns a new row for each element in the given array or map.
Unlike explode, if the array/map is null or empty then null is produced.
Uses the default column name `col` for elements in the array and
`key` and `value` for elements in the map unless specified otherwise.
>>> df = spark.createDataFrame(
... [(1, ["foo", "bar"], {"x": 1.0}), (2, [], {}), (3, None, None)],
... ("id", "an_array", "a_map")
... )
>>> df.select("id", "an_array", explode_outer("a_map")).show()
+---+----------+----+-----+
| id| an_array| key|value|
+---+----------+----+-----+
| 1|[foo, bar]| x| 1.0|
| 2| []|null| null|
| 3| null|null| null|
+---+----------+----+-----+
>>> df.select("id", "a_map", explode_outer("an_array")).show()
+---+----------+----+
| id| a_map| col|
+---+----------+----+
| 1|[x -> 1.0]| foo|
| 1|[x -> 1.0]| bar|
| 2| []|null|
| 3| null|null|
+---+----------+----+
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.explode_outer(_to_java_column(col))
return Column(jc)
@since(2.3)
def posexplode_outer(col):
"""
Returns a new row for each element with position in the given array or map.
Unlike posexplode, if the array/map is null or empty then the row (null, null) is produced.
Uses the default column name `pos` for position, and `col` for elements in the
array and `key` and `value` for elements in the map unless specified otherwise.
>>> df = spark.createDataFrame(
... [(1, ["foo", "bar"], {"x": 1.0}), (2, [], {}), (3, None, None)],
... ("id", "an_array", "a_map")
... )
>>> df.select("id", "an_array", posexplode_outer("a_map")).show()
+---+----------+----+----+-----+
| id| an_array| pos| key|value|
+---+----------+----+----+-----+
| 1|[foo, bar]| 0| x| 1.0|
| 2| []|null|null| null|
| 3| null|null|null| null|
+---+----------+----+----+-----+
>>> df.select("id", "a_map", posexplode_outer("an_array")).show()
+---+----------+----+----+
| id| a_map| pos| col|
+---+----------+----+----+
| 1|[x -> 1.0]| 0| foo|
| 1|[x -> 1.0]| 1| bar|
| 2| []|null|null|
| 3| null|null|null|
+---+----------+----+----+
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.posexplode_outer(_to_java_column(col))
return Column(jc)
@ignore_unicode_prefix
@since(1.6)
def get_json_object(col, path):
"""
Extracts a json object from a json string based on the specified json path, and returns a json
string of the extracted json object. It will return null if the input json string is invalid.
:param col: string column in json format
:param path: path to the json object to extract
>>> data = [("1", '''{"f1": "value1", "f2": "value2"}'''), ("2", '''{"f1": "value12"}''')]
>>> df = spark.createDataFrame(data, ("key", "jstring"))
>>> df.select(df.key, get_json_object(df.jstring, '$.f1').alias("c0"), \\
... get_json_object(df.jstring, '$.f2').alias("c1") ).collect()
[Row(key=u'1', c0=u'value1', c1=u'value2'), Row(key=u'2', c0=u'value12', c1=None)]
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.get_json_object(_to_java_column(col), path)
return Column(jc)
@ignore_unicode_prefix
@since(1.6)
def json_tuple(col, *fields):
"""Creates a new row for a json column according to the given field names.
:param col: string column in json format
:param fields: list of fields to extract
>>> data = [("1", '''{"f1": "value1", "f2": "value2"}'''), ("2", '''{"f1": "value12"}''')]
>>> df = spark.createDataFrame(data, ("key", "jstring"))
>>> df.select(df.key, json_tuple(df.jstring, 'f1', 'f2')).collect()
[Row(key=u'1', c0=u'value1', c1=u'value2'), Row(key=u'2', c0=u'value12', c1=None)]
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.json_tuple(_to_java_column(col), _to_seq(sc, fields))
return Column(jc)
@ignore_unicode_prefix
@since(2.1)
def from_json(col, schema, options={}):
"""
Parses a column containing a JSON string into a :class:`MapType` with :class:`StringType`
as keys type, :class:`StructType` or :class:`ArrayType` with
the specified schema. Returns `null`, in the case of an unparseable string.
:param col: string column in json format
:param schema: a StructType or ArrayType of StructType to use when parsing the json column.
:param options: options to control parsing. accepts the same options as the json datasource
.. note:: Since Spark 2.3, the DDL-formatted string or a JSON format string is also
supported for ``schema``.
>>> from pyspark.sql.types import *
>>> data = [(1, '''{"a": 1}''')]
>>> schema = StructType([StructField("a", IntegerType())])
>>> df = spark.createDataFrame(data, ("key", "value"))
>>> df.select(from_json(df.value, schema).alias("json")).collect()
[Row(json=Row(a=1))]
>>> df.select(from_json(df.value, "a INT").alias("json")).collect()
[Row(json=Row(a=1))]
>>> df.select(from_json(df.value, "MAP<STRING,INT>").alias("json")).collect()
[Row(json={u'a': 1})]
>>> data = [(1, '''[{"a": 1}]''')]
>>> schema = ArrayType(StructType([StructField("a", IntegerType())]))
>>> df = spark.createDataFrame(data, ("key", "value"))
>>> df.select(from_json(df.value, schema).alias("json")).collect()
[Row(json=[Row(a=1)])]
>>> schema = schema_of_json(lit('''{"a": 0}'''))
>>> df.select(from_json(df.value, schema).alias("json")).collect()
[Row(json=Row(a=None))]
>>> data = [(1, '''[1, 2, 3]''')]
>>> schema = ArrayType(IntegerType())
>>> df = spark.createDataFrame(data, ("key", "value"))
>>> df.select(from_json(df.value, schema).alias("json")).collect()
[Row(json=[1, 2, 3])]
"""
sc = SparkContext._active_spark_context
if isinstance(schema, DataType):
schema = schema.json()
elif isinstance(schema, Column):
schema = _to_java_column(schema)
jc = sc._jvm.functions.from_json(_to_java_column(col), schema, _options_to_str(options))
return Column(jc)
@ignore_unicode_prefix
@since(2.1)
def to_json(col, options={}):
"""
Converts a column containing a :class:`StructType`, :class:`ArrayType` or a :class:`MapType`
into a JSON string. Throws an exception, in the case of an unsupported type.
:param col: name of column containing a struct, an array or a map.
:param options: options to control converting. accepts the same options as the JSON datasource.
Additionally the function supports the `pretty` option which enables
pretty JSON generation.
>>> from pyspark.sql import Row
>>> from pyspark.sql.types import *
>>> data = [(1, Row(name='Alice', age=2))]
>>> df = spark.createDataFrame(data, ("key", "value"))
>>> df.select(to_json(df.value).alias("json")).collect()
[Row(json=u'{"age":2,"name":"Alice"}')]
>>> data = [(1, [Row(name='Alice', age=2), Row(name='Bob', age=3)])]
>>> df = spark.createDataFrame(data, ("key", "value"))
>>> df.select(to_json(df.value).alias("json")).collect()
[Row(json=u'[{"age":2,"name":"Alice"},{"age":3,"name":"Bob"}]')]
>>> data = [(1, {"name": "Alice"})]
>>> df = spark.createDataFrame(data, ("key", "value"))
>>> df.select(to_json(df.value).alias("json")).collect()
[Row(json=u'{"name":"Alice"}')]
>>> data = [(1, [{"name": "Alice"}, {"name": "Bob"}])]
>>> df = spark.createDataFrame(data, ("key", "value"))
>>> df.select(to_json(df.value).alias("json")).collect()
[Row(json=u'[{"name":"Alice"},{"name":"Bob"}]')]
>>> data = [(1, ["Alice", "Bob"])]
>>> df = spark.createDataFrame(data, ("key", "value"))
>>> df.select(to_json(df.value).alias("json")).collect()
[Row(json=u'["Alice","Bob"]')]
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.to_json(_to_java_column(col), _options_to_str(options))
return Column(jc)
@ignore_unicode_prefix
@since(2.4)
def schema_of_json(json, options={}):
"""
Parses a JSON string and infers its schema in DDL format.
:param json: a JSON string or a string literal containing a JSON string.
:param options: options to control parsing. accepts the same options as the JSON datasource
.. versionchanged:: 3.0
It accepts `options` parameter to control schema inferring.
>>> df = spark.range(1)
>>> df.select(schema_of_json(lit('{"a": 0}')).alias("json")).collect()
[Row(json=u'struct<a:bigint>')]
>>> schema = schema_of_json('{a: 1}', {'allowUnquotedFieldNames':'true'})
>>> df.select(schema.alias("json")).collect()
[Row(json=u'struct<a:bigint>')]
"""
if isinstance(json, basestring):
col = _create_column_from_literal(json)
elif isinstance(json, Column):
col = _to_java_column(json)
else:
raise TypeError("schema argument should be a column or string")
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.schema_of_json(col, _options_to_str(options))
return Column(jc)
@ignore_unicode_prefix
@since(3.0)
def schema_of_csv(csv, options={}):
"""
Parses a CSV string and infers its schema in DDL format.
:param csv: a CSV string or a string literal containing a CSV string.
:param options: options to control parsing. accepts the same options as the CSV datasource
>>> df = spark.range(1)
>>> df.select(schema_of_csv(lit('1|a'), {'sep':'|'}).alias("csv")).collect()
[Row(csv=u'struct<_c0:int,_c1:string>')]
>>> df.select(schema_of_csv('1|a', {'sep':'|'}).alias("csv")).collect()
[Row(csv=u'struct<_c0:int,_c1:string>')]
"""
if isinstance(csv, basestring):
col = _create_column_from_literal(csv)
elif isinstance(csv, Column):
col = _to_java_column(csv)
else:
raise TypeError("schema argument should be a column or string")
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.schema_of_csv(col, _options_to_str(options))
return Column(jc)
@ignore_unicode_prefix
@since(3.0)
def to_csv(col, options={}):
"""
Converts a column containing a :class:`StructType` into a CSV string.
Throws an exception, in the case of an unsupported type.
:param col: name of column containing a struct.
:param options: options to control converting. accepts the same options as the CSV datasource.
>>> from pyspark.sql import Row
>>> data = [(1, Row(name='Alice', age=2))]
>>> df = spark.createDataFrame(data, ("key", "value"))
>>> df.select(to_csv(df.value).alias("csv")).collect()
[Row(csv=u'2,Alice')]
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.to_csv(_to_java_column(col), _options_to_str(options))
return Column(jc)
@since(1.5)
def size(col):
"""
Collection function: returns the length of the array or map stored in the column.
:param col: name of column or expression
>>> df = spark.createDataFrame([([1, 2, 3],),([1],),([],)], ['data'])
>>> df.select(size(df.data)).collect()
[Row(size(data)=3), Row(size(data)=1), Row(size(data)=0)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.size(_to_java_column(col)))
@since(2.4)
def array_min(col):
"""
Collection function: returns the minimum value of the array.
:param col: name of column or expression
>>> df = spark.createDataFrame([([2, 1, 3],), ([None, 10, -1],)], ['data'])
>>> df.select(array_min(df.data).alias('min')).collect()
[Row(min=1), Row(min=-1)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.array_min(_to_java_column(col)))
@since(2.4)
def array_max(col):
"""
Collection function: returns the maximum value of the array.
:param col: name of column or expression
>>> df = spark.createDataFrame([([2, 1, 3],), ([None, 10, -1],)], ['data'])
>>> df.select(array_max(df.data).alias('max')).collect()
[Row(max=3), Row(max=10)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.array_max(_to_java_column(col)))
@since(1.5)
def sort_array(col, asc=True):
"""
Collection function: sorts the input array in ascending or descending order according
to the natural ordering of the array elements. Null elements will be placed at the beginning
of the returned array in ascending order or at the end of the returned array in descending
order.
:param col: name of column or expression
>>> df = spark.createDataFrame([([2, 1, None, 3],),([1],),([],)], ['data'])
>>> df.select(sort_array(df.data).alias('r')).collect()
[Row(r=[None, 1, 2, 3]), Row(r=[1]), Row(r=[])]
>>> df.select(sort_array(df.data, asc=False).alias('r')).collect()
[Row(r=[3, 2, 1, None]), Row(r=[1]), Row(r=[])]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.sort_array(_to_java_column(col), asc))
@since(2.4)
def array_sort(col):
"""
Collection function: sorts the input array in ascending order. The elements of the input array
must be orderable. Null elements will be placed at the end of the returned array.
:param col: name of column or expression
>>> df = spark.createDataFrame([([2, 1, None, 3],),([1],),([],)], ['data'])
>>> df.select(array_sort(df.data).alias('r')).collect()
[Row(r=[1, 2, 3, None]), Row(r=[1]), Row(r=[])]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.array_sort(_to_java_column(col)))
@since(2.4)
def shuffle(col):
"""
Collection function: Generates a random permutation of the given array.
.. note:: The function is non-deterministic.
:param col: name of column or expression
>>> df = spark.createDataFrame([([1, 20, 3, 5],), ([1, 20, None, 3],)], ['data'])
>>> df.select(shuffle(df.data).alias('s')).collect() # doctest: +SKIP
[Row(s=[3, 1, 5, 20]), Row(s=[20, None, 3, 1])]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.shuffle(_to_java_column(col)))
@since(1.5)
@ignore_unicode_prefix
def reverse(col):
"""
Collection function: returns a reversed string or an array with reverse order of elements.
:param col: name of column or expression
>>> df = spark.createDataFrame([('Spark SQL',)], ['data'])
>>> df.select(reverse(df.data).alias('s')).collect()
[Row(s=u'LQS krapS')]
>>> df = spark.createDataFrame([([2, 1, 3],) ,([1],) ,([],)], ['data'])
>>> df.select(reverse(df.data).alias('r')).collect()
[Row(r=[3, 1, 2]), Row(r=[1]), Row(r=[])]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.reverse(_to_java_column(col)))
@since(2.4)
def flatten(col):
"""
Collection function: creates a single array from an array of arrays.
If a structure of nested arrays is deeper than two levels,
only one level of nesting is removed.
:param col: name of column or expression
>>> df = spark.createDataFrame([([[1, 2, 3], [4, 5], [6]],), ([None, [4, 5]],)], ['data'])
>>> df.select(flatten(df.data).alias('r')).collect()
[Row(r=[1, 2, 3, 4, 5, 6]), Row(r=None)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.flatten(_to_java_column(col)))
@since(2.3)
def map_keys(col):
"""
Collection function: Returns an unordered array containing the keys of the map.
:param col: name of column or expression
>>> from pyspark.sql.functions import map_keys
>>> df = spark.sql("SELECT map(1, 'a', 2, 'b') as data")
>>> df.select(map_keys("data").alias("keys")).show()
+------+
| keys|
+------+
|[1, 2]|
+------+
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.map_keys(_to_java_column(col)))
@since(2.3)
def map_values(col):
"""
Collection function: Returns an unordered array containing the values of the map.
:param col: name of column or expression
>>> from pyspark.sql.functions import map_values
>>> df = spark.sql("SELECT map(1, 'a', 2, 'b') as data")
>>> df.select(map_values("data").alias("values")).show()
+------+
|values|
+------+
|[a, b]|
+------+
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.map_values(_to_java_column(col)))
@since(3.0)
def map_entries(col):
"""
Collection function: Returns an unordered array of all entries in the given map.
:param col: name of column or expression
>>> from pyspark.sql.functions import map_entries
>>> df = spark.sql("SELECT map(1, 'a', 2, 'b') as data")
>>> df.select(map_entries("data").alias("entries")).show()
+----------------+
| entries|
+----------------+
|[[1, a], [2, b]]|
+----------------+
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.map_entries(_to_java_column(col)))
@since(2.4)
def map_from_entries(col):
"""
Collection function: Returns a map created from the given array of entries.
:param col: name of column or expression
>>> from pyspark.sql.functions import map_from_entries
>>> df = spark.sql("SELECT array(struct(1, 'a'), struct(2, 'b')) as data")
>>> df.select(map_from_entries("data").alias("map")).show()
+----------------+
| map|
+----------------+
|[1 -> a, 2 -> b]|
+----------------+
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.map_from_entries(_to_java_column(col)))
@ignore_unicode_prefix
@since(2.4)
def array_repeat(col, count):
"""
Collection function: creates an array containing a column repeated count times.
>>> df = spark.createDataFrame([('ab',)], ['data'])
>>> df.select(array_repeat(df.data, 3).alias('r')).collect()
[Row(r=[u'ab', u'ab', u'ab'])]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.array_repeat(
_to_java_column(col),
_to_java_column(count) if isinstance(count, Column) else count
))
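# Hedged usage sketch (not part of the original module): as the isinstance
# check above shows, ``count`` may also be a :class:`Column`, so the repeat
# count can vary per row. Assumes an active SparkSession bound to ``spark``;
# the helper name is hypothetical.
def _example_array_repeat_column_count(spark):
    df = spark.createDataFrame([('ab', 2), ('cd', 3)], ['data', 'n'])
    # expected: [Row(r=[u'ab', u'ab']), Row(r=[u'cd', u'cd', u'cd'])]
    return df.select(array_repeat(df.data, df.n).alias('r')).collect()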
@since(2.4)
def arrays_zip(*cols):
"""
Collection function: Returns a merged array of structs in which the N-th struct contains all
N-th values of input arrays.
:param cols: columns of arrays to be merged.
>>> from pyspark.sql.functions import arrays_zip
>>> df = spark.createDataFrame([(([1, 2, 3], [2, 3, 4]))], ['vals1', 'vals2'])
>>> df.select(arrays_zip(df.vals1, df.vals2).alias('zipped')).collect()
[Row(zipped=[Row(vals1=1, vals2=2), Row(vals1=2, vals2=3), Row(vals1=3, vals2=4)])]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.arrays_zip(_to_seq(sc, cols, _to_java_column)))
@since(2.4)
def map_concat(*cols):
"""Returns the union of all the given maps.
:param cols: list of column names (string) or list of :class:`Column` expressions
>>> from pyspark.sql.functions import map_concat
>>> df = spark.sql("SELECT map(1, 'a', 2, 'b') as map1, map(3, 'c', 1, 'd') as map2")
>>> df.select(map_concat("map1", "map2").alias("map3")).show(truncate=False)
+------------------------+
|map3 |
+------------------------+
|[1 -> d, 2 -> b, 3 -> c]|
+------------------------+
"""
sc = SparkContext._active_spark_context
if len(cols) == 1 and isinstance(cols[0], (list, set)):
cols = cols[0]
jc = sc._jvm.functions.map_concat(_to_seq(sc, cols, _to_java_column))
return Column(jc)
@since(2.4)
def sequence(start, stop, step=None):
"""
Generate a sequence of integers from `start` to `stop`, incrementing by `step`.
If `step` is not set, the sequence is incremented by 1 if `start` is less than or equal
to `stop`, and by -1 otherwise.
>>> df1 = spark.createDataFrame([(-2, 2)], ('C1', 'C2'))
>>> df1.select(sequence('C1', 'C2').alias('r')).collect()
[Row(r=[-2, -1, 0, 1, 2])]
>>> df2 = spark.createDataFrame([(4, -4, -2)], ('C1', 'C2', 'C3'))
>>> df2.select(sequence('C1', 'C2', 'C3').alias('r')).collect()
[Row(r=[4, 2, 0, -2, -4])]
"""
sc = SparkContext._active_spark_context
if step is None:
return Column(sc._jvm.functions.sequence(_to_java_column(start), _to_java_column(stop)))
else:
return Column(sc._jvm.functions.sequence(
_to_java_column(start), _to_java_column(stop), _to_java_column(step)))
@ignore_unicode_prefix
@since(3.0)
def from_csv(col, schema, options={}):
"""
Parses a column containing a CSV string to a row with the specified schema.
Returns `null`, in the case of an unparseable string.
:param col: string column in CSV format
:param schema: a string with schema in DDL format to use when parsing the CSV column.
:param options: options to control parsing. accepts the same options as the CSV datasource
>>> data = [("1,2,3",)]
>>> df = spark.createDataFrame(data, ("value",))
>>> df.select(from_csv(df.value, "a INT, b INT, c INT").alias("csv")).collect()
[Row(csv=Row(a=1, b=2, c=3))]
>>> value = data[0][0]
>>> df.select(from_csv(df.value, schema_of_csv(value)).alias("csv")).collect()
[Row(csv=Row(_c0=1, _c1=2, _c2=3))]
>>> data = [(" abc",)]
>>> df = spark.createDataFrame(data, ("value",))
>>> options = {'ignoreLeadingWhiteSpace': True}
>>> df.select(from_csv(df.value, "s string", options).alias("csv")).collect()
[Row(csv=Row(s=u'abc'))]
"""
sc = SparkContext._active_spark_context
if isinstance(schema, basestring):
schema = _create_column_from_literal(schema)
elif isinstance(schema, Column):
schema = _to_java_column(schema)
else:
raise TypeError("schema argument should be a column or string")
jc = sc._jvm.functions.from_csv(_to_java_column(col), schema, _options_to_str(options))
return Column(jc)
# ---------------------------- User Defined Function ----------------------------------
class PandasUDFType(object):
"""Pandas UDF Types. See :meth:`pyspark.sql.functions.pandas_udf`.
"""
SCALAR = PythonEvalType.SQL_SCALAR_PANDAS_UDF
SCALAR_ITER = PythonEvalType.SQL_SCALAR_PANDAS_ITER_UDF
GROUPED_MAP = PythonEvalType.SQL_GROUPED_MAP_PANDAS_UDF
COGROUPED_MAP = PythonEvalType.SQL_COGROUPED_MAP_PANDAS_UDF
GROUPED_AGG = PythonEvalType.SQL_GROUPED_AGG_PANDAS_UDF
MAP_ITER = PythonEvalType.SQL_MAP_PANDAS_ITER_UDF
@since(1.3)
def udf(f=None, returnType=StringType()):
"""Creates a user defined function (UDF).
.. note:: The user-defined functions are considered deterministic by default. Due to
optimization, duplicate invocations may be eliminated or the function may even be invoked
more times than it is present in the query. If your function is not deterministic, call
`asNondeterministic` on the user defined function. E.g.:
>>> from pyspark.sql.types import IntegerType
>>> import random
>>> random_udf = udf(lambda: int(random.random() * 100), IntegerType()).asNondeterministic()
.. note:: The user-defined functions do not support conditional expressions or short circuiting
in boolean expressions, and they end up being executed for all rows internally. If the functions
can fail on special rows, the workaround is to incorporate the condition into the functions.
.. note:: The user-defined functions do not take keyword arguments on the calling side.
:param f: python function if used as a standalone function
:param returnType: the return type of the user-defined function. The value can be either a
:class:`pyspark.sql.types.DataType` object or a DDL-formatted type string.
>>> from pyspark.sql.types import IntegerType
>>> slen = udf(lambda s: len(s), IntegerType())
>>> @udf
... def to_upper(s):
... if s is not None:
... return s.upper()
...
>>> @udf(returnType=IntegerType())
... def add_one(x):
... if x is not None:
... return x + 1
...
>>> df = spark.createDataFrame([(1, "John Doe", 21)], ("id", "name", "age"))
>>> df.select(slen("name").alias("slen(name)"), to_upper("name"), add_one("age")).show()
+----------+--------------+------------+
|slen(name)|to_upper(name)|add_one(age)|
+----------+--------------+------------+
| 8| JOHN DOE| 22|
+----------+--------------+------------+
"""
# The following table shows most of Python data and SQL type conversions in normal UDFs that
# are not yet visible to the user. Some of behaviors are buggy and might be changed in the near
# future. The table might have to be eventually documented externally.
# Please see SPARK-28131's PR to see the codes in order to generate the table below.
#
# +-----------------------------+--------------+----------+------+---------------+--------------------+-----------------------------+----------+----------------------+---------+--------------------+----------------------------+------------+--------------+------------------+----------------------+ # noqa
# |SQL Type \ Python Value(Type)|None(NoneType)|True(bool)|1(int)| a(str)| 1970-01-01(date)|1970-01-01 00:00:00(datetime)|1.0(float)|array('i', [1])(array)|[1](list)| (1,)(tuple)|bytearray(b'ABC')(bytearray)| 1(Decimal)|{'a': 1}(dict)|Row(kwargs=1)(Row)|Row(namedtuple=1)(Row)| # noqa
# +-----------------------------+--------------+----------+------+---------------+--------------------+-----------------------------+----------+----------------------+---------+--------------------+----------------------------+------------+--------------+------------------+----------------------+ # noqa
# | boolean| None| True| None| None| None| None| None| None| None| None| None| None| None| X| X| # noqa
# | tinyint| None| None| 1| None| None| None| None| None| None| None| None| None| None| X| X| # noqa
# | smallint| None| None| 1| None| None| None| None| None| None| None| None| None| None| X| X| # noqa
# | int| None| None| 1| None| None| None| None| None| None| None| None| None| None| X| X| # noqa
# | bigint| None| None| 1| None| None| None| None| None| None| None| None| None| None| X| X| # noqa
# | string| None| 'true'| '1'| 'a'|'java.util.Gregor...| 'java.util.Gregor...| '1.0'| '[I@66cbb73a'| '[1]'|'[Ljava.lang.Obje...| '[B@5a51eb1a'| '1'| '{a=1}'| X| X| # noqa
# | date| None| X| X| X|datetime.date(197...| datetime.date(197...| X| X| X| X| X| X| X| X| X| # noqa
# | timestamp| None| X| X| X| X| datetime.datetime...| X| X| X| X| X| X| X| X| X| # noqa
# | float| None| None| None| None| None| None| 1.0| None| None| None| None| None| None| X| X| # noqa
# | double| None| None| None| None| None| None| 1.0| None| None| None| None| None| None| X| X| # noqa
# | array<int>| None| None| None| None| None| None| None| [1]| [1]| [1]| [65, 66, 67]| None| None| X| X| # noqa
# | binary| None| None| None|bytearray(b'a')| None| None| None| None| None| None| bytearray(b'ABC')| None| None| X| X| # noqa
# | decimal(10,0)| None| None| None| None| None| None| None| None| None| None| None|Decimal('1')| None| X| X| # noqa
# | map<string,int>| None| None| None| None| None| None| None| None| None| None| None| None| {'a': 1}| X| X| # noqa
# | struct<_1:int>| None| X| X| X| X| X| X| X|Row(_1=1)| Row(_1=1)| X| X| Row(_1=None)| Row(_1=1)| Row(_1=1)| # noqa
# +-----------------------------+--------------+----------+------+---------------+--------------------+-----------------------------+----------+----------------------+---------+--------------------+----------------------------+------------+--------------+------------------+----------------------+ # noqa
#
# Note: DDL formatted string is used for 'SQL Type' for simplicity. This string can be
# used in `returnType`.
# Note: The values inside of the table are generated by `repr`.
# Note: 'X' means it throws an exception during the conversion.
# Note: Python 3.7.3 is used.
# decorator @udf, @udf(), @udf(dataType())
if f is None or isinstance(f, (str, DataType)):
# If DataType has been passed as a positional argument
# for decorator use it as a returnType
return_type = f or returnType
return functools.partial(_create_udf, returnType=return_type,
evalType=PythonEvalType.SQL_BATCHED_UDF)
else:
return _create_udf(f=f, returnType=returnType,
evalType=PythonEvalType.SQL_BATCHED_UDF)
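# Illustrative sketch (not part of the original module): the dispatch above supports
# three call forms; ``IntegerType`` and the bodies below are placeholder examples.
#
#     from pyspark.sql.types import IntegerType
#
#     plus_one = udf(lambda x: x + 1, IntegerType())   # plain call: f is the function
#
#     @udf(IntegerType())                               # decorator with a return type:
#     def plus_one(x):                                  # f is a DataType, so a partial
#         return x + 1                                  # of _create_udf is returned
#
#     @udf                                              # bare decorator: f is the function,
#     def to_str(x):                                    # returnType defaults to StringType()
#         return str(x)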
@since(2.3)
def pandas_udf(f=None, returnType=None, functionType=None):
"""
Creates a vectorized user defined function (UDF).
:param f: user-defined function. A python function if used as a standalone function
:param returnType: the return type of the user-defined function. The value can be either a
:class:`pyspark.sql.types.DataType` object or a DDL-formatted type string.
:param functionType: an enum value in :class:`pyspark.sql.functions.PandasUDFType`.
Default: SCALAR.
The function type of the UDF can be one of the following:
1. SCALAR
A scalar UDF defines a transformation: One or more `pandas.Series` -> A `pandas.Series`.
The length of the returned `pandas.Series` must be the same as that of the input `pandas.Series`.
If the return type is :class:`StructType`, the returned value should be a `pandas.DataFrame`.
:class:`MapType` and nested :class:`StructType` are currently not supported as output types.
Scalar UDFs can be used with :meth:`pyspark.sql.DataFrame.withColumn` and
:meth:`pyspark.sql.DataFrame.select`.
>>> from pyspark.sql.functions import pandas_udf, PandasUDFType
>>> from pyspark.sql.types import IntegerType, StringType
>>> slen = pandas_udf(lambda s: s.str.len(), IntegerType()) # doctest: +SKIP
>>> @pandas_udf(StringType()) # doctest: +SKIP
... def to_upper(s):
... return s.str.upper()
...
>>> @pandas_udf("integer", PandasUDFType.SCALAR) # doctest: +SKIP
... def add_one(x):
... return x + 1
...
>>> df = spark.createDataFrame([(1, "John Doe", 21)],
... ("id", "name", "age")) # doctest: +SKIP
>>> df.select(slen("name").alias("slen(name)"), to_upper("name"), add_one("age")) \\
... .show() # doctest: +SKIP
+----------+--------------+------------+
|slen(name)|to_upper(name)|add_one(age)|
+----------+--------------+------------+
| 8| JOHN DOE| 22|
+----------+--------------+------------+
>>> @pandas_udf("first string, last string") # doctest: +SKIP
... def split_expand(n):
... return n.str.split(expand=True)
>>> df.select(split_expand("name")).show() # doctest: +SKIP
+------------------+
|split_expand(name)|
+------------------+
| [John, Doe]|
+------------------+
.. note:: The length of `pandas.Series` within a scalar UDF is not that of the whole input
column, but is the length of an internal batch used for each call to the function.
Therefore, the batch length can be used, for example, to size each returned
`pandas.Series`, but it cannot be relied upon as the length of the whole column.
2. SCALAR_ITER
A scalar iterator UDF is semantically the same as the scalar Pandas UDF above except that the
wrapped Python function takes an iterator of batches as input instead of a single batch and,
instead of returning a single output batch, it yields output batches or explicitly returns a
generator or an iterator of output batches.
It is useful when the UDF execution requires initializing some state, e.g., loading a machine
learning model file to apply inference to every input batch.
.. note:: It is not guaranteed that one invocation of a scalar iterator UDF will process all
batches from one partition, although it is currently implemented this way.
Your code shall not rely on this behavior because it might change in the future for
further optimization, e.g., one invocation processes multiple partitions.
Scalar iterator UDFs are used with :meth:`pyspark.sql.DataFrame.withColumn` and
:meth:`pyspark.sql.DataFrame.select`.
>>> import pandas as pd # doctest: +SKIP
>>> from pyspark.sql.functions import col, pandas_udf, struct, PandasUDFType
>>> pdf = pd.DataFrame([1, 2, 3], columns=["x"]) # doctest: +SKIP
>>> df = spark.createDataFrame(pdf) # doctest: +SKIP
When the UDF is called with a single column that is not `StructType`, the input to the
underlying function is an iterator of `pd.Series`.
>>> @pandas_udf("long", PandasUDFType.SCALAR_ITER) # doctest: +SKIP
... def plus_one(batch_iter):
... for x in batch_iter:
... yield x + 1
...
>>> df.select(plus_one(col("x"))).show() # doctest: +SKIP
+-----------+
|plus_one(x)|
+-----------+
| 2|
| 3|
| 4|
+-----------+
When the UDF is called with more than one column, the input to the underlying function is an
iterator of `pd.Series` tuple.
>>> @pandas_udf("long", PandasUDFType.SCALAR_ITER) # doctest: +SKIP
... def multiply_two_cols(batch_iter):
... for a, b in batch_iter:
... yield a * b
...
>>> df.select(multiply_two_cols(col("x"), col("x"))).show() # doctest: +SKIP
+-----------------------+
|multiply_two_cols(x, x)|
+-----------------------+
| 1|
| 4|
| 9|
+-----------------------+
When the UDF is called with a single column that is `StructType`, the input to the underlying
function is an iterator of `pd.DataFrame`.
>>> @pandas_udf("long", PandasUDFType.SCALAR_ITER) # doctest: +SKIP
... def multiply_two_nested_cols(pdf_iter):
... for pdf in pdf_iter:
... yield pdf["a"] * pdf["b"]
...
>>> df.select(
... multiply_two_nested_cols(
... struct(col("x").alias("a"), col("x").alias("b"))
... ).alias("y")
... ).show() # doctest: +SKIP
+---+
| y|
+---+
| 1|
| 4|
| 9|
+---+
In the UDF, you can initialize some state before processing batches; wrap your code with
`try ... finally ...` or use context managers to ensure the release of resources at the end
or in case of early termination.
>>> y_bc = spark.sparkContext.broadcast(1) # doctest: +SKIP
>>> @pandas_udf("long", PandasUDFType.SCALAR_ITER) # doctest: +SKIP
... def plus_y(batch_iter):
... y = y_bc.value # initialize some state
... try:
... for x in batch_iter:
... yield x + y
... finally:
... pass # release resources here, if any
...
>>> df.select(plus_y(col("x"))).show() # doctest: +SKIP
+---------+
|plus_y(x)|
+---------+
| 2|
| 3|
| 4|
+---------+
3. GROUPED_MAP
A grouped map UDF defines a transformation: A `pandas.DataFrame` -> A `pandas.DataFrame`
The returnType should be a :class:`StructType` describing the schema of the returned
`pandas.DataFrame`. The column labels of the returned `pandas.DataFrame` must either match
the field names in the defined returnType schema if specified as strings, or match the
field data types by position if not strings, e.g. integer indices.
The length of the returned `pandas.DataFrame` can be arbitrary.
Grouped map UDFs are used with :meth:`pyspark.sql.GroupedData.apply`.
>>> from pyspark.sql.functions import pandas_udf, PandasUDFType
>>> df = spark.createDataFrame(
... [(1, 1.0), (1, 2.0), (2, 3.0), (2, 5.0), (2, 10.0)],
... ("id", "v")) # doctest: +SKIP
>>> @pandas_udf("id long, v double", PandasUDFType.GROUPED_MAP) # doctest: +SKIP
... def normalize(pdf):
... v = pdf.v
... return pdf.assign(v=(v - v.mean()) / v.std())
>>> df.groupby("id").apply(normalize).show() # doctest: +SKIP
+---+-------------------+
| id| v|
+---+-------------------+
| 1|-0.7071067811865475|
| 1| 0.7071067811865475|
| 2|-0.8320502943378437|
| 2|-0.2773500981126146|
| 2| 1.1094003924504583|
+---+-------------------+
Alternatively, the user can define a function that takes two arguments.
In this case, the grouping key(s) will be passed as the first argument and the data will
be passed as the second argument. The grouping key(s) will be passed as a tuple of numpy
data types, e.g., `numpy.int32` and `numpy.float64`. The data will still be passed in
as a `pandas.DataFrame` containing all columns from the original Spark DataFrame.
This is useful when the user does not want to hardcode grouping key(s) in the function.
>>> import pandas as pd # doctest: +SKIP
>>> from pyspark.sql.functions import pandas_udf, PandasUDFType
>>> df = spark.createDataFrame(
... [(1, 1.0), (1, 2.0), (2, 3.0), (2, 5.0), (2, 10.0)],
... ("id", "v")) # doctest: +SKIP
>>> @pandas_udf("id long, v double", PandasUDFType.GROUPED_MAP) # doctest: +SKIP
... def mean_udf(key, pdf):
... # key is a tuple of one numpy.int64, which is the value
... # of 'id' for the current group
... return pd.DataFrame([key + (pdf.v.mean(),)])
>>> df.groupby('id').apply(mean_udf).show() # doctest: +SKIP
+---+---+
| id| v|
+---+---+
| 1|1.5|
| 2|6.0|
+---+---+
>>> @pandas_udf(
... "id long, `ceil(v / 2)` long, v double",
... PandasUDFType.GROUPED_MAP) # doctest: +SKIP
... def sum_udf(key, pdf):
... # key is a tuple of two numpy.int64s, which is the values
... # of 'id' and 'ceil(df.v / 2)' for the current group
... return pd.DataFrame([key + (pdf.v.sum(),)])
>>> df.groupby(df.id, ceil(df.v / 2)).apply(sum_udf).show() # doctest: +SKIP
+---+-----------+----+
| id|ceil(v / 2)| v|
+---+-----------+----+
| 2| 5|10.0|
| 1| 1| 3.0|
| 2| 3| 5.0|
| 2| 2| 3.0|
+---+-----------+----+
.. note:: If returning a new `pandas.DataFrame` constructed with a dictionary, it is
recommended to explicitly index the columns by name to ensure the positions are correct,
or alternatively use an `OrderedDict`.
For example, `pd.DataFrame({'id': ids, 'a': data}, columns=['id', 'a'])` or
`pd.DataFrame(OrderedDict([('id', ids), ('a', data)]))`.
.. seealso:: :meth:`pyspark.sql.GroupedData.apply`
4. GROUPED_AGG
A grouped aggregate UDF defines a transformation: One or more `pandas.Series` -> A scalar
The `returnType` should be a primitive data type, e.g., :class:`DoubleType`.
The returned scalar can be either a python primitive type, e.g., `int` or `float`
or a numpy data type, e.g., `numpy.int64` or `numpy.float64`.
:class:`MapType` and :class:`StructType` are currently not supported as output types.
Group aggregate UDFs are used with :meth:`pyspark.sql.GroupedData.agg` and
:class:`pyspark.sql.Window`
This example shows using grouped aggregate UDFs with groupby:
>>> from pyspark.sql.functions import pandas_udf, PandasUDFType
>>> df = spark.createDataFrame(
... [(1, 1.0), (1, 2.0), (2, 3.0), (2, 5.0), (2, 10.0)],
... ("id", "v"))
>>> @pandas_udf("double", PandasUDFType.GROUPED_AGG) # doctest: +SKIP
... def mean_udf(v):
... return v.mean()
>>> df.groupby("id").agg(mean_udf(df['v'])).show() # doctest: +SKIP
+---+-----------+
| id|mean_udf(v)|
+---+-----------+
| 1| 1.5|
| 2| 6.0|
+---+-----------+
This example shows using grouped aggregate UDFs as window functions.
>>> from pyspark.sql.functions import pandas_udf, PandasUDFType
>>> from pyspark.sql import Window
>>> df = spark.createDataFrame(
... [(1, 1.0), (1, 2.0), (2, 3.0), (2, 5.0), (2, 10.0)],
... ("id", "v"))
>>> @pandas_udf("double", PandasUDFType.GROUPED_AGG) # doctest: +SKIP
... def mean_udf(v):
... return v.mean()
>>> w = (Window.partitionBy('id')
... .orderBy('v')
... .rowsBetween(-1, 0))
>>> df.withColumn('mean_v', mean_udf(df['v']).over(w)).show() # doctest: +SKIP
+---+----+------+
| id| v|mean_v|
+---+----+------+
| 1| 1.0| 1.0|
| 1| 2.0| 1.5|
| 2| 3.0| 3.0|
| 2| 5.0| 4.0|
| 2|10.0| 7.5|
+---+----+------+
.. note:: For performance reasons, the input series to window functions are not copied.
Therefore, mutating the input series is not allowed and will cause incorrect results.
For the same reason, users should also not rely on the index of the input series.
.. seealso:: :meth:`pyspark.sql.GroupedData.agg` and :class:`pyspark.sql.Window`
5. MAP_ITER
A map iterator Pandas UDF is used to transform data with an iterator of batches.
It can be used with :meth:`pyspark.sql.DataFrame.mapInPandas`.
It can return output of arbitrary length, in contrast to the scalar Pandas UDF.
It maps an iterator of batches in the current :class:`DataFrame` using a Pandas user-defined
function and returns the result as a :class:`DataFrame`.
The user-defined function should take an iterator of `pandas.DataFrame`\\s and return another
iterator of `pandas.DataFrame`\\s. All columns are passed together as an
iterator of `pandas.DataFrame`\\s to the user-defined function and the returned iterator of
`pandas.DataFrame`\\s is combined as a :class:`DataFrame`.
>>> df = spark.createDataFrame([(1, 21), (2, 30)],
... ("id", "age")) # doctest: +SKIP
>>> @pandas_udf(df.schema, PandasUDFType.MAP_ITER) # doctest: +SKIP
... def filter_func(batch_iter):
... for pdf in batch_iter:
... yield pdf[pdf.id == 1]
>>> df.mapInPandas(filter_func).show() # doctest: +SKIP
+---+---+
| id|age|
+---+---+
| 1| 21|
+---+---+
.. note:: The user-defined functions are considered deterministic by default. Due to
optimization, duplicate invocations may be eliminated or the function may even be invoked
more times than it is present in the query. If your function is not deterministic, call
`asNondeterministic` on the user defined function. E.g.:
>>> @pandas_udf('double', PandasUDFType.SCALAR) # doctest: +SKIP
... def random(v):
... import numpy as np
... import pandas as pd
... return pd.Series(np.random.randn(len(v)))
>>> random = random.asNondeterministic() # doctest: +SKIP
.. note:: The user-defined functions do not support conditional expressions or short circuiting
in boolean expressions; they end up being executed on all rows internally. If the functions
can fail on special rows, the workaround is to incorporate the condition into the functions.
.. note:: The user-defined functions do not take keyword arguments on the calling side.
.. note:: The data type of returned `pandas.Series` from the user-defined functions should be
matched with defined returnType (see :meth:`types.to_arrow_type` and
:meth:`types.from_arrow_type`). When there is mismatch between them, Spark might do
conversion on returned data. The conversion is not guaranteed to be correct and results
should be checked for accuracy by users.
"""
# The following table shows most of Pandas data and SQL type conversions in Pandas UDFs that
# are not yet visible to the user. Some of the behaviors are buggy and might be changed in the near
# future. The table might have to be eventually documented externally.
# Please see SPARK-28132's PR to see the codes in order to generate the table below.
#
# +-----------------------------+----------------------+------------------+------------------+------------------+--------------------+--------------------+------------------+------------------+------------------+------------------+--------------+--------------+--------------+-----------------------------------+-----------------------------------------------------+-----------------+--------------------+-----------------------------+--------------+-----------------+------------------+-----------+--------------------------------+ # noqa
# |SQL Type \ Pandas Value(Type)|None(object(NoneType))| True(bool)| 1(int8)| 1(int16)| 1(int32)| 1(int64)| 1(uint8)| 1(uint16)| 1(uint32)| 1(uint64)| 1.0(float16)| 1.0(float32)| 1.0(float64)|1970-01-01 00:00:00(datetime64[ns])|1970-01-01 00:00:00-05:00(datetime64[ns, US/Eastern])|a(object(string))| 1(object(Decimal))|[1 2 3](object(array[int32]))| 1.0(float128)|(1+0j)(complex64)|(1+0j)(complex128)|A(category)|1 days 00:00:00(timedelta64[ns])| # noqa
# +-----------------------------+----------------------+------------------+------------------+------------------+--------------------+--------------------+------------------+------------------+------------------+------------------+--------------+--------------+--------------+-----------------------------------+-----------------------------------------------------+-----------------+--------------------+-----------------------------+--------------+-----------------+------------------+-----------+--------------------------------+ # noqa
# | boolean| None| True| True| True| True| True| True| True| True| True| True| True| True| X| X| X| X| X| X| X| X| X| X| # noqa
# | tinyint| None| 1| 1| 1| 1| 1| 1| 1| 1| 1| 1| 1| 1| X| X| X| 1| X| X| X| X| 0| X| # noqa
# | smallint| None| 1| 1| 1| 1| 1| 1| 1| 1| 1| 1| 1| 1| X| X| X| 1| X| X| X| X| X| X| # noqa
# | int| None| 1| 1| 1| 1| 1| 1| 1| 1| 1| 1| 1| 1| X| X| X| 1| X| X| X| X| X| X| # noqa
# | bigint| None| 1| 1| 1| 1| 1| 1| 1| 1| 1| 1| 1| 1| 0| 18000000000000| X| 1| X| X| X| X| X| X| # noqa
# | float| None| 1.0| 1.0| 1.0| 1.0| 1.0| 1.0| 1.0| 1.0| 1.0| 1.0| 1.0| 1.0| X| X| X| X| X| X| X| X| X| X| # noqa
# | double| None| 1.0| 1.0| 1.0| 1.0| 1.0| 1.0| 1.0| 1.0| 1.0| 1.0| 1.0| 1.0| X| X| X| X| X| X| X| X| X| X| # noqa
# | date| None| X| X| X|datetime.date(197...| X| X| X| X| X| X| X| X| datetime.date(197...| datetime.date(197...| X|datetime.date(197...| X| X| X| X| X| X| # noqa
# | timestamp| None| X| X| X| X|datetime.datetime...| X| X| X| X| X| X| X| datetime.datetime...| datetime.datetime...| X|datetime.datetime...| X| X| X| X| X| X| # noqa
# | string| None| ''| ''| ''| '\x01'| '\x01'| ''| ''| '\x01'| '\x01'| ''| ''| ''| X| X| 'a'| X| X| ''| X| ''| X| X| # noqa
# | decimal(10,0)| None| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| Decimal('1')| X| X| X| X| X| X| # noqa
# | array<int>| None| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| [1, 2, 3]| X| X| X| X| X| # noqa
# | map<string,int>| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| # noqa
# | struct<_1:int>| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| # noqa
# | binary| None|bytearray(b'\x01')|bytearray(b'\x01')|bytearray(b'\x01')| bytearray(b'\x01')| bytearray(b'\x01')|bytearray(b'\x01')|bytearray(b'\x01')|bytearray(b'\x01')|bytearray(b'\x01')|bytearray(b'')|bytearray(b'')|bytearray(b'')| bytearray(b'')| bytearray(b'')| bytearray(b'a')| X| X|bytearray(b'')| bytearray(b'')| bytearray(b'')| X| bytearray(b'')| # noqa
# +-----------------------------+----------------------+------------------+------------------+------------------+--------------------+--------------------+------------------+------------------+------------------+------------------+--------------+--------------+--------------+-----------------------------------+-----------------------------------------------------+-----------------+--------------------+-----------------------------+--------------+-----------------+------------------+-----------+--------------------------------+ # noqa
#
# Note: DDL formatted string is used for 'SQL Type' for simplicity. This string can be
# used in `returnType`.
# Note: The values inside of the table are generated by `repr`.
# Note: Python 3.7.3, Pandas 0.24.2 and PyArrow 0.13.0 are used.
# Note: Timezone is KST.
# Note: 'X' means it throws an exception during the conversion.
# decorator @pandas_udf(returnType, functionType)
is_decorator = f is None or isinstance(f, (str, DataType))
if is_decorator:
# If DataType has been passed as a positional argument
# for decorator use it as a returnType
return_type = f or returnType
if functionType is not None:
# @pandas_udf(dataType, functionType=functionType)
# @pandas_udf(returnType=dataType, functionType=functionType)
eval_type = functionType
elif returnType is not None and isinstance(returnType, int):
# @pandas_udf(dataType, functionType)
eval_type = returnType
else:
# @pandas_udf(dataType) or @pandas_udf(returnType=dataType)
eval_type = PythonEvalType.SQL_SCALAR_PANDAS_UDF
else:
return_type = returnType
if functionType is not None:
eval_type = functionType
else:
eval_type = PythonEvalType.SQL_SCALAR_PANDAS_UDF
if return_type is None:
raise ValueError("Invalid returnType: returnType can not be None")
if eval_type not in [PythonEvalType.SQL_SCALAR_PANDAS_UDF,
PythonEvalType.SQL_SCALAR_PANDAS_ITER_UDF,
PythonEvalType.SQL_GROUPED_MAP_PANDAS_UDF,
PythonEvalType.SQL_GROUPED_AGG_PANDAS_UDF,
PythonEvalType.SQL_MAP_PANDAS_ITER_UDF,
PythonEvalType.SQL_COGROUPED_MAP_PANDAS_UDF]:
raise ValueError("Invalid functionType: "
"functionType must be one the values from PandasUDFType")
if is_decorator:
return functools.partial(_create_udf, returnType=return_type, evalType=eval_type)
else:
return _create_udf(f=f, returnType=return_type, evalType=eval_type)
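# Illustrative sketch (not part of the original module): given the dispatch above, the
# following forms are assumed to resolve to the same grouped-aggregate eval type; the
# "double" return type and ``v.mean()`` body are placeholder examples.
#
#     @pandas_udf("double", PandasUDFType.GROUPED_AGG)                # positional functionType
#     def mean_udf(v):
#         return v.mean()
#
#     @pandas_udf("double", functionType=PandasUDFType.GROUPED_AGG)   # keyword functionType
#     def mean_udf(v):
#         return v.mean()
#
#     mean_udf = pandas_udf(lambda v: v.mean(), "double",
#                           PandasUDFType.GROUPED_AGG)                # non-decorator form
#
# Omitting the function type (e.g. ``@pandas_udf("double")``) falls back to
# PythonEvalType.SQL_SCALAR_PANDAS_UDF via the final ``else`` branches above.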
blacklist = ['map', 'since', 'ignore_unicode_prefix']
__all__ = [k for k, v in globals().items()
if not k.startswith('_') and k[0].islower() and callable(v) and k not in blacklist]
__all__ += ["PandasUDFType"]
__all__.sort()
def _test():
import doctest
from pyspark.sql import Row, SparkSession
import pyspark.sql.functions
globs = pyspark.sql.functions.__dict__.copy()
spark = SparkSession.builder\
.master("local[4]")\
.appName("sql.functions tests")\
.getOrCreate()
sc = spark.sparkContext
globs['sc'] = sc
globs['spark'] = spark
globs['df'] = spark.createDataFrame([Row(name='Alice', age=2), Row(name='Bob', age=5)])
spark.conf.set("spark.sql.legacy.utcTimestampFunc.enabled", "true")
(failure_count, test_count) = doctest.testmod(
pyspark.sql.functions, globs=globs,
optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE)
spark.conf.unset("spark.sql.legacy.utcTimestampFunc.enabled")
spark.stop()
if failure_count:
sys.exit(-1)
if __name__ == "__main__":
_test()
| apache-2.0 |
raghavrv/scikit-learn | benchmarks/bench_saga.py | 45 | 8474 | """Author: Arthur Mensch
Benchmarks of sklearn SAGA vs lightning SAGA vs Liblinear. Shows the gain
in using multinomial logistic regression in terms of learning time.
"""
import json
import time
from os.path import expanduser
import matplotlib.pyplot as plt
import numpy as np
from sklearn.datasets import fetch_rcv1, load_iris, load_digits, \
fetch_20newsgroups_vectorized
from sklearn.externals.joblib import delayed, Parallel, Memory
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import log_loss
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelBinarizer, LabelEncoder
from sklearn.utils.extmath import safe_sparse_dot, softmax
def fit_single(solver, X, y, penalty='l2', single_target=True, C=1,
max_iter=10, skip_slow=False):
if skip_slow and solver == 'lightning' and penalty == 'l1':
print('Skipping l1 logistic regression with solver lightning.')
return
print('Solving %s logistic regression with penalty %s, solver %s.'
% ('binary' if single_target else 'multinomial',
penalty, solver))
if solver == 'lightning':
from lightning.classification import SAGAClassifier
if single_target or solver not in ['sag', 'saga']:
multi_class = 'ovr'
else:
multi_class = 'multinomial'
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42,
stratify=y)
n_samples = X_train.shape[0]
n_classes = np.unique(y_train).shape[0]
test_scores = [1]
train_scores = [1]
accuracies = [1 / n_classes]
times = [0]
if penalty == 'l2':
alpha = 1. / (C * n_samples)
beta = 0
lightning_penalty = None
else:
alpha = 0.
beta = 1. / (C * n_samples)
lightning_penalty = 'l1'
for this_max_iter in range(1, max_iter + 1, 2):
print('[%s, %s, %s] Max iter: %s' %
('binary' if single_target else 'multinomial',
penalty, solver, this_max_iter))
if solver == 'lightning':
lr = SAGAClassifier(loss='log', alpha=alpha, beta=beta,
penalty=lightning_penalty,
tol=-1, max_iter=this_max_iter)
else:
lr = LogisticRegression(solver=solver,
multi_class=multi_class,
C=C,
penalty=penalty,
fit_intercept=False, tol=1e-24,
max_iter=this_max_iter,
random_state=42,
)
t0 = time.clock()
lr.fit(X_train, y_train)
train_time = time.clock() - t0
scores = []
for (X, y) in [(X_train, y_train), (X_test, y_test)]:
try:
y_pred = lr.predict_proba(X)
except NotImplementedError:
# Lightning predict_proba is not implemented for n_classes > 2
y_pred = _predict_proba(lr, X)
score = log_loss(y, y_pred, normalize=False) / n_samples
score += (0.5 * alpha * np.sum(lr.coef_ ** 2) +
beta * np.sum(np.abs(lr.coef_)))
scores.append(score)
train_score, test_score = tuple(scores)
y_pred = lr.predict(X_test)
accuracy = np.sum(y_pred == y_test) / y_test.shape[0]
test_scores.append(test_score)
train_scores.append(train_score)
accuracies.append(accuracy)
times.append(train_time)
return lr, times, train_scores, test_scores, accuracies
def _predict_proba(lr, X):
pred = safe_sparse_dot(X, lr.coef_.T)
if hasattr(lr, "intercept_"):
pred += lr.intercept_
return softmax(pred)
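# Illustrative note (added for clarity): ``_predict_proba`` mirrors the multinomial
# probability computation -- a linear decision function followed by a softmax -- so for
# a fitted multinomial ``LogisticRegression`` ``lr`` (an assumption for illustration)
# one would expect approximately:
#
#     np.allclose(_predict_proba(lr, X), lr.predict_proba(X))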
def exp(solvers, penalties, single_target, n_samples=30000, max_iter=20,
dataset='rcv1', n_jobs=1, skip_slow=False):
mem = Memory(cachedir=expanduser('~/cache'), verbose=0)
if dataset == 'rcv1':
rcv1 = fetch_rcv1()
lbin = LabelBinarizer()
lbin.fit(rcv1.target_names)
X = rcv1.data
y = rcv1.target
y = lbin.inverse_transform(y)
le = LabelEncoder()
y = le.fit_transform(y)
if single_target:
y_n = y.copy()
y_n[y > 16] = 1
y_n[y <= 16] = 0
y = y_n
elif dataset == 'digits':
digits = load_digits()
X, y = digits.data, digits.target
if single_target:
y_n = y.copy()
y_n[y < 5] = 1
y_n[y >= 5] = 0
y = y_n
elif dataset == 'iris':
iris = load_iris()
X, y = iris.data, iris.target
elif dataset == '20newspaper':
ng = fetch_20newsgroups_vectorized()
X = ng.data
y = ng.target
if single_target:
y_n = y.copy()
y_n[y > 4] = 1
y_n[y <= 16] = 0
y = y_n
X = X[:n_samples]
y = y[:n_samples]
cached_fit = mem.cache(fit_single)
out = Parallel(n_jobs=n_jobs, mmap_mode=None)(
delayed(cached_fit)(solver, X, y,
penalty=penalty, single_target=single_target,
C=1, max_iter=max_iter, skip_slow=skip_slow)
for solver in solvers
for penalty in penalties)
res = []
idx = 0
for solver in solvers:
for penalty in penalties:
if not (skip_slow and solver == 'lightning' and penalty == 'l1'):
lr, times, train_scores, test_scores, accuracies = out[idx]
this_res = dict(solver=solver, penalty=penalty,
single_target=single_target,
times=times, train_scores=train_scores,
test_scores=test_scores,
accuracies=accuracies)
res.append(this_res)
idx += 1
with open('bench_saga.json', 'w+') as f:
json.dump(res, f)
def plot():
import pandas as pd
with open('bench_saga.json', 'r') as f:
f = json.load(f)
res = pd.DataFrame(f)
res.set_index(['single_target', 'penalty'], inplace=True)
grouped = res.groupby(level=['single_target', 'penalty'])
colors = {'saga': 'blue', 'liblinear': 'orange', 'lightning': 'green'}
for idx, group in grouped:
single_target, penalty = idx
fig = plt.figure(figsize=(12, 4))
ax = fig.add_subplot(131)
train_scores = group['train_scores'].values
ref = np.min(np.concatenate(train_scores)) * 0.999
for scores, times, solver in zip(group['train_scores'], group['times'],
group['solver']):
scores = scores / ref - 1
ax.plot(times, scores, label=solver, color=colors[solver])
ax.set_xlabel('Time (s)')
ax.set_ylabel('Training objective (relative to min)')
ax.set_yscale('log')
ax = fig.add_subplot(132)
test_scores = group['test_scores'].values
ref = np.min(np.concatenate(test_scores)) * 0.999
for scores, times, solver in zip(group['test_scores'], group['times'],
group['solver']):
scores = scores / ref - 1
ax.plot(times, scores, label=solver, color=colors[solver])
ax.set_xlabel('Time (s)')
ax.set_ylabel('Test objective (relative to min)')
ax.set_yscale('log')
ax = fig.add_subplot(133)
for accuracy, times, solver in zip(group['accuracies'], group['times'],
group['solver']):
ax.plot(times, accuracy, label=solver, color=colors[solver])
ax.set_xlabel('Time (s)')
ax.set_ylabel('Test accuracy')
ax.legend()
name = 'single_target' if single_target else 'multi_target'
name += '_%s' % penalty
plt.suptitle(name)
name += '.png'
fig.tight_layout()
fig.subplots_adjust(top=0.9)
plt.savefig(name)
plt.close(fig)
if __name__ == '__main__':
solvers = ['saga', 'liblinear', 'lightning']
penalties = ['l1', 'l2']
single_target = True
exp(solvers, penalties, single_target, n_samples=None, n_jobs=1,
dataset='20newspaper', max_iter=20)
plot()
| bsd-3-clause |
gfyoung/pandas | scripts/generate_pip_deps_from_conda.py | 6 | 4236 | #!/usr/bin/env python3
"""
Convert the conda environment.yml to the pip requirements-dev.txt,
or check that they have the same packages (for the CI)
Usage:
Generate `requirements-dev.txt`
$ python scripts/generate_pip_deps_from_conda.py
Compare and fail (exit status != 0) if `requirements-dev.txt` has not been
generated with this script:
$ python scripts/generate_pip_deps_from_conda.py --compare
"""
import argparse
import os
import re
import sys
import yaml
EXCLUDE = {"python", "c-compiler", "cxx-compiler"}
RENAME = {"pytables": "tables", "pyqt": "pyqt5", "dask-core": "dask"}
def conda_package_to_pip(package):
"""
Convert a conda package to its pip equivalent.
In most cases they are the same, those are the exceptions:
- Packages that should be excluded (in `EXCLUDE`)
- Packages that should be renamed (in `RENAME`)
- A package requiring a specific version, in conda is defined with a single
equal (e.g. ``pandas=1.0``) and in pip with two (e.g. ``pandas==1.0``)
"""
package = re.sub("(?<=[^<>])=", "==", package).strip()
for compare in ("<=", ">=", "=="):
if compare not in package:
continue
pkg, version = package.split(compare)
if pkg in EXCLUDE:
return
if pkg in RENAME:
return "".join((RENAME[pkg], compare, version))
break
if package in EXCLUDE:
return
if package in RENAME:
return RENAME[package]
return package
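# Illustrative examples (added for clarity, not part of the original script), following
# the EXCLUDE/RENAME rules above:
#
#     conda_package_to_pip("numpy=1.20")     # -> "numpy==1.20"   (single '=' becomes '==')
#     conda_package_to_pip("pytables>=3.6")  # -> "tables>=3.6"   (renamed via RENAME)
#     conda_package_to_pip("python=3.8")     # -> None            (excluded via EXCLUDE)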
def main(conda_fname, pip_fname, compare=False):
"""
Generate the pip dependencies file from the conda file, or compare that
they are synchronized (``compare=True``).
Parameters
----------
conda_fname : str
Path to the conda file with dependencies (e.g. `environment.yml`).
pip_fname : str
Path to the pip file with dependencies (e.g. `requirements-dev.txt`).
compare : bool, default False
Whether to generate the pip file (``False``) or to compare if the
pip file has been generated with this script and the last version
of the conda file (``True``).
Returns
-------
bool
True if the comparison fails, False otherwise
"""
with open(conda_fname) as conda_fd:
deps = yaml.safe_load(conda_fd)["dependencies"]
pip_deps = []
for dep in deps:
if isinstance(dep, str):
conda_dep = conda_package_to_pip(dep)
if conda_dep:
pip_deps.append(conda_dep)
elif isinstance(dep, dict) and len(dep) == 1 and "pip" in dep:
pip_deps += dep["pip"]
else:
raise ValueError(f"Unexpected dependency {dep}")
fname = os.path.split(conda_fname)[1]
header = (
f"# This file is auto-generated from {fname}, do not modify.\n"
"# See that file for comments about the need/usage of each dependency.\n\n"
)
pip_content = header + "\n".join(pip_deps) + "\n"
if compare:
with open(pip_fname) as pip_fd:
return pip_content != pip_fd.read()
else:
with open(pip_fname, "w") as pip_fd:
pip_fd.write(pip_content)
return False
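# Minimal usage sketch (illustrative; mirrors the ``__main__`` block below):
#
#     out_of_sync = main("environment.yml", "requirements-dev.txt", compare=True)
#     if out_of_sync:
#         print("requirements-dev.txt needs to be regenerated")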
if __name__ == "__main__":
argparser = argparse.ArgumentParser(
description="convert (or compare) conda file to pip"
)
argparser.add_argument(
"--compare",
action="store_true",
help="compare whether the two files are equivalent",
)
argparser.add_argument(
"--azure", action="store_true", help="show the output in azure-pipelines format"
)
args = argparser.parse_args()
repo_path = os.path.dirname(os.path.abspath(os.path.dirname(__file__)))
res = main(
os.path.join(repo_path, "environment.yml"),
os.path.join(repo_path, "requirements-dev.txt"),
compare=args.compare,
)
if res:
msg = (
f"`requirements-dev.txt` has to be generated with `{sys.argv[0]}` after "
"`environment.yml` is modified.\n"
)
if args.azure:
msg = (
f"##vso[task.logissue type=error;sourcepath=requirements-dev.txt]{msg}"
)
sys.stderr.write(msg)
sys.exit(res)
| bsd-3-clause |
jgillis/casadi | experimental/joel/nlp_benchmarks/clnlbeam.py | 1 | 3051 | #
# This file is part of CasADi.
#
# CasADi -- A symbolic framework for dynamic optimization.
# Copyright (C) 2010 by Joel Andersson, Moritz Diehl, K.U.Leuven. All rights reserved.
#
# CasADi is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 3 of the License, or (at your option) any later version.
#
# CasADi is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with CasADi; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
#
# -*- coding: utf-8 -*-
from casadi import *
import numpy as NP
import matplotlib.pyplot as plt
# This is a CasADi version of clnlbeam.mod from the cute test collection, original by Hande Y. Benson
#
# H. Maurer and H.D. Mittelman,
# "The non-linear beam via optimal control with bound state variables",
# Optimal Control Applications and Methods 12, pp. 19-31, 1991.
#ni = 500 # original
#ni = 20000 # web
ni = 500 # cuter
alpha = 350.0
h = 1./ni
t = ssym("t", ni+1)
t_lb = -1.0 * NP.ones(ni+1)
t_ub = 1.0 * NP.ones(ni+1)
t_guess = NP.array(list(0.05*cos(i*h) for i in range(ni+1)))
t_lb[0] = 0; t_ub[0] = 0
t_lb[ni] = 0; t_ub[ni] = 0
x = ssym("x", ni+1)
x_lb = -0.05 * NP.ones(ni+1)
x_ub = 0.05 * NP.ones(ni+1)
x_guess = NP.array(list(0.05*cos(i*h) for i in range(ni+1)))
x_lb[0] = 0; x_ub[0] = 0
x_lb[ni] = 0; x_ub[ni] = 0
u = ssym("u", ni+1)
u_lb = -inf * NP.ones(ni+1)
u_ub = inf * NP.ones(ni+1)
u_guess = 0.0 * NP.ones(ni+1)
# All variables
v = vertcat([t,x,u])
v_lb = NP.concatenate([t_lb,x_lb,u_lb])
v_ub = NP.concatenate([t_ub,x_ub,u_ub])
v_guess = NP.concatenate([t_guess,x_guess,u_guess])
# Make h, alpha symbolic once
h = SXMatrix(h)
alpha = SXMatrix(alpha)
# Objective function
f = 0
for i in range(ni):
f += 0.5*h*(u[i+1]**2 + u[i]**2) + 0.5*alpha*h*(cos(t[i+1]) + cos(t[i]))
ffcn = SXFunction([v],[f])
# Constraint function
g = []
g_lb = []
g_ub = []
for i in range(ni):
g.append(x[i+1] - x[i] - 0.5*h*(sin(t[i+1]) + sin(t[i])))
g_lb.append(0)
g_ub.append(0)
for i in range(ni):
g.append(t[i+1] - t[i] - 0.5*h*u[i+1] - 0.5*h*u[i])
g_lb.append(0)
g_ub.append(0)
g = vertcat(g)
g_lb = NP.array(g_lb)
g_ub = NP.array(g_ub)
gfcn = SXFunction([v],[g])
# NLP solver
nlp_solver = IpoptSolver(ffcn,gfcn)
#nlp_solver.setOption("max_iter",10)
nlp_solver.setOption("generate_hessian",True)
nlp_solver.setOption("linear_solver","ma57")
nlp_solver.init()
nlp_solver.setInput(v_guess, "x0")
nlp_solver.setInput(v_lb, "lbx")
nlp_solver.setInput(v_ub, "ubx")
nlp_solver.setInput(g_lb, "lbg")
nlp_solver.setInput(g_ub, "ubg")
nlp_solver.solve()
| lgpl-3.0 |
newemailjdm/scipy | scipy/signal/fir_filter_design.py | 40 | 20637 | """Functions for FIR filter design."""
from __future__ import division, print_function, absolute_import
from math import ceil, log
import numpy as np
from numpy.fft import irfft
from scipy.special import sinc
from . import sigtools
__all__ = ['kaiser_beta', 'kaiser_atten', 'kaiserord',
'firwin', 'firwin2', 'remez']
# Some notes on function parameters:
#
# `cutoff` and `width` are given as a numbers between 0 and 1. These
# are relative frequencies, expressed as a fraction of the Nyquist rate.
# For example, if the Nyquist rate is 2KHz, then width=0.15 is a width
# of 300 Hz.
#
# The `order` of a FIR filter is one less than the number of taps.
# This is a potential source of confusion, so in the following code,
# we will always use the number of taps as the parameterization of
# the 'size' of the filter. The "number of taps" means the number
# of coefficients, which is the same as the length of the impulse
# response of the filter.
def kaiser_beta(a):
"""Compute the Kaiser parameter `beta`, given the attenuation `a`.
Parameters
----------
a : float
The desired attenuation in the stopband and maximum ripple in
the passband, in dB. This should be a *positive* number.
Returns
-------
beta : float
The `beta` parameter to be used in the formula for a Kaiser window.
References
----------
Oppenheim, Schafer, "Discrete-Time Signal Processing", p.475-476.
"""
if a > 50:
beta = 0.1102 * (a - 8.7)
elif a > 21:
beta = 0.5842 * (a - 21) ** 0.4 + 0.07886 * (a - 21)
else:
beta = 0.0
return beta
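# Worked example (illustrative, not part of the original docstring): an attenuation of
# 65 dB falls in the ``a > 50`` branch above, so
#
#     kaiser_beta(65.0) == 0.1102 * (65.0 - 8.7)   # ~6.204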
def kaiser_atten(numtaps, width):
"""Compute the attenuation of a Kaiser FIR filter.
Given the number of taps `N` and the transition width `width`, compute the
attenuation `a` in dB, given by Kaiser's formula:
a = 2.285 * (N - 1) * pi * width + 7.95
Parameters
----------
numtaps : int
The number of taps in the FIR filter.
width : float
The desired width of the transition region between passband and
stopband (or, in general, at any discontinuity) for the filter.
Returns
-------
a : float
The attenuation of the ripple, in dB.
See Also
--------
kaiserord, kaiser_beta
"""
a = 2.285 * (numtaps - 1) * np.pi * width + 7.95
return a
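# Worked example (illustrative): 101 taps with a transition width of 0.05 (as a
# fraction of the Nyquist rate) give
#
#     a = 2.285 * (101 - 1) * np.pi * 0.05 + 7.95   # ~43.84 dB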
def kaiserord(ripple, width):
"""
Design a Kaiser window to limit ripple and width of transition region.
Parameters
----------
ripple : float
Positive number specifying maximum ripple in passband (dB) and minimum
ripple in stopband.
width : float
Width of transition region (normalized so that 1 corresponds to pi
radians / sample).
Returns
-------
numtaps : int
The length of the kaiser window.
beta : float
The beta parameter for the kaiser window.
See Also
--------
kaiser_beta, kaiser_atten
Notes
-----
There are several ways to obtain the Kaiser window:
- ``signal.kaiser(numtaps, beta, sym=0)``
- ``signal.get_window(beta, numtaps)``
- ``signal.get_window(('kaiser', beta), numtaps)``
The empirical equations discovered by Kaiser are used.
References
----------
Oppenheim, Schafer, "Discrete-Time Signal Processing", p.475-476.
"""
A = abs(ripple) # in case somebody is confused as to what's meant
if A < 8:
# Formula for N is not valid in this range.
raise ValueError("Requested maximum ripple attentuation %f is too "
"small for the Kaiser formula." % A)
beta = kaiser_beta(A)
# Kaiser's formula (as given in Oppenheim and Schafer) is for the filter
# order, so we have to add 1 to get the number of taps.
numtaps = (A - 7.95) / 2.285 / (np.pi * width) + 1
return int(ceil(numtaps)), beta
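# Minimal usage sketch (illustrative, not part of the original module): a typical
# Kaiser low-pass design combines ``kaiserord`` with ``firwin`` below. The sample
# rate, ripple and band edges are made-up values for illustration only.
#
#     fs = 1000.0                         # sampling rate in Hz (assumed)
#     nyq_rate = 0.5 * fs
#     width = 50.0 / nyq_rate             # 50 Hz transition width, normalized
#     numtaps, beta = kaiserord(ripple=65.0, width=width)
#     taps = firwin(numtaps, 150.0 / nyq_rate, window=('kaiser', beta))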
def firwin(numtaps, cutoff, width=None, window='hamming', pass_zero=True,
scale=True, nyq=1.0):
"""
FIR filter design using the window method.
This function computes the coefficients of a finite impulse response
filter. The filter will have linear phase; it will be Type I if
`numtaps` is odd and Type II if `numtaps` is even.
Type II filters always have zero response at the Nyquist rate, so a
ValueError exception is raised if firwin is called with `numtaps` even and
having a passband whose right end is at the Nyquist rate.
Parameters
----------
numtaps : int
Length of the filter (number of coefficients, i.e. the filter
order + 1). `numtaps` must be even if a passband includes the
Nyquist frequency.
cutoff : float or 1D array_like
Cutoff frequency of filter (expressed in the same units as `nyq`)
OR an array of cutoff frequencies (that is, band edges). In the
latter case, the frequencies in `cutoff` should be positive and
monotonically increasing between 0 and `nyq`. The values 0 and
`nyq` must not be included in `cutoff`.
width : float or None, optional
If `width` is not None, then assume it is the approximate width
of the transition region (expressed in the same units as `nyq`)
for use in Kaiser FIR filter design. In this case, the `window`
argument is ignored.
window : string or tuple of string and parameter values, optional
Desired window to use. See `scipy.signal.get_window` for a list
of windows and required parameters.
pass_zero : bool, optional
If True, the gain at the frequency 0 (i.e. the "DC gain") is 1.
Otherwise the DC gain is 0.
scale : bool, optional
Set to True to scale the coefficients so that the frequency
response is exactly unity at a certain frequency.
That frequency is either:
- 0 (DC) if the first passband starts at 0 (i.e. pass_zero
is True)
- `nyq` (the Nyquist rate) if the first passband ends at
`nyq` (i.e the filter is a single band highpass filter);
center of first passband otherwise
nyq : float, optional
Nyquist frequency. Each frequency in `cutoff` must be between 0
and `nyq`.
Returns
-------
h : (numtaps,) ndarray
Coefficients of length `numtaps` FIR filter.
Raises
------
ValueError
If any value in `cutoff` is less than or equal to 0 or greater
than or equal to `nyq`, if the values in `cutoff` are not strictly
monotonically increasing, or if `numtaps` is even but a passband
includes the Nyquist frequency.
See also
--------
scipy.signal.firwin2
Examples
--------
Low-pass from 0 to f:
>>> from scipy import signal
>>> numtaps = 3
>>> f = 0.1
>>> signal.firwin(numtaps, f)
array([ 0.06799017, 0.86401967, 0.06799017])
Use a specific window function:
>>> signal.firwin(numtaps, f, window='nuttall')
array([ 3.56607041e-04, 9.99286786e-01, 3.56607041e-04])
High-pass ('stop' from 0 to f):
>>> signal.firwin(numtaps, f, pass_zero=False)
array([-0.00859313, 0.98281375, -0.00859313])
Band-pass:
>>> f1, f2 = 0.1, 0.2
>>> signal.firwin(numtaps, [f1, f2], pass_zero=False)
array([ 0.06301614, 0.88770441, 0.06301614])
Band-stop:
>>> signal.firwin(numtaps, [f1, f2])
array([-0.00801395, 1.0160279 , -0.00801395])
Multi-band (passbands are [0, f1], [f2, f3] and [f4, 1]):
>>> f3, f4 = 0.3, 0.4
>>> signal.firwin(numtaps, [f1, f2, f3, f4])
array([-0.01376344, 1.02752689, -0.01376344])
Multi-band (passbands are [f1, f2] and [f3,f4]):
>>> signal.firwin(numtaps, [f1, f2, f3, f4], pass_zero=False)
array([ 0.04890915, 0.91284326, 0.04890915])
"""
# The major enhancements to this function added in November 2010 were
# developed by Tom Krauss (see ticket #902).
cutoff = np.atleast_1d(cutoff) / float(nyq)
# Check for invalid input.
if cutoff.ndim > 1:
raise ValueError("The cutoff argument must be at most "
"one-dimensional.")
if cutoff.size == 0:
raise ValueError("At least one cutoff frequency must be given.")
if cutoff.min() <= 0 or cutoff.max() >= 1:
raise ValueError("Invalid cutoff frequency: frequencies must be "
"greater than 0 and less than nyq.")
if np.any(np.diff(cutoff) <= 0):
raise ValueError("Invalid cutoff frequencies: the frequencies "
"must be strictly increasing.")
if width is not None:
# A width was given. Find the beta parameter of the Kaiser window
# and set `window`. This overrides the value of `window` passed in.
atten = kaiser_atten(numtaps, float(width) / nyq)
beta = kaiser_beta(atten)
window = ('kaiser', beta)
pass_nyquist = bool(cutoff.size & 1) ^ pass_zero
if pass_nyquist and numtaps % 2 == 0:
raise ValueError("A filter with an even number of coefficients must "
"have zero response at the Nyquist rate.")
# Insert 0 and/or 1 at the ends of cutoff so that the length of cutoff
# is even, and each pair in cutoff corresponds to passband.
cutoff = np.hstack(([0.0] * pass_zero, cutoff, [1.0] * pass_nyquist))
# `bands` is a 2D array; each row gives the left and right edges of
# a passband.
bands = cutoff.reshape(-1, 2)
# Build up the coefficients.
alpha = 0.5 * (numtaps - 1)
m = np.arange(0, numtaps) - alpha
h = 0
for left, right in bands:
h += right * sinc(right * m)
h -= left * sinc(left * m)
# Get and apply the window function.
from .signaltools import get_window
win = get_window(window, numtaps, fftbins=False)
h *= win
# Now handle scaling if desired.
if scale:
# Get the first passband.
left, right = bands[0]
if left == 0:
scale_frequency = 0.0
elif right == 1:
scale_frequency = 1.0
else:
scale_frequency = 0.5 * (left + right)
c = np.cos(np.pi * m * scale_frequency)
s = np.sum(h * c)
h /= s
return h
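# Illustrative note (derived from the ``width`` handling above): passing ``width``
# switches the design to a Kaiser window computed via ``kaiser_atten`` and
# ``kaiser_beta``, so with the default ``nyq=1.0``
#
#     firwin(73, cutoff=0.3, width=0.05)
#
# is expected to match
#
#     beta = kaiser_beta(kaiser_atten(73, 0.05))
#     firwin(73, cutoff=0.3, window=('kaiser', beta))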
# Original version of firwin2 from scipy ticket #457, submitted by "tash".
#
# Rewritten by Warren Weckesser, 2010.
def firwin2(numtaps, freq, gain, nfreqs=None, window='hamming', nyq=1.0,
antisymmetric=False):
"""
FIR filter design using the window method.
From the given frequencies `freq` and corresponding gains `gain`,
this function constructs an FIR filter with linear phase and
(approximately) the given frequency response.
Parameters
----------
numtaps : int
The number of taps in the FIR filter. `numtaps` must be less than
`nfreqs`.
freq : array_like, 1D
The frequency sampling points. Typically 0.0 to 1.0 with 1.0 being
Nyquist. The Nyquist frequency can be redefined with the argument
`nyq`.
The values in `freq` must be nondecreasing. A value can be repeated
once to implement a discontinuity. The first value in `freq` must
be 0, and the last value must be `nyq`.
gain : array_like
The filter gains at the frequency sampling points. Certain
constraints to gain values, depending on the filter type, are applied,
see Notes for details.
nfreqs : int, optional
The size of the interpolation mesh used to construct the filter.
For most efficient behavior, this should be a power of 2 plus 1
(e.g, 129, 257, etc). The default is one more than the smallest
power of 2 that is not less than `numtaps`. `nfreqs` must be greater
than `numtaps`.
window : string or (string, float) or float, or None, optional
Window function to use. Default is "hamming". See
`scipy.signal.get_window` for the complete list of possible values.
If None, no window function is applied.
nyq : float, optional
Nyquist frequency. Each frequency in `freq` must be between 0 and
`nyq` (inclusive).
antisymmetric : bool, optional
Whether resulting impulse response is symmetric/antisymmetric.
See Notes for more details.
Returns
-------
taps : ndarray
The filter coefficients of the FIR filter, as a 1-D array of length
`numtaps`.
See also
--------
scipy.signal.firwin
Notes
-----
From the given set of frequencies and gains, the desired response is
constructed in the frequency domain. The inverse FFT is applied to the
desired response to create the associated convolution kernel, and the
first `numtaps` coefficients of this kernel, scaled by `window`, are
returned.
The FIR filter will have linear phase. The type of filter is determined by
the value of `numtaps` and the `antisymmetric` flag.
There are four possible combinations:
- odd `numtaps`, `antisymmetric` is False, type I filter is produced
- even `numtaps`, `antisymmetric` is False, type II filter is produced
- odd `numtaps`, `antisymmetric` is True, type III filter is produced
- even `numtaps`, `antisymmetric` is True, type IV filter is produced
The magnitude response of all but type I filters is subject to the following
constraints:
- type II -- zero at the Nyquist frequency
- type III -- zero at zero and Nyquist frequencies
- type IV -- zero at zero frequency
.. versionadded:: 0.9.0
References
----------
.. [1] Oppenheim, A. V. and Schafer, R. W., "Discrete-Time Signal
Processing", Prentice-Hall, Englewood Cliffs, New Jersey (1989).
(See, for example, Section 7.4.)
.. [2] Smith, Steven W., "The Scientist and Engineer's Guide to Digital
Signal Processing", Ch. 17. http://www.dspguide.com/ch17/1.htm
Examples
--------
A lowpass FIR filter with a response that is 1 on [0.0, 0.5], and
that decreases linearly on [0.5, 1.0] from 1 to 0:
>>> from scipy import signal
>>> taps = signal.firwin2(150, [0.0, 0.5, 1.0], [1.0, 1.0, 0.0])
>>> print(taps[72:78])
[-0.02286961 -0.06362756 0.57310236 0.57310236 -0.06362756 -0.02286961]
"""
if len(freq) != len(gain):
raise ValueError('freq and gain must be of same length.')
if nfreqs is not None and numtaps >= nfreqs:
raise ValueError(('ntaps must be less than nfreqs, but firwin2 was '
'called with ntaps=%d and nfreqs=%s') %
(numtaps, nfreqs))
if freq[0] != 0 or freq[-1] != nyq:
raise ValueError('freq must start with 0 and end with `nyq`.')
d = np.diff(freq)
if (d < 0).any():
raise ValueError('The values in freq must be nondecreasing.')
d2 = d[:-1] + d[1:]
if (d2 == 0).any():
raise ValueError('A value in freq must not occur more than twice.')
if antisymmetric:
if numtaps % 2 == 0:
ftype = 4
else:
ftype = 3
else:
if numtaps % 2 == 0:
ftype = 2
else:
ftype = 1
if ftype == 2 and gain[-1] != 0.0:
raise ValueError("A Type II filter must have zero gain at the "
"Nyquist rate.")
elif ftype == 3 and (gain[0] != 0.0 or gain[-1] != 0.0):
raise ValueError("A Type III filter must have zero gain at zero "
"and Nyquist rates.")
elif ftype == 4 and gain[0] != 0.0:
raise ValueError("A Type IV filter must have zero gain at zero rate.")
if nfreqs is None:
nfreqs = 1 + 2 ** int(ceil(log(numtaps, 2)))
# Tweak any repeated values in freq so that interp works.
eps = np.finfo(float).eps
for k in range(len(freq)):
if k < len(freq) - 1 and freq[k] == freq[k + 1]:
freq[k] = freq[k] - eps
freq[k + 1] = freq[k + 1] + eps
# Linearly interpolate the desired response on a uniform mesh `x`.
x = np.linspace(0.0, nyq, nfreqs)
fx = np.interp(x, freq, gain)
# Adjust the phases of the coefficients so that the first `ntaps` of the
# inverse FFT are the desired filter coefficients.
shift = np.exp(-(numtaps - 1) / 2. * 1.j * np.pi * x / nyq)
if ftype > 2:
shift *= 1j
fx2 = fx * shift
# Use irfft to compute the inverse FFT.
out_full = irfft(fx2)
if window is not None:
# Create the window to apply to the filter coefficients.
from .signaltools import get_window
wind = get_window(window, numtaps, fftbins=False)
else:
wind = 1
# Keep only the first `numtaps` coefficients in `out`, and multiply by
# the window.
out = out_full[:numtaps] * wind
if ftype == 3:
out[out.size // 2] = 0.0
return out
def remez(numtaps, bands, desired, weight=None, Hz=1, type='bandpass',
maxiter=25, grid_density=16):
"""
Calculate the minimax optimal filter using the Remez exchange algorithm.
Calculate the filter-coefficients for the finite impulse response
(FIR) filter whose transfer function minimizes the maximum error
between the desired gain and the realized gain in the specified
frequency bands using the Remez exchange algorithm.
Parameters
----------
numtaps : int
The desired number of taps in the filter. The number of taps is
the number of terms in the filter, or the filter order plus one.
bands : array_like
A monotonic sequence containing the band edges in Hz.
All elements must be non-negative and less than half the sampling
frequency as given by `Hz`.
desired : array_like
A sequence half the size of bands containing the desired gain
in each of the specified bands.
weight : array_like, optional
A relative weighting to give to each band region. The length of
`weight` has to be half the length of `bands`.
Hz : scalar, optional
The sampling frequency in Hz. Default is 1.
type : {'bandpass', 'differentiator', 'hilbert'}, optional
The type of filter:
'bandpass' : flat response in bands. This is the default.
'differentiator' : frequency proportional response in bands.
'hilbert' : filter with odd symmetry, that is, type III
(for even order) or type IV (for odd order)
linear phase filters.
maxiter : int, optional
Maximum number of iterations of the algorithm. Default is 25.
grid_density : int, optional
Grid density. The dense grid used in `remez` is of size
``(numtaps + 1) * grid_density``. Default is 16.
Returns
-------
out : ndarray
A rank-1 array containing the coefficients of the optimal
(in a minimax sense) filter.
See Also
--------
freqz : Compute the frequency response of a digital filter.
References
----------
.. [1] J. H. McClellan and T. W. Parks, "A unified approach to the
design of optimum FIR linear phase digital filters",
IEEE Trans. Circuit Theory, vol. CT-20, pp. 697-701, 1973.
.. [2] J. H. McClellan, T. W. Parks and L. R. Rabiner, "A Computer
Program for Designing Optimum FIR Linear Phase Digital
Filters", IEEE Trans. Audio Electroacoust., vol. AU-21,
pp. 506-525, 1973.
Examples
--------
We want to construct a filter with a passband at 0.2-0.4 Hz, and
stop bands at 0-0.1 Hz and 0.45-0.5 Hz. Note that this means that the
behavior in the frequency ranges between those bands is unspecified and
may overshoot.
>>> from scipy import signal
>>> bpass = signal.remez(72, [0, 0.1, 0.2, 0.4, 0.45, 0.5], [0, 1, 0])
>>> freq, response = signal.freqz(bpass)
>>> ampl = np.abs(response)
>>> import matplotlib.pyplot as plt
>>> fig = plt.figure()
>>> ax1 = fig.add_subplot(111)
>>> ax1.semilogy(freq/(2*np.pi), ampl, 'b-') # freq in Hz
>>> plt.show()
"""
# Convert type
try:
tnum = {'bandpass': 1, 'differentiator': 2, 'hilbert': 3}[type]
except KeyError:
raise ValueError("Type must be 'bandpass', 'differentiator', "
"or 'hilbert'")
# Convert weight
if weight is None:
weight = [1] * len(desired)
bands = np.asarray(bands).copy()
return sigtools._remez(numtaps, bands, desired, weight, tnum, Hz,
maxiter, grid_density)
| bsd-3-clause |
aflaxman/scikit-learn | sklearn/svm/classes.py | 9 | 44254 | import warnings
import numpy as np
from .base import _fit_liblinear, BaseSVC, BaseLibSVM
from ..base import BaseEstimator, RegressorMixin
from ..linear_model.base import LinearClassifierMixin, SparseCoefMixin, \
LinearModel
from ..utils import check_X_y
from ..utils.validation import _num_samples
from ..utils.multiclass import check_classification_targets
class LinearSVC(BaseEstimator, LinearClassifierMixin,
SparseCoefMixin):
"""Linear Support Vector Classification.
Similar to SVC with parameter kernel='linear', but implemented in terms of
liblinear rather than libsvm, so it has more flexibility in the choice of
penalties and loss functions and should scale better to large numbers of
samples.
This class supports both dense and sparse input and the multiclass support
is handled according to a one-vs-the-rest scheme.
Read more in the :ref:`User Guide <svm_classification>`.
Parameters
----------
penalty : string, 'l1' or 'l2' (default='l2')
Specifies the norm used in the penalization. The 'l2'
penalty is the standard used in SVC. The 'l1' leads to ``coef_``
vectors that are sparse.
loss : string, 'hinge' or 'squared_hinge' (default='squared_hinge')
Specifies the loss function. 'hinge' is the standard SVM loss
(used e.g. by the SVC class) while 'squared_hinge' is the
square of the hinge loss.
dual : bool, (default=True)
Select the algorithm to either solve the dual or primal
optimization problem. Prefer dual=False when n_samples > n_features.
tol : float, optional (default=1e-4)
Tolerance for stopping criteria.
C : float, optional (default=1.0)
Penalty parameter C of the error term.
multi_class : string, 'ovr' or 'crammer_singer' (default='ovr')
Determines the multi-class strategy if `y` contains more than
two classes.
``"ovr"`` trains n_classes one-vs-rest classifiers, while
``"crammer_singer"`` optimizes a joint objective over all classes.
While `crammer_singer` is interesting from a theoretical perspective
as it is consistent, it is seldom used in practice as it rarely leads
to better accuracy and is more expensive to compute.
If ``"crammer_singer"`` is chosen, the options loss, penalty and dual
will be ignored.
fit_intercept : boolean, optional (default=True)
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(i.e. data is expected to be already centered).
intercept_scaling : float, optional (default=1)
When self.fit_intercept is True, instance vector x becomes
``[x, self.intercept_scaling]``,
i.e. a "synthetic" feature with constant value equals to
intercept_scaling is appended to the instance vector.
The intercept becomes intercept_scaling * synthetic feature weight
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
class_weight : {dict, 'balanced'}, optional
Set the parameter C of class i to ``class_weight[i]*C`` for
SVC. If not given, all classes are supposed to have
weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
verbose : int, (default=0)
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in liblinear that, if enabled, may not work
properly in a multithreaded context.
random_state : int, RandomState instance or None, optional (default=None)
The seed of the pseudo random number generator to use when shuffling
the data for the dual coordinate descent (if ``dual=True``). When
``dual=False`` the underlying implementation of :class:`LinearSVC`
is not random and ``random_state`` has no effect on the results. If
int, random_state is the seed used by the random number generator; If
RandomState instance, random_state is the random number generator; If
None, the random number generator is the RandomState instance used by
`np.random`.
max_iter : int, (default=1000)
The maximum number of iterations to be run.
Attributes
----------
coef_ : array, shape = [1, n_features] if n_classes == 2 else [n_classes, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
``coef_`` is a readonly property derived from ``raw_coef_`` that
follows the internal memory layout of liblinear.
intercept_ : array, shape = [1] if n_classes == 2 else [n_classes]
Constants in decision function.
Examples
--------
>>> from sklearn.svm import LinearSVC
>>> from sklearn.datasets import make_classification
>>> X, y = make_classification(n_features=4, random_state=0)
>>> clf = LinearSVC(random_state=0)
>>> clf.fit(X, y)
LinearSVC(C=1.0, class_weight=None, dual=True, fit_intercept=True,
intercept_scaling=1, loss='squared_hinge', max_iter=1000,
multi_class='ovr', penalty='l2', random_state=0, tol=0.0001,
verbose=0)
>>> print(clf.coef_)
[[ 0.08551385 0.39414796 0.49847831 0.37513797]]
>>> print(clf.intercept_)
[ 0.28418066]
>>> print(clf.predict([[0, 0, 0, 0]]))
[1]
Notes
-----
The underlying C implementation uses a random number generator to
select features when fitting the model. It is thus not uncommon
to have slightly different results for the same input data. If
that happens, try with a smaller ``tol`` parameter.
The underlying implementation, liblinear, uses a sparse internal
representation for the data that will incur a memory copy.
Predict output may not match that of standalone liblinear in certain
cases. See :ref:`differences from liblinear <liblinear_differences>`
in the narrative documentation.
References
----------
`LIBLINEAR: A Library for Large Linear Classification
<http://www.csie.ntu.edu.tw/~cjlin/liblinear/>`__
See also
--------
SVC
Implementation of Support Vector Machine classifier using libsvm:
the kernel can be non-linear but its SMO algorithm does not
scale to large number of samples as LinearSVC does.
Furthermore SVC multi-class mode is implemented using one
vs one scheme while LinearSVC uses one vs the rest. It is
possible to implement one vs the rest with SVC by using the
:class:`sklearn.multiclass.OneVsRestClassifier` wrapper.
Finally SVC can fit dense data without memory copy if the input
is C-contiguous. Sparse data will still incur memory copy though.
sklearn.linear_model.SGDClassifier
SGDClassifier can optimize the same cost function as LinearSVC
by adjusting the penalty and loss parameters. In addition it requires
less memory, allows incremental (online) learning, and implements
various loss functions and regularization regimes.
"""
def __init__(self, penalty='l2', loss='squared_hinge', dual=True, tol=1e-4,
C=1.0, multi_class='ovr', fit_intercept=True,
intercept_scaling=1, class_weight=None, verbose=0,
random_state=None, max_iter=1000):
self.dual = dual
self.tol = tol
self.C = C
self.multi_class = multi_class
self.fit_intercept = fit_intercept
self.intercept_scaling = intercept_scaling
self.class_weight = class_weight
self.verbose = verbose
self.random_state = random_state
self.max_iter = max_iter
self.penalty = penalty
self.loss = loss
def fit(self, X, y, sample_weight=None):
"""Fit the model according to the given training data.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples]
Target vector relative to X
sample_weight : array-like, shape = [n_samples], optional
Array of weights that are assigned to individual
samples. If not provided,
then each sample is given unit weight.
Returns
-------
self : object
Returns self.
"""
# FIXME Remove l1/l2 support in 1.0 -----------------------------------
msg = ("loss='%s' has been deprecated in favor of "
"loss='%s' as of 0.16. Backward compatibility"
" for the loss='%s' will be removed in %s")
if self.loss in ('l1', 'l2'):
old_loss = self.loss
self.loss = {'l1': 'hinge', 'l2': 'squared_hinge'}.get(self.loss)
warnings.warn(msg % (old_loss, self.loss, old_loss, '1.0'),
DeprecationWarning)
# ---------------------------------------------------------------------
if self.C < 0:
raise ValueError("Penalty term must be positive; got (C=%r)"
% self.C)
X, y = check_X_y(X, y, accept_sparse='csr',
dtype=np.float64, order="C")
check_classification_targets(y)
self.classes_ = np.unique(y)
self.coef_, self.intercept_, self.n_iter_ = _fit_liblinear(
X, y, self.C, self.fit_intercept, self.intercept_scaling,
self.class_weight, self.penalty, self.dual, self.verbose,
self.max_iter, self.tol, self.random_state, self.multi_class,
self.loss, sample_weight=sample_weight)
if self.multi_class == "crammer_singer" and len(self.classes_) == 2:
self.coef_ = (self.coef_[1] - self.coef_[0]).reshape(1, -1)
if self.fit_intercept:
intercept = self.intercept_[1] - self.intercept_[0]
self.intercept_ = np.array([intercept])
return self
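# Illustrative usage sketch (a hypothetical helper, not part of scikit-learn):
# a minimal fit/predict round trip for the class above on synthetic data.
# The dataset sizes and dual=False (a reasonable choice here because
# n_samples > n_features) are arbitrary demo settings.
def _example_linear_svc():
    from sklearn.datasets import make_classification
    X, y = make_classification(n_samples=200, n_features=10, random_state=0)
    clf = LinearSVC(dual=False, C=1.0, random_state=0).fit(X, y)
    # For a binary problem, coef_ has shape (1, n_features).
    return clf.coef_.shape, clf.predict(X[:5])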
class LinearSVR(LinearModel, RegressorMixin):
"""Linear Support Vector Regression.
Similar to SVR with parameter kernel='linear', but implemented in terms of
liblinear rather than libsvm, so it has more flexibility in the choice of
penalties and loss functions and should scale better to large numbers of
samples.
This class supports both dense and sparse input.
Read more in the :ref:`User Guide <svm_regression>`.
Parameters
----------
C : float, optional (default=1.0)
Penalty parameter C of the error term. The penalty is a squared
l2 penalty. The bigger this parameter, the less regularization is used.
loss : string, 'epsilon_insensitive' or 'squared_epsilon_insensitive' (default='epsilon_insensitive')
Specifies the loss function. 'epsilon_insensitive' is the
epsilon-insensitive loss (standard SVR) while
'squared_epsilon_insensitive' is the square of the epsilon-insensitive loss.
epsilon : float, optional (default=0.0)
Epsilon parameter in the epsilon-insensitive loss function. Note
that the value of this parameter depends on the scale of the target
variable y. If unsure, set ``epsilon=0``.
dual : bool, (default=True)
Select the algorithm to either solve the dual or primal
optimization problem. Prefer dual=False when n_samples > n_features.
tol : float, optional (default=1e-4)
Tolerance for stopping criteria.
fit_intercept : boolean, optional (default=True)
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(i.e. data is expected to be already centered).
intercept_scaling : float, optional (default=1)
When self.fit_intercept is True, instance vector x becomes
[x, self.intercept_scaling],
i.e. a "synthetic" feature with constant value equals to
intercept_scaling is appended to the instance vector.
The intercept becomes intercept_scaling * synthetic feature weight
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
verbose : int, (default=0)
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in liblinear that, if enabled, may not work
properly in a multithreaded context.
random_state : int, RandomState instance or None, optional (default=None)
The seed of the pseudo random number generator to use when shuffling
the data. If int, random_state is the seed used by the random number
generator; If RandomState instance, random_state is the random number
generator; If None, the random number generator is the RandomState
instance used by `np.random`.
max_iter : int, (default=1000)
The maximum number of iterations to be run.
Attributes
----------
coef_ : array, shape = [n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
`coef_` is a readonly property derived from `raw_coef_` that
follows the internal memory layout of liblinear.
intercept_ : array, shape = [1] if n_classes == 2 else [n_classes]
Constants in decision function.
Examples
--------
>>> from sklearn.svm import LinearSVR
>>> from sklearn.datasets import make_regression
>>> X, y = make_regression(n_features=4, random_state=0)
>>> regr = LinearSVR(random_state=0)
>>> regr.fit(X, y)
LinearSVR(C=1.0, dual=True, epsilon=0.0, fit_intercept=True,
intercept_scaling=1.0, loss='epsilon_insensitive', max_iter=1000,
random_state=0, tol=0.0001, verbose=0)
>>> print(regr.coef_)
[ 16.35750999 26.91499923 42.30652207 60.47843124]
>>> print(regr.intercept_)
[-4.29756543]
>>> print(regr.predict([[0, 0, 0, 0]]))
[-4.29756543]
See also
--------
LinearSVC
Implementation of Support Vector Machine classifier using the
same library as this class (liblinear).
SVR
Implementation of Support Vector Machine regression using libsvm:
the kernel can be non-linear but its SMO algorithm does not
scale to large number of samples as LinearSVC does.
sklearn.linear_model.SGDRegressor
SGDRegressor can optimize the same cost function as LinearSVR
by adjusting the penalty and loss parameters. In addition it requires
less memory, allows incremental (online) learning, and implements
various loss functions and regularization regimes.
"""
def __init__(self, epsilon=0.0, tol=1e-4, C=1.0,
loss='epsilon_insensitive', fit_intercept=True,
intercept_scaling=1., dual=True, verbose=0,
random_state=None, max_iter=1000):
self.tol = tol
self.C = C
self.epsilon = epsilon
self.fit_intercept = fit_intercept
self.intercept_scaling = intercept_scaling
self.verbose = verbose
self.random_state = random_state
self.max_iter = max_iter
self.dual = dual
self.loss = loss
def fit(self, X, y, sample_weight=None):
"""Fit the model according to the given training data.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples]
Target vector relative to X
sample_weight : array-like, shape = [n_samples], optional
Array of weights that are assigned to individual
samples. If not provided,
then each sample is given unit weight.
Returns
-------
self : object
Returns self.
"""
# FIXME Remove l1/l2 support in 1.0 -----------------------------------
msg = ("loss='%s' has been deprecated in favor of "
"loss='%s' as of 0.16. Backward compatibility"
" for the loss='%s' will be removed in %s")
if self.loss in ('l1', 'l2'):
old_loss = self.loss
self.loss = {'l1': 'epsilon_insensitive',
'l2': 'squared_epsilon_insensitive'
}.get(self.loss)
warnings.warn(msg % (old_loss, self.loss, old_loss, '1.0'),
DeprecationWarning)
# ---------------------------------------------------------------------
if self.C < 0:
raise ValueError("Penalty term must be positive; got (C=%r)"
% self.C)
X, y = check_X_y(X, y, accept_sparse='csr',
dtype=np.float64, order="C")
penalty = 'l2' # SVR only accepts l2 penalty
self.coef_, self.intercept_, self.n_iter_ = _fit_liblinear(
X, y, self.C, self.fit_intercept, self.intercept_scaling,
None, penalty, self.dual, self.verbose,
self.max_iter, self.tol, self.random_state, loss=self.loss,
epsilon=self.epsilon, sample_weight=sample_weight)
self.coef_ = self.coef_.ravel()
return self
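# Illustrative usage sketch (a hypothetical helper, not part of scikit-learn):
# epsilon sets the width of the insensitive tube and should be chosen
# relative to the scale of y; the settings below are arbitrary demo values.
def _example_linear_svr():
    from sklearn.datasets import make_regression
    X, y = make_regression(n_samples=200, n_features=5, noise=1.0,
                           random_state=0)
    reg = LinearSVR(epsilon=0.0, C=1.0, random_state=0).fit(X, y)
    return reg.coef_, reg.predict(X[:3])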
class SVC(BaseSVC):
"""C-Support Vector Classification.
The implementation is based on libsvm. The fit time complexity
is more than quadratic with the number of samples which makes it hard
to scale to dataset with more than a couple of 10000 samples.
The multiclass support is handled according to a one-vs-one scheme.
For details on the precise mathematical formulation of the provided
kernel functions and how `gamma`, `coef0` and `degree` affect each
other, see the corresponding section in the narrative documentation:
:ref:`svm_kernels`.
Read more in the :ref:`User Guide <svm_classification>`.
Parameters
----------
C : float, optional (default=1.0)
Penalty parameter C of the error term.
kernel : string, optional (default='rbf')
Specifies the kernel type to be used in the algorithm.
It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
a callable.
If none is given, 'rbf' will be used. If a callable is given it is
used to pre-compute the kernel matrix from data matrices; that matrix
should be an array of shape ``(n_samples, n_samples)``.
degree : int, optional (default=3)
Degree of the polynomial kernel function ('poly').
Ignored by all other kernels.
gamma : float, optional (default='auto')
Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.
If gamma is 'auto' then 1/n_features will be used instead.
coef0 : float, optional (default=0.0)
Independent term in kernel function.
It is only significant in 'poly' and 'sigmoid'.
probability : boolean, optional (default=False)
Whether to enable probability estimates. This must be enabled prior
to calling `fit`, and will slow down that method.
shrinking : boolean, optional (default=True)
Whether to use the shrinking heuristic.
tol : float, optional (default=1e-3)
Tolerance for stopping criterion.
cache_size : float, optional
Specify the size of the kernel cache (in MB).
class_weight : {dict, 'balanced'}, optional
Set the parameter C of class i to class_weight[i]*C for
SVC. If not given, all classes are supposed to have
weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
verbose : bool, default: False
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in libsvm that, if enabled, may not work
properly in a multithreaded context.
max_iter : int, optional (default=-1)
Hard limit on iterations within solver, or -1 for no limit.
decision_function_shape : 'ovo', 'ovr', default='ovr'
Whether to return a one-vs-rest ('ovr') decision function of shape
(n_samples, n_classes) as all other classifiers, or the original
one-vs-one ('ovo') decision function of libsvm which has shape
(n_samples, n_classes * (n_classes - 1) / 2).
.. versionchanged:: 0.19
decision_function_shape is 'ovr' by default.
.. versionadded:: 0.17
*decision_function_shape='ovr'* is recommended.
.. versionchanged:: 0.17
Deprecated *decision_function_shape='ovo' and None*.
random_state : int, RandomState instance or None, optional (default=None)
The seed of the pseudo random number generator used when shuffling
the data for probability estimates. If int, random_state is the
seed used by the random number generator; If RandomState instance,
random_state is the random number generator; If None, the random
number generator is the RandomState instance used by `np.random`.
Attributes
----------
support_ : array-like, shape = [n_SV]
Indices of support vectors.
support_vectors_ : array-like, shape = [n_SV, n_features]
Support vectors.
n_support_ : array-like, dtype=int32, shape = [n_class]
Number of support vectors for each class.
dual_coef_ : array, shape = [n_class-1, n_SV]
Coefficients of the support vector in the decision function.
For multiclass, coefficient for all 1-vs-1 classifiers.
The layout of the coefficients in the multiclass case is somewhat
non-trivial. See the section about multi-class classification in the
SVM section of the User Guide for details.
coef_ : array, shape = [n_class * (n_class-1) / 2, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
`coef_` is a readonly property derived from `dual_coef_` and
`support_vectors_`.
intercept_ : array, shape = [n_class * (n_class-1) / 2]
Constants in decision function.
Examples
--------
>>> import numpy as np
>>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
>>> y = np.array([1, 1, 2, 2])
>>> from sklearn.svm import SVC
>>> clf = SVC()
>>> clf.fit(X, y) #doctest: +NORMALIZE_WHITESPACE
SVC(C=1.0, cache_size=200, class_weight=None, coef0=0.0,
decision_function_shape='ovr', degree=3, gamma='auto', kernel='rbf',
max_iter=-1, probability=False, random_state=None, shrinking=True,
tol=0.001, verbose=False)
>>> print(clf.predict([[-0.8, -1]]))
[1]
See also
--------
SVR
Support Vector Machine for Regression implemented using libsvm.
LinearSVC
Scalable Linear Support Vector Machine for classification
implemented using liblinear. Check the See also section of
LinearSVC for more comparison element.
"""
def __init__(self, C=1.0, kernel='rbf', degree=3, gamma='auto',
coef0=0.0, shrinking=True, probability=False,
tol=1e-3, cache_size=200, class_weight=None,
verbose=False, max_iter=-1, decision_function_shape='ovr',
random_state=None):
super(SVC, self).__init__(
impl='c_svc', kernel=kernel, degree=degree, gamma=gamma,
coef0=coef0, tol=tol, C=C, nu=0., shrinking=shrinking,
probability=probability, cache_size=cache_size,
class_weight=class_weight, verbose=verbose, max_iter=max_iter,
decision_function_shape=decision_function_shape,
random_state=random_state)
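# Illustrative sketch (a hypothetical helper, not part of scikit-learn):
# with four classes, decision_function_shape='ovr' yields one column per
# class, while 'ovo' yields one column per pair of classes. The toy blobs
# below are arbitrary demo data.
def _example_svc_decision_shapes():
    from sklearn.datasets import make_blobs
    X, y = make_blobs(n_samples=60, centers=4, random_state=0)
    ovr = SVC(decision_function_shape='ovr').fit(X, y)
    ovo = SVC(decision_function_shape='ovo').fit(X, y)
    # Expected shapes: (60, 4) for 'ovr' and (60, 6) for 'ovo'.
    return ovr.decision_function(X).shape, ovo.decision_function(X).shape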
class NuSVC(BaseSVC):
"""Nu-Support Vector Classification.
Similar to SVC but uses a parameter to control the number of support
vectors.
The implementation is based on libsvm.
Read more in the :ref:`User Guide <svm_classification>`.
Parameters
----------
nu : float, optional (default=0.5)
An upper bound on the fraction of training errors and a lower
bound of the fraction of support vectors. Should be in the
interval (0, 1].
kernel : string, optional (default='rbf')
Specifies the kernel type to be used in the algorithm.
It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
a callable.
If none is given, 'rbf' will be used. If a callable is given it is
used to precompute the kernel matrix.
degree : int, optional (default=3)
Degree of the polynomial kernel function ('poly').
Ignored by all other kernels.
gamma : float, optional (default='auto')
Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.
If gamma is 'auto' then 1/n_features will be used instead.
coef0 : float, optional (default=0.0)
Independent term in kernel function.
It is only significant in 'poly' and 'sigmoid'.
probability : boolean, optional (default=False)
Whether to enable probability estimates. This must be enabled prior
to calling `fit`, and will slow down that method.
shrinking : boolean, optional (default=True)
Whether to use the shrinking heuristic.
tol : float, optional (default=1e-3)
Tolerance for stopping criterion.
cache_size : float, optional
Specify the size of the kernel cache (in MB).
class_weight : {dict, 'balanced'}, optional
Set the parameter C of class i to class_weight[i]*C for
SVC. If not given, all classes are supposed to have
weight one. The "balanced" mode uses the values of y to automatically
adjust weights inversely proportional to class frequencies as
``n_samples / (n_classes * np.bincount(y))``
verbose : bool, default: False
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in libsvm that, if enabled, may not work
properly in a multithreaded context.
max_iter : int, optional (default=-1)
Hard limit on iterations within solver, or -1 for no limit.
decision_function_shape : 'ovo', 'ovr', default='ovr'
Whether to return a one-vs-rest ('ovr') decision function of shape
(n_samples, n_classes) as all other classifiers, or the original
one-vs-one ('ovo') decision function of libsvm which has shape
(n_samples, n_classes * (n_classes - 1) / 2).
.. versionchanged:: 0.19
decision_function_shape is 'ovr' by default.
.. versionadded:: 0.17
*decision_function_shape='ovr'* is recommended.
.. versionchanged:: 0.17
Deprecated *decision_function_shape='ovo' and None*.
random_state : int, RandomState instance or None, optional (default=None)
The seed of the pseudo random number generator used when shuffling
the data for probability estimates. If int, random_state is the seed
used by the random number generator; If RandomState instance,
random_state is the random number generator; If None, the random
number generator is the RandomState instance used by `np.random`.
Attributes
----------
support_ : array-like, shape = [n_SV]
Indices of support vectors.
support_vectors_ : array-like, shape = [n_SV, n_features]
Support vectors.
n_support_ : array-like, dtype=int32, shape = [n_class]
Number of support vectors for each class.
dual_coef_ : array, shape = [n_class-1, n_SV]
Coefficients of the support vector in the decision function.
For multiclass, coefficient for all 1-vs-1 classifiers.
The layout of the coefficients in the multiclass case is somewhat
non-trivial. See the section about multi-class classification in
the SVM section of the User Guide for details.
coef_ : array, shape = [n_class * (n_class-1) / 2, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
`coef_` is readonly property derived from `dual_coef_` and
`support_vectors_`.
intercept_ : array, shape = [n_class * (n_class-1) / 2]
Constants in decision function.
Examples
--------
>>> import numpy as np
>>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
>>> y = np.array([1, 1, 2, 2])
>>> from sklearn.svm import NuSVC
>>> clf = NuSVC()
>>> clf.fit(X, y) #doctest: +NORMALIZE_WHITESPACE
NuSVC(cache_size=200, class_weight=None, coef0=0.0,
decision_function_shape='ovr', degree=3, gamma='auto', kernel='rbf',
max_iter=-1, nu=0.5, probability=False, random_state=None,
shrinking=True, tol=0.001, verbose=False)
>>> print(clf.predict([[-0.8, -1]]))
[1]
See also
--------
SVC
Support Vector Machine for classification using libsvm.
LinearSVC
Scalable linear Support Vector Machine for classification using
liblinear.
"""
def __init__(self, nu=0.5, kernel='rbf', degree=3, gamma='auto', coef0=0.0,
shrinking=True, probability=False, tol=1e-3, cache_size=200,
class_weight=None, verbose=False, max_iter=-1,
decision_function_shape='ovr', random_state=None):
super(NuSVC, self).__init__(
impl='nu_svc', kernel=kernel, degree=degree, gamma=gamma,
coef0=coef0, tol=tol, C=0., nu=nu, shrinking=shrinking,
probability=probability, cache_size=cache_size,
class_weight=class_weight, verbose=verbose, max_iter=max_iter,
decision_function_shape=decision_function_shape,
random_state=random_state)
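# Illustrative sketch (a hypothetical helper, not part of scikit-learn):
# nu lower-bounds the fraction of support vectors, so a larger nu typically
# retains more training points as support vectors. The nu values and data
# are arbitrary demo choices; with balanced classes both settings are
# feasible.
def _example_nusvc_nu():
    from sklearn.datasets import make_classification
    X, y = make_classification(n_samples=200, n_features=4, random_state=0)
    few = NuSVC(nu=0.1).fit(X, y)
    many = NuSVC(nu=0.7).fit(X, y)
    return few.n_support_.sum(), many.n_support_.sum()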
class SVR(BaseLibSVM, RegressorMixin):
"""Epsilon-Support Vector Regression.
The free parameters in the model are C and epsilon.
The implementation is based on libsvm.
Read more in the :ref:`User Guide <svm_regression>`.
Parameters
----------
C : float, optional (default=1.0)
Penalty parameter C of the error term.
epsilon : float, optional (default=0.1)
Epsilon in the epsilon-SVR model. It specifies the epsilon-tube
within which no penalty is associated in the training loss function
with points predicted within a distance epsilon from the actual
value.
kernel : string, optional (default='rbf')
Specifies the kernel type to be used in the algorithm.
It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
a callable.
If none is given, 'rbf' will be used. If a callable is given it is
used to precompute the kernel matrix.
degree : int, optional (default=3)
Degree of the polynomial kernel function ('poly').
Ignored by all other kernels.
gamma : float, optional (default='auto')
Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.
If gamma is 'auto' then 1/n_features will be used instead.
coef0 : float, optional (default=0.0)
Independent term in kernel function.
It is only significant in 'poly' and 'sigmoid'.
shrinking : boolean, optional (default=True)
Whether to use the shrinking heuristic.
tol : float, optional (default=1e-3)
Tolerance for stopping criterion.
cache_size : float, optional
Specify the size of the kernel cache (in MB).
verbose : bool, default: False
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in libsvm that, if enabled, may not work
properly in a multithreaded context.
max_iter : int, optional (default=-1)
Hard limit on iterations within solver, or -1 for no limit.
Attributes
----------
support_ : array-like, shape = [n_SV]
Indices of support vectors.
support_vectors_ : array-like, shape = [nSV, n_features]
Support vectors.
dual_coef_ : array, shape = [1, n_SV]
Coefficients of the support vector in the decision function.
coef_ : array, shape = [1, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
`coef_` is readonly property derived from `dual_coef_` and
`support_vectors_`.
intercept_ : array, shape = [1]
Constants in decision function.
sample_weight : array-like, shape = [n_samples]
Individual weights for each sample
Examples
--------
>>> from sklearn.svm import SVR
>>> import numpy as np
>>> n_samples, n_features = 10, 5
>>> np.random.seed(0)
>>> y = np.random.randn(n_samples)
>>> X = np.random.randn(n_samples, n_features)
>>> clf = SVR(C=1.0, epsilon=0.2)
>>> clf.fit(X, y) #doctest: +NORMALIZE_WHITESPACE
SVR(C=1.0, cache_size=200, coef0=0.0, degree=3, epsilon=0.2, gamma='auto',
kernel='rbf', max_iter=-1, shrinking=True, tol=0.001, verbose=False)
See also
--------
NuSVR
Support Vector Machine for regression implemented using libsvm
using a parameter to control the number of support vectors.
LinearSVR
Scalable Linear Support Vector Machine for regression
implemented using liblinear.
"""
def __init__(self, kernel='rbf', degree=3, gamma='auto', coef0=0.0,
tol=1e-3, C=1.0, epsilon=0.1, shrinking=True,
cache_size=200, verbose=False, max_iter=-1):
super(SVR, self).__init__(
'epsilon_svr', kernel=kernel, degree=degree, gamma=gamma,
coef0=coef0, tol=tol, C=C, nu=0., epsilon=epsilon, verbose=verbose,
shrinking=shrinking, probability=False, cache_size=cache_size,
class_weight=None, max_iter=max_iter, random_state=None)
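# Illustrative sketch (a hypothetical helper, not part of scikit-learn):
# a wider epsilon-tube lets more training points incur zero loss inside it,
# which usually leaves fewer support vectors. All values are arbitrary demo
# settings.
def _example_svr_epsilon():
    rng = np.random.RandomState(0)
    X = np.sort(rng.rand(80, 1), axis=0)
    y = np.sin(4 * X).ravel() + 0.05 * rng.randn(80)
    tight = SVR(epsilon=0.01).fit(X, y)
    loose = SVR(epsilon=0.3).fit(X, y)
    # Typically len(tight.support_) > len(loose.support_).
    return len(tight.support_), len(loose.support_)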
class NuSVR(BaseLibSVM, RegressorMixin):
"""Nu Support Vector Regression.
Similar to NuSVC, for regression, uses a parameter nu to control
the number of support vectors. However, unlike NuSVC, where nu
replaces C, here nu replaces the parameter epsilon of epsilon-SVR.
The implementation is based on libsvm.
Read more in the :ref:`User Guide <svm_regression>`.
Parameters
----------
C : float, optional (default=1.0)
Penalty parameter C of the error term.
nu : float, optional
An upper bound on the fraction of training errors and a lower bound of
the fraction of support vectors. Should be in the interval (0, 1]. By
default 0.5 will be taken.
kernel : string, optional (default='rbf')
Specifies the kernel type to be used in the algorithm.
It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
a callable.
If none is given, 'rbf' will be used. If a callable is given it is
used to precompute the kernel matrix.
degree : int, optional (default=3)
Degree of the polynomial kernel function ('poly').
Ignored by all other kernels.
gamma : float, optional (default='auto')
Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.
If gamma is 'auto' then 1/n_features will be used instead.
coef0 : float, optional (default=0.0)
Independent term in kernel function.
It is only significant in 'poly' and 'sigmoid'.
shrinking : boolean, optional (default=True)
Whether to use the shrinking heuristic.
tol : float, optional (default=1e-3)
Tolerance for stopping criterion.
cache_size : float, optional
Specify the size of the kernel cache (in MB).
verbose : bool, default: False
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in libsvm that, if enabled, may not work
properly in a multithreaded context.
max_iter : int, optional (default=-1)
Hard limit on iterations within solver, or -1 for no limit.
Attributes
----------
support_ : array-like, shape = [n_SV]
Indices of support vectors.
support_vectors_ : array-like, shape = [nSV, n_features]
Support vectors.
dual_coef_ : array, shape = [1, n_SV]
Coefficients of the support vector in the decision function.
coef_ : array, shape = [1, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
`coef_` is readonly property derived from `dual_coef_` and
`support_vectors_`.
intercept_ : array, shape = [1]
Constants in decision function.
Examples
--------
>>> from sklearn.svm import NuSVR
>>> import numpy as np
>>> n_samples, n_features = 10, 5
>>> np.random.seed(0)
>>> y = np.random.randn(n_samples)
>>> X = np.random.randn(n_samples, n_features)
>>> clf = NuSVR(C=1.0, nu=0.1)
>>> clf.fit(X, y) #doctest: +NORMALIZE_WHITESPACE
NuSVR(C=1.0, cache_size=200, coef0=0.0, degree=3, gamma='auto',
kernel='rbf', max_iter=-1, nu=0.1, shrinking=True, tol=0.001,
verbose=False)
See also
--------
NuSVC
Support Vector Machine for classification implemented with libsvm
with a parameter to control the number of support vectors.
SVR
epsilon Support Vector Machine for regression implemented with libsvm.
"""
def __init__(self, nu=0.5, C=1.0, kernel='rbf', degree=3,
gamma='auto', coef0=0.0, shrinking=True, tol=1e-3,
cache_size=200, verbose=False, max_iter=-1):
super(NuSVR, self).__init__(
'nu_svr', kernel=kernel, degree=degree, gamma=gamma, coef0=coef0,
tol=tol, C=C, nu=nu, epsilon=0., shrinking=shrinking,
probability=False, cache_size=cache_size, class_weight=None,
verbose=verbose, max_iter=max_iter, random_state=None)
class OneClassSVM(BaseLibSVM):
"""Unsupervised Outlier Detection.
Estimate the support of a high-dimensional distribution.
The implementation is based on libsvm.
Read more in the :ref:`User Guide <svm_outlier_detection>`.
Parameters
----------
kernel : string, optional (default='rbf')
Specifies the kernel type to be used in the algorithm.
It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
a callable.
If none is given, 'rbf' will be used. If a callable is given it is
used to precompute the kernel matrix.
nu : float, optional
An upper bound on the fraction of training
errors and a lower bound of the fraction of support
vectors. Should be in the interval (0, 1]. By default 0.5
will be taken.
degree : int, optional (default=3)
Degree of the polynomial kernel function ('poly').
Ignored by all other kernels.
gamma : float, optional (default='auto')
Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.
If gamma is 'auto' then 1/n_features will be used instead.
coef0 : float, optional (default=0.0)
Independent term in kernel function.
It is only significant in 'poly' and 'sigmoid'.
tol : float, optional
Tolerance for stopping criterion.
shrinking : boolean, optional
Whether to use the shrinking heuristic.
cache_size : float, optional
Specify the size of the kernel cache (in MB).
verbose : bool, default: False
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in libsvm that, if enabled, may not work
properly in a multithreaded context.
max_iter : int, optional (default=-1)
Hard limit on iterations within solver, or -1 for no limit.
random_state : int, RandomState instance or None, optional (default=None)
Ignored.
.. deprecated:: 0.20
``random_state`` has been deprecated in 0.20 and will be removed in
0.22.
Attributes
----------
support_ : array-like, shape = [n_SV]
Indices of support vectors.
support_vectors_ : array-like, shape = [nSV, n_features]
Support vectors.
dual_coef_ : array, shape = [1, n_SV]
Coefficients of the support vectors in the decision function.
coef_ : array, shape = [1, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
`coef_` is readonly property derived from `dual_coef_` and
`support_vectors_`
intercept_ : array, shape = [1,]
Constant in the decision function.
"""
def __init__(self, kernel='rbf', degree=3, gamma='auto', coef0=0.0,
tol=1e-3, nu=0.5, shrinking=True, cache_size=200,
verbose=False, max_iter=-1, random_state=None):
super(OneClassSVM, self).__init__(
'one_class', kernel, degree, gamma, coef0, tol, 0., nu, 0.,
shrinking, False, cache_size, None, verbose, max_iter,
random_state)
def fit(self, X, y=None, sample_weight=None, **params):
"""
Detects the soft boundary of the set of samples X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Set of samples, where n_samples is the number of samples and
n_features is the number of features.
sample_weight : array-like, shape (n_samples,)
Per-sample weights. Rescale C per sample. Higher weights
force the classifier to put more emphasis on these points.
Returns
-------
self : object
Returns self.
Notes
-----
If X is not a C-ordered contiguous array it is copied.
"""
if self.random_state is not None:
warnings.warn("The random_state parameter is deprecated and will"
" be removed in version 0.22.", DeprecationWarning)
super(OneClassSVM, self).fit(X, np.ones(_num_samples(X)),
sample_weight=sample_weight, **params)
return self
def decision_function(self, X):
"""Signed distance to the separating hyperplane.
Signed distance is positive for an inlier and negative for an outlier.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Returns
-------
X : array-like, shape (n_samples,)
Returns the decision function of the samples.
"""
dec = self._decision_function(X)
return dec
def predict(self, X):
"""
Perform classification on samples in X.
For a one-class model, +1 or -1 is returned.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
For kernel="precomputed", the expected shape of X is
[n_samples_test, n_samples_train]
Returns
-------
y_pred : array, shape (n_samples,)
Class labels for samples in X.
"""
y = super(OneClassSVM, self).predict(X)
return np.asarray(y, dtype=np.intp)
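# Illustrative sketch (a hypothetical helper, not part of scikit-learn):
# train on data assumed to be mostly inliers, then flag new points; +1 marks
# inliers and -1 outliers. nu approximates the expected outlier fraction and
# is an arbitrary choice here.
def _example_one_class_svm():
    rng = np.random.RandomState(0)
    X_train = 0.3 * rng.randn(100, 2)  # points clustered around the origin
    X_new = np.array([[0.1, -0.1], [4.0, 4.0]])  # one central, one far away
    clf = OneClassSVM(nu=0.1, kernel='rbf', gamma=0.1).fit(X_train)
    return clf.predict(X_new)  # typically array([ 1, -1])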
| bsd-3-clause |
yunque/librosa | librosa/core/time_frequency.py | 1 | 24239 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''Time and frequency utilities'''
import numpy as np
import re
import six
from ..util.exceptions import ParameterError
__all__ = ['frames_to_samples', 'frames_to_time',
'samples_to_frames', 'samples_to_time',
'time_to_samples', 'time_to_frames',
'note_to_hz', 'note_to_midi',
'midi_to_hz', 'midi_to_note',
'hz_to_note', 'hz_to_midi',
'hz_to_mel', 'hz_to_octs',
'mel_to_hz',
'octs_to_hz',
'fft_frequencies',
'cqt_frequencies',
'mel_frequencies',
'A_weighting']
def frames_to_samples(frames, hop_length=512, n_fft=None):
"""Converts frame indices to audio sample indices
Parameters
----------
frames : np.ndarray [shape=(n,)]
vector of frame indices
hop_length : int > 0 [scalar]
number of samples between successive frames
n_fft : None or int > 0 [scalar]
Optional: length of the FFT window.
If given, time conversion will include an offset of `n_fft / 2`
to counteract windowing effects when using a non-centered STFT.
Returns
-------
samples : np.ndarray [shape=(n,)]
sample index (or indices) corresponding to each given frame number:
`samples[i] = frames[i] * hop_length`
See Also
--------
frames_to_time : convert frame indices to time values
samples_to_frames : convert sample indices to frame indices
Examples
--------
>>> y, sr = librosa.load(librosa.util.example_audio_file())
>>> tempo, beats = librosa.beat.beat_track(y, sr=sr)
>>> beat_samples = librosa.frames_to_samples(beats)
"""
offset = 0
if n_fft is not None:
offset = int(n_fft // 2)
return (np.atleast_1d(frames) * hop_length + offset).astype(int)
def samples_to_frames(samples, hop_length=512, n_fft=None):
"""Converts sample indices into STFT frames.
Examples
--------
>>> # Get the frame numbers for every 256 samples
>>> librosa.samples_to_frames(np.arange(0, 22050, 256))
array([ 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6,
7, 7, 8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 13, 13,
14, 14, 15, 15, 16, 16, 17, 17, 18, 18, 19, 19, 20, 20,
21, 21, 22, 22, 23, 23, 24, 24, 25, 25, 26, 26, 27, 27,
28, 28, 29, 29, 30, 30, 31, 31, 32, 32, 33, 33, 34, 34,
35, 35, 36, 36, 37, 37, 38, 38, 39, 39, 40, 40, 41, 41,
42, 42, 43])
Parameters
----------
samples : np.ndarray [shape=(n,)]
vector of sample indices
hop_length : int > 0 [scalar]
number of samples between successive frames
n_fft : None or int > 0 [scalar]
Optional: length of the FFT window.
If given, time conversion will include an offset of `- n_fft / 2`
to counteract windowing effects in STFT.
.. note:: This may result in negative frame indices.
Returns
-------
frames : np.ndarray [shape=(n,), dtype=int]
Frame numbers corresponding to the given sample indices:
`frames[i] = floor( samples[i] / hop_length )`
See Also
--------
samples_to_time : convert sample indices to time values
frames_to_samples : convert frame indices to sample indices
"""
offset = 0
if n_fft is not None:
offset = int(n_fft // 2)
samples = np.atleast_1d(samples)
return np.floor((samples - offset) // hop_length).astype(int)
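# Illustrative sketch (a hypothetical helper, not part of librosa): the two
# conversions above are simple affine maps, so a frames -> samples -> frames
# round trip recovers the original indices when n_fft is None.
def _example_frame_sample_roundtrip(hop_length=512):
    frames = np.arange(5)
    samples = frames_to_samples(frames, hop_length=hop_length)
    return samples_to_frames(samples, hop_length=hop_length)  # [0, 1, 2, 3, 4]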
def frames_to_time(frames, sr=22050, hop_length=512, n_fft=None):
"""Converts frame counts to time (seconds)
Parameters
----------
frames : np.ndarray [shape=(n,)]
vector of frame numbers
sr : number > 0 [scalar]
audio sampling rate
hop_length : int > 0 [scalar]
number of samples between successive frames
n_fft : None or int > 0 [scalar]
Optional: length of the FFT window.
If given, time conversion will include an offset of `n_fft / 2`
to counteract windowing effects when using a non-centered STFT.
Returns
-------
times : np.ndarray [shape=(n,)]
time (in seconds) of each given frame number:
`times[i] = frames[i] * hop_length / sr`
See Also
--------
time_to_frames : convert time values to frame indices
frames_to_samples : convert frame indices to sample indices
Examples
--------
>>> y, sr = librosa.load(librosa.util.example_audio_file())
>>> tempo, beats = librosa.beat.beat_track(y, sr=sr)
>>> beat_times = librosa.frames_to_time(beats, sr=sr)
"""
samples = frames_to_samples(frames,
hop_length=hop_length,
n_fft=n_fft)
return samples_to_time(samples, sr=sr)
def time_to_frames(times, sr=22050, hop_length=512, n_fft=None):
"""Converts time stamps into STFT frames.
Parameters
----------
times : np.ndarray [shape=(n,)]
vector of time stamps
sr : number > 0 [scalar]
audio sampling rate
hop_length : int > 0 [scalar]
number of samples between successive frames
n_fft : None or int > 0 [scalar]
Optional: length of the FFT window.
If given, time conversion will include an offset of `- n_fft / 2`
to counteract windowing effects in STFT.
.. note:: This may result in negative frame indices.
Returns
-------
frames : np.ndarray [shape=(n,), dtype=int]
Frame numbers corresponding to the given times:
`frames[i] = floor( times[i] * sr / hop_length )`
See Also
--------
frames_to_time : convert frame indices to time values
time_to_samples : convert time values to sample indices
Examples
--------
Get the frame numbers for every 100ms
>>> librosa.time_to_frames(np.arange(0, 1, 0.1),
... sr=22050, hop_length=512)
array([ 0, 4, 8, 12, 17, 21, 25, 30, 34, 38])
"""
samples = time_to_samples(times, sr=sr)
return samples_to_frames(samples, hop_length=hop_length, n_fft=n_fft)
def time_to_samples(times, sr=22050):
'''Convert timestamps (in seconds) to sample indices.
Parameters
----------
times : np.ndarray
Array of time values (in seconds)
sr : number > 0
Sampling rate
Returns
-------
samples : np.ndarray [shape=times.shape, dtype=int]
Sample indices corresponding to values in `times`
See Also
--------
time_to_frames : convert time values to frame indices
samples_to_time : convert sample indices to time values
Examples
--------
>>> librosa.time_to_samples(np.arange(0, 1, 0.1), sr=22050)
array([ 0, 2205, 4410, 6615, 8820, 11025, 13230, 15435,
17640, 19845])
'''
return (np.atleast_1d(times) * sr).astype(int)
def samples_to_time(samples, sr=22050):
'''Convert sample indices to time (in seconds).
Parameters
----------
samples : np.ndarray
Array of sample indices
sr : number > 0
Sampling rate
Returns
-------
times : np.ndarray [shape=samples.shape, dtype=float]
Time values corresponding to `samples` (in seconds)
See Also
--------
samples_to_frames : convert sample indices to frame indices
time_to_samples : convert time values to sample indices
Examples
--------
Get timestamps corresponding to every 512 samples
>>> librosa.samples_to_time(np.arange(0, 22050, 512))
array([ 0. , 0.023, 0.046, 0.07 , 0.093, 0.116, 0.139,
0.163, 0.186, 0.209, 0.232, 0.255, 0.279, 0.302,
0.325, 0.348, 0.372, 0.395, 0.418, 0.441, 0.464,
0.488, 0.511, 0.534, 0.557, 0.58 , 0.604, 0.627,
0.65 , 0.673, 0.697, 0.72 , 0.743, 0.766, 0.789,
0.813, 0.836, 0.859, 0.882, 0.906, 0.929, 0.952,
0.975, 0.998])
'''
return np.atleast_1d(samples) / float(sr)
def note_to_hz(note, **kwargs):
'''Convert one or more note names to frequency (Hz)
Examples
--------
>>> # Get the frequency of a note
>>> librosa.note_to_hz('C')
array([ 16.352])
>>> # Or multiple notes
>>> librosa.note_to_hz(['A3', 'A4', 'A5'])
array([ 220., 440., 880.])
>>> # Or notes with tuning deviations
>>> librosa.note_to_hz('C2-32', round_midi=False)
array([ 64.209])
Parameters
----------
note : str or iterable of str
One or more note names to convert
kwargs : additional keyword arguments
Additional parameters to `note_to_midi`
Returns
-------
frequencies : np.ndarray [shape=(len(note),)]
Array of frequencies (in Hz) corresponding to `note`
See Also
--------
midi_to_hz
note_to_midi
hz_to_note
'''
return midi_to_hz(note_to_midi(note, **kwargs))
def note_to_midi(note, round_midi=True):
'''Convert one or more spelled notes to MIDI number(s).
Notes may be spelled out with optional accidentals or octave numbers.
The leading note name is case-insensitive.
Sharps are indicated with ``#``, flats may be indicated with ``!`` or ``b``.
Parameters
----------
note : str or iterable of str
One or more note names.
round_midi : bool
- If `True`, round the result to the nearest integer MIDI note
- Otherwise, keep fractional MIDI values (explicit cent deviations are preserved)
Returns
-------
midi : float or np.array
Midi note numbers corresponding to inputs.
Raises
------
ParameterError
If the input is not in valid note format
See Also
--------
midi_to_note
note_to_hz
Examples
--------
>>> librosa.note_to_midi('C')
12
>>> librosa.note_to_midi('C#3')
49
>>> librosa.note_to_midi('f4')
65
>>> librosa.note_to_midi('Bb-1')
10
>>> librosa.note_to_midi('A!8')
116
>>> # Lists of notes also work
>>> librosa.note_to_midi(['C', 'E', 'G'])
array([12, 16, 19])
'''
if not isinstance(note, six.string_types):
return np.array([note_to_midi(n, round_midi=round_midi) for n in note])
pitch_map = {'C': 0, 'D': 2, 'E': 4, 'F': 5, 'G': 7, 'A': 9, 'B': 11}
acc_map = {'#': 1, '': 0, 'b': -1, '!': -1}
match = re.match(r'^(?P<note>[A-Ga-g])'
r'(?P<accidental>[#b!]*)'
r'(?P<octave>[+-]?\d+)?'
r'(?P<cents>[+-]\d+)?$',
note)
if not match:
raise ParameterError('Improper note format: {:s}'.format(note))
pitch = match.group('note').upper()
offset = np.sum([acc_map[o] for o in match.group('accidental')])
octave = match.group('octave')
cents = match.group('cents')
if not octave:
octave = 0
else:
octave = int(octave)
if not cents:
cents = 0
else:
cents = int(cents) * 1e-2
note_value = 12 * (octave + 1) + pitch_map[pitch] + offset + cents
if round_midi:
note_value = int(np.round(note_value))
return note_value
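# Illustrative sketch (a hypothetical helper, not part of librosa): note
# spellings may carry accidentals, octave numbers and cent deviations;
# round_midi=False keeps the deviation as a fractional MIDI value.
def _example_note_to_midi():
    exact = note_to_midi('A4')                         # 69
    flat = note_to_midi('Bb3')                         # 58
    detuned = note_to_midi('A4+50', round_midi=False)  # 69.5
    return exact, flat, detuned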
def midi_to_note(midi, octave=True, cents=False):
'''Convert one or more MIDI numbers to note strings.
MIDI numbers will be rounded to the nearest integer.
Notes will be of the format 'C0', 'C#0', 'D0', ...
Examples
--------
>>> librosa.midi_to_note(0)
'C-1'
>>> librosa.midi_to_note(37)
'C#2'
>>> librosa.midi_to_note(-2)
'A#-2'
>>> librosa.midi_to_note(104.7)
'A7'
>>> librosa.midi_to_note(104.7, cents=True)
'A7-30'
>>> librosa.midi_to_note(list(range(12, 24)))
['C0', 'C#0', 'D0', 'D#0', 'E0', 'F0', 'F#0', 'G0', 'G#0', 'A0', 'A#0', 'B0']
Parameters
----------
midi : number or iterable of numbers
Midi numbers to convert.
octave: bool
If True, include the octave number
cents: bool
If true, cent markers will be appended for fractional notes.
Eg, `midi_to_note(69.3, cents=True)` == `A4+30`
Returns
-------
notes : str or iterable of str
Strings describing each midi note.
Raises
------
ParameterError
if `cents` is True and `octave` is False
See Also
--------
midi_to_hz
note_to_midi
hz_to_note
'''
if cents and not octave:
raise ParameterError('Cannot encode cents without octave information.')
if not np.isscalar(midi):
return [midi_to_note(x, octave=octave, cents=cents) for x in midi]
note_map = ['C', 'C#', 'D', 'D#',
'E', 'F', 'F#', 'G',
'G#', 'A', 'A#', 'B']
note_num = int(np.round(midi))
note_cents = int(100 * np.around(midi - note_num, 2))
note = note_map[note_num % 12]
if octave:
note = '{:s}{:0d}'.format(note, int(note_num / 12) - 1)
if cents:
note = '{:s}{:+02d}'.format(note, note_cents)
return note
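# Illustrative sketch (a hypothetical helper, not part of librosa):
# midi_to_note and note_to_midi round-trip cleanly for integer MIDI numbers.
def _example_midi_note_roundtrip():
    names = midi_to_note([60, 61, 62])  # ['C4', 'C#4', 'D4']
    return note_to_midi(names)          # array([60, 61, 62])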
def midi_to_hz(notes):
"""Get the frequency (Hz) of MIDI note(s)
Examples
--------
>>> librosa.midi_to_hz(36)
array([ 65.406])
>>> librosa.midi_to_hz(np.arange(36, 48))
array([ 65.406, 69.296, 73.416, 77.782, 82.407,
87.307, 92.499, 97.999, 103.826, 110. ,
116.541, 123.471])
Parameters
----------
notes : int or np.ndarray [shape=(n,), dtype=int]
midi number(s) of the note(s)
Returns
-------
frequency : np.ndarray [shape=(n,), dtype=float]
frequency (frequencies) of `notes` in Hz
See Also
--------
hz_to_midi
note_to_hz
"""
return 440.0 * (2.0 ** ((np.atleast_1d(notes) - 69.0)/12.0))
def hz_to_midi(frequencies):
"""Get the closest MIDI note number(s) for given frequencies
Examples
--------
>>> librosa.hz_to_midi(60)
array([ 34.506])
>>> librosa.hz_to_midi([110, 220, 440])
array([ 45., 57., 69.])
Parameters
----------
frequencies : float or np.ndarray [shape=(n,), dtype=float]
frequencies to convert
Returns
-------
note_nums : np.ndarray [shape=(n,), dtype=int]
closest MIDI notes to `frequencies`
See Also
--------
midi_to_hz
note_to_midi
hz_to_note
"""
return 12 * (np.log2(np.atleast_1d(frequencies)) - np.log2(440.0)) + 69
def hz_to_note(frequencies, **kwargs):
'''Convert one or more frequencies (in Hz) to the nearest note names.
Parameters
----------
frequencies : float or iterable of float
Input frequencies, specified in Hz
kwargs : additional keyword arguments
Arguments passed through to `midi_to_note`
Returns
-------
notes : list of str
`notes[i]` is the closest note name to `frequency[i]`
(or `frequency` if the input is scalar)
See Also
--------
hz_to_midi
midi_to_note
note_to_hz
Examples
--------
Get a single note name for a frequency
>>> librosa.hz_to_note(440.0)
['A4']
Get multiple notes with cent deviation
>>> librosa.hz_to_note([32, 64], cents=True)
['C1-38', 'C2-38']
Get multiple notes, but suppress octave labels
>>> librosa.hz_to_note(440.0 * (2.0 ** np.linspace(0, 1, 12)),
... octave=False)
['A', 'A#', 'B', 'C', 'C#', 'D', 'E', 'F', 'F#', 'G', 'G#', 'A']
'''
return midi_to_note(hz_to_midi(frequencies), **kwargs)
def hz_to_mel(frequencies, htk=False):
"""Convert Hz to Mels
Examples
--------
>>> librosa.hz_to_mel(60)
array([ 0.9])
>>> librosa.hz_to_mel([110, 220, 440])
array([ 1.65, 3.3 , 6.6 ])
Parameters
----------
frequencies : np.ndarray [shape=(n,)] , float
scalar or array of frequencies
htk : bool
use HTK formula instead of Slaney
Returns
-------
mels : np.ndarray [shape=(n,)]
input frequencies in Mels
See Also
--------
mel_to_hz
"""
frequencies = np.atleast_1d(frequencies)
if htk:
return 2595.0 * np.log10(1.0 + frequencies / 700.0)
# Fill in the linear part
f_min = 0.0
f_sp = 200.0 / 3
mels = (frequencies - f_min) / f_sp
# Fill in the log-scale part
min_log_hz = 1000.0 # beginning of log region (Hz)
min_log_mel = (min_log_hz - f_min) / f_sp # same (Mels)
logstep = np.log(6.4) / 27.0 # step size for log region
log_t = (frequencies >= min_log_hz)
mels[log_t] = min_log_mel + np.log(frequencies[log_t]/min_log_hz) / logstep
return mels
def mel_to_hz(mels, htk=False):
"""Convert mel bin numbers to frequencies
Examples
--------
>>> librosa.mel_to_hz(3)
array([ 200.])
>>> librosa.mel_to_hz([1,2,3,4,5])
array([ 66.667, 133.333, 200. , 266.667, 333.333])
Parameters
----------
mels : np.ndarray [shape=(n,)], float
mel bins to convert
htk : bool
use HTK formula instead of Slaney
Returns
-------
frequencies : np.ndarray [shape=(n,)]
input mels in Hz
See Also
--------
hz_to_mel
"""
mels = np.atleast_1d(mels)
if htk:
return 700.0 * (10.0**(mels / 2595.0) - 1.0)
# Fill in the linear scale
f_min = 0.0
f_sp = 200.0 / 3
freqs = f_min + f_sp * mels
# And now the nonlinear scale
min_log_hz = 1000.0 # beginning of log region (Hz)
min_log_mel = (min_log_hz - f_min) / f_sp # same (Mels)
logstep = np.log(6.4) / 27.0 # step size for log region
log_t = (mels >= min_log_mel)
freqs[log_t] = min_log_hz * np.exp(logstep * (mels[log_t] - min_log_mel))
return freqs
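# Illustrative sketch (a hypothetical helper, not part of librosa): hz_to_mel
# and mel_to_hz are inverses under either convention, so a round trip
# recovers the input frequencies up to floating-point error.
def _example_mel_roundtrip():
    freqs = np.array([100.0, 1000.0, 4000.0])
    slaney = mel_to_hz(hz_to_mel(freqs))
    htk = mel_to_hz(hz_to_mel(freqs, htk=True), htk=True)
    return slaney, htk  # both approximately [100., 1000., 4000.]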
def hz_to_octs(frequencies, A440=440.0):
"""Convert frequencies (Hz) to (fractional) octave numbers.
Examples
--------
>>> librosa.hz_to_octs(440.0)
array([ 4.])
>>> librosa.hz_to_octs([32, 64, 128, 256])
array([ 0.219, 1.219, 2.219, 3.219])
Parameters
----------
frequencies : np.ndarray [shape=(n,)] or float
scalar or vector of frequencies
A440 : float
frequency of A440 (in Hz)
Returns
-------
octaves : np.ndarray [shape=(n,)]
octave number for each frequency
See Also
--------
octs_to_hz
"""
return np.log2(np.atleast_1d(frequencies) / (float(A440) / 16))
def octs_to_hz(octs, A440=440.0):
"""Convert octaves numbers to frequencies.
Octaves are counted relative to A.
Examples
--------
>>> librosa.octs_to_hz(1)
array([ 55.])
>>> librosa.octs_to_hz([-2, -1, 0, 1, 2])
array([ 6.875, 13.75 , 27.5 , 55. , 110. ])
Parameters
----------
octaves : np.ndarray [shape=(n,)] or float
octave number for each frequency
A440 : float
frequency of A440
Returns
-------
frequencies : np.ndarray [shape=(n,)]
scalar or vector of frequencies
See Also
--------
hz_to_octs
"""
return (float(A440) / 16)*(2.0**np.atleast_1d(octs))
def fft_frequencies(sr=22050, n_fft=2048):
'''Alternative implementation of `np.fft.fftfreq`
Parameters
----------
sr : number > 0 [scalar]
Audio sampling rate
n_fft : int > 0 [scalar]
FFT window size
Returns
-------
freqs : np.ndarray [shape=(1 + n_fft/2,)]
Frequencies `(0, sr/n_fft, 2*sr/n_fft, ..., sr/2)`
Examples
--------
>>> librosa.fft_frequencies(sr=22050, n_fft=16)
array([ 0. , 1378.125, 2756.25 , 4134.375,
5512.5 , 6890.625, 8268.75 , 9646.875, 11025. ])
'''
return np.linspace(0,
float(sr) / 2,
int(1 + n_fft//2),
endpoint=True)
def cqt_frequencies(n_bins, fmin, bins_per_octave=12, tuning=0.0):
"""Compute the center frequencies of Constant-Q bins.
Examples
--------
>>> # Get the CQT frequencies for 24 notes, starting at C2
>>> librosa.cqt_frequencies(24, fmin=librosa.note_to_hz('C2'))
array([ 65.406, 69.296, 73.416, 77.782, 82.407, 87.307,
92.499, 97.999, 103.826, 110. , 116.541, 123.471,
130.813, 138.591, 146.832, 155.563, 164.814, 174.614,
184.997, 195.998, 207.652, 220. , 233.082, 246.942])
Parameters
----------
n_bins : int > 0 [scalar]
Number of constant-Q bins
fmin : float > 0 [scalar]
Minimum frequency
bins_per_octave : int > 0 [scalar]
Number of bins per octave
tuning : float in `[-0.5, +0.5)`
Deviation from A440 tuning in fractional bins (cents)
Returns
-------
frequencies : np.ndarray [shape=(n_bins,)]
Center frequency for each CQT bin
"""
correction = 2.0**(float(tuning) / bins_per_octave)
frequencies = 2.0**(np.arange(0, n_bins, dtype=float) / bins_per_octave)
return correction * fmin * frequencies
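# Illustrative sketch (a hypothetical helper, not part of librosa): with 12
# bins per octave, every 12th CQT bin doubles in frequency, so bin 12 sits
# exactly one octave above fmin. fmin=55 Hz (A1) is an arbitrary choice.
def _example_cqt_octave():
    freqs = cqt_frequencies(n_bins=24, fmin=55.0, bins_per_octave=12)
    return freqs[0], freqs[12]  # 55.0 and approximately 110.0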
def mel_frequencies(n_mels=128, fmin=0.0, fmax=11025.0, htk=False):
"""Compute the center frequencies of mel bands.
Parameters
----------
n_mels : int > 0 [scalar]
number of Mel bins
fmin : float >= 0 [scalar]
minimum frequency (Hz)
fmax : float >= 0 [scalar]
maximum frequency (Hz)
htk : bool
use HTK formula instead of Slaney
Returns
-------
bin_frequencies : ndarray [shape=(n_mels,)]
vector of n_mels frequencies in Hz which are uniformly spaced on the Mel
axis.
Examples
--------
>>> librosa.mel_frequencies(n_mels=40)
array([ 0. , 85.317, 170.635, 255.952,
341.269, 426.586, 511.904, 597.221,
682.538, 767.855, 853.173, 938.49 ,
1024.856, 1119.114, 1222.042, 1334.436,
1457.167, 1591.187, 1737.532, 1897.337,
2071.84 , 2262.393, 2470.47 , 2697.686,
2945.799, 3216.731, 3512.582, 3835.643,
4188.417, 4573.636, 4994.285, 5453.621,
5955.205, 6502.92 , 7101.009, 7754.107,
8467.272, 9246.028, 10096.408, 11025. ])
"""
# 'Center freqs' of mel bands - uniformly spaced between limits
min_mel = hz_to_mel(fmin, htk=htk)
max_mel = hz_to_mel(fmax, htk=htk)
mels = np.linspace(min_mel, max_mel, n_mels)
return mel_to_hz(mels, htk=htk)
# A-weighting should be capitalized: suppress the naming warning
def A_weighting(frequencies, min_db=-80.0): # pylint: disable=invalid-name
'''Compute the A-weighting of a set of frequencies.
Parameters
----------
frequencies : scalar or np.ndarray [shape=(n,)]
One or more frequencies (in Hz)
min_db : float [scalar] or None
Clip weights below this threshold.
If `None`, no clipping is performed.
Returns
-------
A_weighting : scalar or np.ndarray [shape=(n,)]
`A_weighting[i]` is the A-weighting of `frequencies[i]`
See Also
--------
perceptual_weighting
Examples
--------
Get the A-weighting for CQT frequencies
>>> import matplotlib.pyplot as plt
>>> freqs = librosa.cqt_frequencies(108, librosa.note_to_hz('C1'))
>>> aw = librosa.A_weighting(freqs)
>>> plt.plot(freqs, aw)
>>> plt.xlabel('Frequency (Hz)')
>>> plt.ylabel('Weighting (log10)')
>>> plt.title('A-Weighting of CQT frequencies')
'''
# Vectorize to make our lives easier
frequencies = np.atleast_1d(frequencies)
# Pre-compute squared frequency
f_sq = frequencies**2.0
const = np.array([12200, 20.6, 107.7, 737.9])**2.0
weights = 2.0 + 20.0 * (np.log10(const[0]) + 4 * np.log10(frequencies)
- np.log10(f_sq + const[0])
- np.log10(f_sq + const[1])
- 0.5 * np.log10(f_sq + const[2])
- 0.5 * np.log10(f_sq + const[3]))
if min_db is not None:
weights = np.maximum(min_db, weights)
return weights
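# Illustrative sketch (a hypothetical helper, not part of librosa): the
# A-weighting curve is close to 0 dB near 1 kHz and strongly attenuates very
# low frequencies.
def _example_a_weighting():
    freqs = np.array([50.0, 1000.0, 10000.0])
    return A_weighting(freqs)  # roughly [-30.3, 0.0, -2.5] dB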
| isc |
macks22/scikit-learn | sklearn/tests/test_multiclass.py | 136 | 23649 | import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_greater
from sklearn.multiclass import OneVsRestClassifier
from sklearn.multiclass import OneVsOneClassifier
from sklearn.multiclass import OutputCodeClassifier
from sklearn.multiclass import fit_ovr
from sklearn.multiclass import fit_ovo
from sklearn.multiclass import fit_ecoc
from sklearn.multiclass import predict_ovr
from sklearn.multiclass import predict_ovo
from sklearn.multiclass import predict_ecoc
from sklearn.multiclass import predict_proba_ovr
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.preprocessing import LabelBinarizer
from sklearn.svm import LinearSVC, SVC
from sklearn.naive_bayes import MultinomialNB
from sklearn.linear_model import (LinearRegression, Lasso, ElasticNet, Ridge,
Perceptron, LogisticRegression)
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.grid_search import GridSearchCV
from sklearn.pipeline import Pipeline
from sklearn import svm
from sklearn import datasets
from sklearn.externals.six.moves import zip
iris = datasets.load_iris()
rng = np.random.RandomState(0)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
n_classes = 3
def test_ovr_exceptions():
ovr = OneVsRestClassifier(LinearSVC(random_state=0))
assert_raises(ValueError, ovr.predict, [])
with ignore_warnings():
assert_raises(ValueError, predict_ovr, [LinearSVC(), MultinomialNB()],
LabelBinarizer(), [])
# Fail on multioutput data
assert_raises(ValueError, OneVsRestClassifier(MultinomialNB()).fit,
np.array([[1, 0], [0, 1]]),
np.array([[1, 2], [3, 1]]))
assert_raises(ValueError, OneVsRestClassifier(MultinomialNB()).fit,
np.array([[1, 0], [0, 1]]),
np.array([[1.5, 2.4], [3.1, 0.8]]))
def test_ovr_fit_predict():
# A classifier which implements decision_function.
ovr = OneVsRestClassifier(LinearSVC(random_state=0))
pred = ovr.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ovr.estimators_), n_classes)
clf = LinearSVC(random_state=0)
pred2 = clf.fit(iris.data, iris.target).predict(iris.data)
assert_equal(np.mean(iris.target == pred), np.mean(iris.target == pred2))
# A classifier which implements predict_proba.
ovr = OneVsRestClassifier(MultinomialNB())
pred = ovr.fit(iris.data, iris.target).predict(iris.data)
assert_greater(np.mean(iris.target == pred), 0.65)
def test_ovr_ovo_regressor():
# test that ovr and ovo work on regressors which don't have a decision_function
ovr = OneVsRestClassifier(DecisionTreeRegressor())
pred = ovr.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ovr.estimators_), n_classes)
assert_array_equal(np.unique(pred), [0, 1, 2])
# we are doing something sensible
assert_greater(np.mean(pred == iris.target), .9)
ovr = OneVsOneClassifier(DecisionTreeRegressor())
pred = ovr.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ovr.estimators_), n_classes * (n_classes - 1) / 2)
assert_array_equal(np.unique(pred), [0, 1, 2])
# we are doing something sensible
assert_greater(np.mean(pred == iris.target), .9)
def test_ovr_fit_predict_sparse():
for sparse in [sp.csr_matrix, sp.csc_matrix, sp.coo_matrix, sp.dok_matrix,
sp.lil_matrix]:
base_clf = MultinomialNB(alpha=1)
X, Y = datasets.make_multilabel_classification(n_samples=100,
n_features=20,
n_classes=5,
n_labels=3,
length=50,
allow_unlabeled=True,
random_state=0)
X_train, Y_train = X[:80], Y[:80]
X_test = X[80:]
clf = OneVsRestClassifier(base_clf).fit(X_train, Y_train)
Y_pred = clf.predict(X_test)
clf_sprs = OneVsRestClassifier(base_clf).fit(X_train, sparse(Y_train))
Y_pred_sprs = clf_sprs.predict(X_test)
assert_true(clf.multilabel_)
assert_true(sp.issparse(Y_pred_sprs))
assert_array_equal(Y_pred_sprs.toarray(), Y_pred)
# Test predict_proba
Y_proba = clf_sprs.predict_proba(X_test)
# predict assigns a label if the probability that the
# sample has the label is greater than 0.5.
pred = Y_proba > .5
assert_array_equal(pred, Y_pred_sprs.toarray())
# Test decision_function
clf_sprs = OneVsRestClassifier(svm.SVC()).fit(X_train, sparse(Y_train))
dec_pred = (clf_sprs.decision_function(X_test) > 0).astype(int)
assert_array_equal(dec_pred, clf_sprs.predict(X_test).toarray())
def test_ovr_always_present():
# Test that ovr works with classes that are always present or absent.
    # Note: this tests the case where _ConstantPredictor is utilised
X = np.ones((10, 2))
X[:5, :] = 0
# Build an indicator matrix where two features are always on.
# As list of lists, it would be: [[int(i >= 5), 2, 3] for i in range(10)]
y = np.zeros((10, 3))
y[5:, 0] = 1
y[:, 1] = 1
y[:, 2] = 1
ovr = OneVsRestClassifier(LogisticRegression())
assert_warns(UserWarning, ovr.fit, X, y)
y_pred = ovr.predict(X)
assert_array_equal(np.array(y_pred), np.array(y))
y_pred = ovr.decision_function(X)
assert_equal(np.unique(y_pred[:, -2:]), 1)
y_pred = ovr.predict_proba(X)
assert_array_equal(y_pred[:, -1], np.ones(X.shape[0]))
# y has a constantly absent label
y = np.zeros((10, 2))
y[5:, 0] = 1 # variable label
ovr = OneVsRestClassifier(LogisticRegression())
assert_warns(UserWarning, ovr.fit, X, y)
y_pred = ovr.predict_proba(X)
assert_array_equal(y_pred[:, -1], np.zeros(X.shape[0]))
def test_ovr_multiclass():
# Toy dataset where features correspond directly to labels.
X = np.array([[0, 0, 5], [0, 5, 0], [3, 0, 0], [0, 0, 6], [6, 0, 0]])
y = ["eggs", "spam", "ham", "eggs", "ham"]
Y = np.array([[0, 0, 1],
[0, 1, 0],
[1, 0, 0],
[0, 0, 1],
[1, 0, 0]])
classes = set("ham eggs spam".split())
for base_clf in (MultinomialNB(), LinearSVC(random_state=0),
LinearRegression(), Ridge(),
ElasticNet()):
clf = OneVsRestClassifier(base_clf).fit(X, y)
assert_equal(set(clf.classes_), classes)
y_pred = clf.predict(np.array([[0, 0, 4]]))[0]
assert_equal(set(y_pred), set("eggs"))
# test input as label indicator matrix
clf = OneVsRestClassifier(base_clf).fit(X, Y)
y_pred = clf.predict([[0, 0, 4]])[0]
assert_array_equal(y_pred, [0, 0, 1])
def test_ovr_binary():
# Toy dataset where features correspond directly to labels.
X = np.array([[0, 0, 5], [0, 5, 0], [3, 0, 0], [0, 0, 6], [6, 0, 0]])
y = ["eggs", "spam", "spam", "eggs", "spam"]
Y = np.array([[0, 1, 1, 0, 1]]).T
classes = set("eggs spam".split())
def conduct_test(base_clf, test_predict_proba=False):
clf = OneVsRestClassifier(base_clf).fit(X, y)
assert_equal(set(clf.classes_), classes)
y_pred = clf.predict(np.array([[0, 0, 4]]))[0]
assert_equal(set(y_pred), set("eggs"))
if test_predict_proba:
X_test = np.array([[0, 0, 4]])
probabilities = clf.predict_proba(X_test)
assert_equal(2, len(probabilities[0]))
assert_equal(clf.classes_[np.argmax(probabilities, axis=1)],
clf.predict(X_test))
# test input as label indicator matrix
clf = OneVsRestClassifier(base_clf).fit(X, Y)
y_pred = clf.predict([[3, 0, 0]])[0]
assert_equal(y_pred, 1)
for base_clf in (LinearSVC(random_state=0), LinearRegression(),
Ridge(), ElasticNet()):
conduct_test(base_clf)
for base_clf in (MultinomialNB(), SVC(probability=True),
LogisticRegression()):
conduct_test(base_clf, test_predict_proba=True)
def test_ovr_multilabel():
# Toy dataset where features correspond directly to labels.
X = np.array([[0, 4, 5], [0, 5, 0], [3, 3, 3], [4, 0, 6], [6, 0, 0]])
y = np.array([[0, 1, 1],
[0, 1, 0],
[1, 1, 1],
[1, 0, 1],
[1, 0, 0]])
for base_clf in (MultinomialNB(), LinearSVC(random_state=0),
LinearRegression(), Ridge(),
ElasticNet(), Lasso(alpha=0.5)):
clf = OneVsRestClassifier(base_clf).fit(X, y)
y_pred = clf.predict([[0, 4, 4]])[0]
assert_array_equal(y_pred, [0, 1, 1])
assert_true(clf.multilabel_)
def test_ovr_fit_predict_svc():
ovr = OneVsRestClassifier(svm.SVC())
ovr.fit(iris.data, iris.target)
assert_equal(len(ovr.estimators_), 3)
assert_greater(ovr.score(iris.data, iris.target), .9)
def test_ovr_multilabel_dataset():
base_clf = MultinomialNB(alpha=1)
for au, prec, recall in zip((True, False), (0.51, 0.66), (0.51, 0.80)):
X, Y = datasets.make_multilabel_classification(n_samples=100,
n_features=20,
n_classes=5,
n_labels=2,
length=50,
allow_unlabeled=au,
random_state=0)
X_train, Y_train = X[:80], Y[:80]
X_test, Y_test = X[80:], Y[80:]
clf = OneVsRestClassifier(base_clf).fit(X_train, Y_train)
Y_pred = clf.predict(X_test)
assert_true(clf.multilabel_)
assert_almost_equal(precision_score(Y_test, Y_pred, average="micro"),
prec,
decimal=2)
assert_almost_equal(recall_score(Y_test, Y_pred, average="micro"),
recall,
decimal=2)
def test_ovr_multilabel_predict_proba():
base_clf = MultinomialNB(alpha=1)
for au in (False, True):
X, Y = datasets.make_multilabel_classification(n_samples=100,
n_features=20,
n_classes=5,
n_labels=3,
length=50,
allow_unlabeled=au,
random_state=0)
X_train, Y_train = X[:80], Y[:80]
X_test = X[80:]
clf = OneVsRestClassifier(base_clf).fit(X_train, Y_train)
# decision function only estimator. Fails in current implementation.
decision_only = OneVsRestClassifier(svm.SVR()).fit(X_train, Y_train)
assert_raises(AttributeError, decision_only.predict_proba, X_test)
# Estimator with predict_proba disabled, depending on parameters.
decision_only = OneVsRestClassifier(svm.SVC(probability=False))
decision_only.fit(X_train, Y_train)
assert_raises(AttributeError, decision_only.predict_proba, X_test)
Y_pred = clf.predict(X_test)
Y_proba = clf.predict_proba(X_test)
# predict assigns a label if the probability that the
# sample has the label is greater than 0.5.
pred = Y_proba > .5
assert_array_equal(pred, Y_pred)
def test_ovr_single_label_predict_proba():
base_clf = MultinomialNB(alpha=1)
X, Y = iris.data, iris.target
X_train, Y_train = X[:80], Y[:80]
X_test = X[80:]
clf = OneVsRestClassifier(base_clf).fit(X_train, Y_train)
# decision function only estimator. Fails in current implementation.
decision_only = OneVsRestClassifier(svm.SVR()).fit(X_train, Y_train)
assert_raises(AttributeError, decision_only.predict_proba, X_test)
Y_pred = clf.predict(X_test)
Y_proba = clf.predict_proba(X_test)
assert_almost_equal(Y_proba.sum(axis=1), 1.0)
# predict assigns a label if the probability that the
# sample has the label is greater than 0.5.
pred = np.array([l.argmax() for l in Y_proba])
assert_false((pred - Y_pred).any())
def test_ovr_multilabel_decision_function():
X, Y = datasets.make_multilabel_classification(n_samples=100,
n_features=20,
n_classes=5,
n_labels=3,
length=50,
allow_unlabeled=True,
random_state=0)
X_train, Y_train = X[:80], Y[:80]
X_test = X[80:]
clf = OneVsRestClassifier(svm.SVC()).fit(X_train, Y_train)
assert_array_equal((clf.decision_function(X_test) > 0).astype(int),
clf.predict(X_test))
def test_ovr_single_label_decision_function():
X, Y = datasets.make_classification(n_samples=100,
n_features=20,
random_state=0)
X_train, Y_train = X[:80], Y[:80]
X_test = X[80:]
clf = OneVsRestClassifier(svm.SVC()).fit(X_train, Y_train)
assert_array_equal(clf.decision_function(X_test).ravel() > 0,
clf.predict(X_test))
def test_ovr_gridsearch():
ovr = OneVsRestClassifier(LinearSVC(random_state=0))
Cs = [0.1, 0.5, 0.8]
cv = GridSearchCV(ovr, {'estimator__C': Cs})
cv.fit(iris.data, iris.target)
best_C = cv.best_estimator_.estimators_[0].C
assert_true(best_C in Cs)
def test_ovr_pipeline():
# Test with pipeline of length one
# This test is needed because the multiclass estimators may fail to detect
# the presence of predict_proba or decision_function.
clf = Pipeline([("tree", DecisionTreeClassifier())])
ovr_pipe = OneVsRestClassifier(clf)
ovr_pipe.fit(iris.data, iris.target)
ovr = OneVsRestClassifier(DecisionTreeClassifier())
ovr.fit(iris.data, iris.target)
assert_array_equal(ovr.predict(iris.data), ovr_pipe.predict(iris.data))
def test_ovr_coef_():
for base_classifier in [SVC(kernel='linear', random_state=0), LinearSVC(random_state=0)]:
# SVC has sparse coef with sparse input data
ovr = OneVsRestClassifier(base_classifier)
for X in [iris.data, sp.csr_matrix(iris.data)]:
# test with dense and sparse coef
ovr.fit(X, iris.target)
shape = ovr.coef_.shape
assert_equal(shape[0], n_classes)
assert_equal(shape[1], iris.data.shape[1])
# don't densify sparse coefficients
assert_equal(sp.issparse(ovr.estimators_[0].coef_), sp.issparse(ovr.coef_))
def test_ovr_coef_exceptions():
# Not fitted exception!
ovr = OneVsRestClassifier(LinearSVC(random_state=0))
# lambda is needed because we don't want coef_ to be evaluated right away
assert_raises(ValueError, lambda x: ovr.coef_, None)
# Doesn't have coef_ exception!
ovr = OneVsRestClassifier(DecisionTreeClassifier())
ovr.fit(iris.data, iris.target)
assert_raises(AttributeError, lambda x: ovr.coef_, None)
def test_ovo_exceptions():
ovo = OneVsOneClassifier(LinearSVC(random_state=0))
assert_raises(ValueError, ovo.predict, [])
def test_ovo_fit_on_list():
# Test that OneVsOne fitting works with a list of targets and yields the
# same output as predict from an array
ovo = OneVsOneClassifier(LinearSVC(random_state=0))
prediction_from_array = ovo.fit(iris.data, iris.target).predict(iris.data)
prediction_from_list = ovo.fit(iris.data,
list(iris.target)).predict(iris.data)
assert_array_equal(prediction_from_array, prediction_from_list)
def test_ovo_fit_predict():
# A classifier which implements decision_function.
ovo = OneVsOneClassifier(LinearSVC(random_state=0))
ovo.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ovo.estimators_), n_classes * (n_classes - 1) / 2)
# A classifier which implements predict_proba.
ovo = OneVsOneClassifier(MultinomialNB())
ovo.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ovo.estimators_), n_classes * (n_classes - 1) / 2)
def test_ovo_decision_function():
n_samples = iris.data.shape[0]
ovo_clf = OneVsOneClassifier(LinearSVC(random_state=0))
ovo_clf.fit(iris.data, iris.target)
decisions = ovo_clf.decision_function(iris.data)
assert_equal(decisions.shape, (n_samples, n_classes))
assert_array_equal(decisions.argmax(axis=1), ovo_clf.predict(iris.data))
# Compute the votes
votes = np.zeros((n_samples, n_classes))
k = 0
for i in range(n_classes):
for j in range(i + 1, n_classes):
pred = ovo_clf.estimators_[k].predict(iris.data)
votes[pred == 0, i] += 1
votes[pred == 1, j] += 1
k += 1
# Extract votes and verify
assert_array_equal(votes, np.round(decisions))
for class_idx in range(n_classes):
        # For each sample and each class, there are only 3 possible vote levels
        # because there are only 3 distinct class pairs and thus 3 distinct
        # binary classifiers.
# Therefore, sorting predictions based on votes would yield
# mostly tied predictions:
assert_true(set(votes[:, class_idx]).issubset(set([0., 1., 2.])))
# The OVO decision function on the other hand is able to resolve
# most of the ties on this data as it combines both the vote counts
# and the aggregated confidence levels of the binary classifiers
# to compute the aggregate decision function. The iris dataset
# has 150 samples with a couple of duplicates. The OvO decisions
# can resolve most of the ties:
assert_greater(len(np.unique(decisions[:, class_idx])), 146)
def test_ovo_gridsearch():
ovo = OneVsOneClassifier(LinearSVC(random_state=0))
Cs = [0.1, 0.5, 0.8]
cv = GridSearchCV(ovo, {'estimator__C': Cs})
cv.fit(iris.data, iris.target)
best_C = cv.best_estimator_.estimators_[0].C
assert_true(best_C in Cs)
def test_ovo_ties():
# Test that ties are broken using the decision function,
# not defaulting to the smallest label
X = np.array([[1, 2], [2, 1], [-2, 1], [-2, -1]])
y = np.array([2, 0, 1, 2])
multi_clf = OneVsOneClassifier(Perceptron(shuffle=False))
ovo_prediction = multi_clf.fit(X, y).predict(X)
ovo_decision = multi_clf.decision_function(X)
# Classifiers are in order 0-1, 0-2, 1-2
# Use decision_function to compute the votes and the normalized
# sum_of_confidences, which is used to disambiguate when there is a tie in
# votes.
votes = np.round(ovo_decision)
normalized_confidences = ovo_decision - votes
# For the first point, there is one vote per class
assert_array_equal(votes[0, :], 1)
# For the rest, there is no tie and the prediction is the argmax
assert_array_equal(np.argmax(votes[1:], axis=1), ovo_prediction[1:])
# For the tie, the prediction is the class with the highest score
assert_equal(ovo_prediction[0], normalized_confidences[0].argmax())
def test_ovo_ties2():
    # test that ties can be won not only by the first two labels
X = np.array([[1, 2], [2, 1], [-2, 1], [-2, -1]])
y_ref = np.array([2, 0, 1, 2])
# cycle through labels so that each label wins once
for i in range(3):
y = (y_ref + i) % 3
multi_clf = OneVsOneClassifier(Perceptron(shuffle=False))
ovo_prediction = multi_clf.fit(X, y).predict(X)
assert_equal(ovo_prediction[0], i % 3)
def test_ovo_string_y():
# Test that the OvO doesn't mess up the encoding of string labels
X = np.eye(4)
y = np.array(['a', 'b', 'c', 'd'])
ovo = OneVsOneClassifier(LinearSVC())
ovo.fit(X, y)
assert_array_equal(y, ovo.predict(X))
def test_ecoc_exceptions():
ecoc = OutputCodeClassifier(LinearSVC(random_state=0))
assert_raises(ValueError, ecoc.predict, [])
def test_ecoc_fit_predict():
# A classifier which implements decision_function.
ecoc = OutputCodeClassifier(LinearSVC(random_state=0),
code_size=2, random_state=0)
ecoc.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ecoc.estimators_), n_classes * 2)
# A classifier which implements predict_proba.
ecoc = OutputCodeClassifier(MultinomialNB(), code_size=2, random_state=0)
ecoc.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ecoc.estimators_), n_classes * 2)
def test_ecoc_gridsearch():
ecoc = OutputCodeClassifier(LinearSVC(random_state=0),
random_state=0)
Cs = [0.1, 0.5, 0.8]
cv = GridSearchCV(ecoc, {'estimator__C': Cs})
cv.fit(iris.data, iris.target)
best_C = cv.best_estimator_.estimators_[0].C
assert_true(best_C in Cs)
@ignore_warnings
def test_deprecated():
base_estimator = DecisionTreeClassifier(random_state=0)
X, Y = iris.data, iris.target
X_train, Y_train = X[:80], Y[:80]
X_test = X[80:]
all_metas = [
(OneVsRestClassifier, fit_ovr, predict_ovr, predict_proba_ovr),
(OneVsOneClassifier, fit_ovo, predict_ovo, None),
(OutputCodeClassifier, fit_ecoc, predict_ecoc, None),
]
for MetaEst, fit_func, predict_func, proba_func in all_metas:
try:
meta_est = MetaEst(base_estimator,
random_state=0).fit(X_train, Y_train)
fitted_return = fit_func(base_estimator, X_train, Y_train,
random_state=0)
except TypeError:
meta_est = MetaEst(base_estimator).fit(X_train, Y_train)
fitted_return = fit_func(base_estimator, X_train, Y_train)
if len(fitted_return) == 2:
estimators_, classes_or_lb = fitted_return
assert_almost_equal(predict_func(estimators_, classes_or_lb,
X_test),
meta_est.predict(X_test))
if proba_func is not None:
assert_almost_equal(proba_func(estimators_, X_test,
is_multilabel=False),
meta_est.predict_proba(X_test))
else:
estimators_, classes_or_lb, codebook = fitted_return
assert_almost_equal(predict_func(estimators_, classes_or_lb,
codebook, X_test),
meta_est.predict(X_test))
| bsd-3-clause |
fyffyt/scikit-learn | sklearn/covariance/tests/test_graph_lasso.py | 272 | 5245 | """ Test the graph_lasso module.
"""
import sys
import numpy as np
from scipy import linalg
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_less
from sklearn.covariance import (graph_lasso, GraphLasso, GraphLassoCV,
empirical_covariance)
from sklearn.datasets.samples_generator import make_sparse_spd_matrix
from sklearn.externals.six.moves import StringIO
from sklearn.utils import check_random_state
from sklearn import datasets
def test_graph_lasso(random_state=0):
# Sample data from a sparse multivariate normal
dim = 20
n_samples = 100
random_state = check_random_state(random_state)
prec = make_sparse_spd_matrix(dim, alpha=.95,
random_state=random_state)
cov = linalg.inv(prec)
X = random_state.multivariate_normal(np.zeros(dim), cov, size=n_samples)
emp_cov = empirical_covariance(X)
for alpha in (0., .1, .25):
covs = dict()
icovs = dict()
for method in ('cd', 'lars'):
cov_, icov_, costs = graph_lasso(emp_cov, alpha=alpha, mode=method,
return_costs=True)
covs[method] = cov_
icovs[method] = icov_
costs, dual_gap = np.array(costs).T
# Check that the costs always decrease (doesn't hold if alpha == 0)
if not alpha == 0:
assert_array_less(np.diff(costs), 0)
# Check that the 2 approaches give similar results
assert_array_almost_equal(covs['cd'], covs['lars'], decimal=4)
assert_array_almost_equal(icovs['cd'], icovs['lars'], decimal=4)
# Smoke test the estimator
model = GraphLasso(alpha=.25).fit(X)
model.score(X)
assert_array_almost_equal(model.covariance_, covs['cd'], decimal=4)
assert_array_almost_equal(model.covariance_, covs['lars'], decimal=4)
# For a centered matrix, assume_centered could be chosen True or False
# Check that this returns indeed the same result for centered data
Z = X - X.mean(0)
precs = list()
for assume_centered in (False, True):
prec_ = GraphLasso(assume_centered=assume_centered).fit(Z).precision_
precs.append(prec_)
assert_array_almost_equal(precs[0], precs[1])
def test_graph_lasso_iris():
# Hard-coded solution from R glasso package for alpha=1.0
    # The iris datasets in R and sklearn do not match in a few places; these
    # values are for the sklearn version.
cov_R = np.array([
[0.68112222, 0.0, 0.2651911, 0.02467558],
[0.00, 0.1867507, 0.0, 0.00],
[0.26519111, 0.0, 3.0924249, 0.28774489],
[0.02467558, 0.0, 0.2877449, 0.57853156]
])
icov_R = np.array([
[1.5188780, 0.0, -0.1302515, 0.0],
[0.0, 5.354733, 0.0, 0.0],
[-0.1302515, 0.0, 0.3502322, -0.1686399],
[0.0, 0.0, -0.1686399, 1.8123908]
])
X = datasets.load_iris().data
emp_cov = empirical_covariance(X)
for method in ('cd', 'lars'):
cov, icov = graph_lasso(emp_cov, alpha=1.0, return_costs=False,
mode=method)
assert_array_almost_equal(cov, cov_R)
assert_array_almost_equal(icov, icov_R)
def test_graph_lasso_iris_singular():
# Small subset of rows to test the rank-deficient case
# Need to choose samples such that none of the variances are zero
indices = np.arange(10, 13)
# Hard-coded solution from R glasso package for alpha=0.01
cov_R = np.array([
[0.08, 0.056666662595, 0.00229729713223, 0.00153153142149],
[0.056666662595, 0.082222222222, 0.00333333333333, 0.00222222222222],
[0.002297297132, 0.003333333333, 0.00666666666667, 0.00009009009009],
[0.001531531421, 0.002222222222, 0.00009009009009, 0.00222222222222]
])
icov_R = np.array([
[24.42244057, -16.831679593, 0.0, 0.0],
[-16.83168201, 24.351841681, -6.206896552, -12.5],
[0.0, -6.206896171, 153.103448276, 0.0],
[0.0, -12.499999143, 0.0, 462.5]
])
X = datasets.load_iris().data[indices, :]
emp_cov = empirical_covariance(X)
for method in ('cd', 'lars'):
cov, icov = graph_lasso(emp_cov, alpha=0.01, return_costs=False,
mode=method)
assert_array_almost_equal(cov, cov_R, decimal=5)
assert_array_almost_equal(icov, icov_R, decimal=5)
def test_graph_lasso_cv(random_state=1):
# Sample data from a sparse multivariate normal
dim = 5
n_samples = 6
random_state = check_random_state(random_state)
prec = make_sparse_spd_matrix(dim, alpha=.96,
random_state=random_state)
cov = linalg.inv(prec)
X = random_state.multivariate_normal(np.zeros(dim), cov, size=n_samples)
# Capture stdout, to smoke test the verbose mode
orig_stdout = sys.stdout
try:
sys.stdout = StringIO()
# We need verbose very high so that Parallel prints on stdout
GraphLassoCV(verbose=100, alphas=5, tol=1e-1).fit(X)
finally:
sys.stdout = orig_stdout
# Smoke test with specified alphas
GraphLassoCV(alphas=[0.8, 0.5], tol=1e-1, n_jobs=1).fit(X)
| bsd-3-clause |
lcharleux/numerical_analysis | doc/Optimisation/Example_code/chemin.py | 1 | 2838 | #------------------------------------------------------------------------
# FINDING THE FASTEST PATH BETWEEN TWO POINTS A AND B
#------------------------------------------------------------------------
#------------------------------------------------------------------------
# PACKAGES
from scipy import optimize as opt # Optimize
import numpy as np # Numpy
import matplotlib.pyplot as plt # Pyplot
from matplotlib import cm # Colormaps
#------------------------------------------------------------------------
#------------------------------------------------------------------------
# POSITION OF THE POINTS AND PHYSICAL DATA
xa, xb = 0., 1.
ya, yb = 1., 0.
m = 1.  # mass in kg
g = 10.  # gravity in m s**-2
#------------------------------------------------------------------------
#------------------------------------------------------------------------
#------------------------------------------------------------------------
# MESHING ALONG X
Np = 30  # Desired number of nodes
X = np.linspace(xa, xb, Np)  # x coordinates of the nodes
#------------------------------------------------------------------------
#------------------------------------------------------------------------
# AN INTUITIVE CANDIDATE SOLUTION: THE STRAIGHT LINE
Y = ya + (yb - ya) / (xb - xa) * X  # Altitude of the nodes
#------------------------------------------------------------------------
#------------------------------------------------------------------------
# COMPUTATION OF THE TRAVEL TIME
def temps(Y):
    # Compute the potential energy, assuming it vanishes at A
    Ep = m * g * (Y - Y[0])
    # Compute the kinetic energy (via the integrated work-energy theorem)
    Ec = - Ep
    # Compute the speed
    V = (2. / m * Ec) **.5
    # Compute the average speed on each element
    Ve = (V[1:] + V[:-1]) / 2.
    # Compute the step in X:
    dx = X[1] - X[0]
    # Compute the length of each element
    Le = ( ( Y[1:] - Y[:-1] )**2 + dx**2)**.5
    # Compute the travel time on each element
    te = Le / Ve
    # Compute the total travel time
t = te.sum()
return t, V
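# Illustrative sketch (an assumption for illustration, not the author's
# solution): the `opt` module imported above could be used to minimise the
# travel time over the interior node altitudes, keeping the end points A and B
# fixed, e.g. with opt.fmin(temps_total, Y[1:-1]).
def temps_total(Y_interieur):
    # Rebuild the full altitude profile with the end points held at A and B
    Y_complet = np.concatenate(([ya], Y_interieur, [yb]))
    return temps(Y_complet)[0]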
#------------------------------------------------------------------------
#------------------------------------------------------------------------
# APPLICATION
t, V = temps(Y)
#------------------------------------------------------------------------
#------------------------------------------------------------------------
# PLOTTING
fig = plt.figure(0)
ax = fig.add_subplot(211)
plt.title('Temps de parcours: $t = {0:.2f} s$'.format(t))
plt.ylabel('Altitude $Y$')
plt.plot(X, Y)
plt.grid()
ax = fig.add_subplot(212)
plt.xlabel('Position $X$')
plt.ylabel('Vitesse $V$')
plt.plot(X, V)
plt.grid()
plt.show()
#------------------------------------------------------------------------
| gpl-2.0 |
char-lie/statistical-analysis | course/Notebook/common.py | 1 | 6280 | import matplotlib.pyplot as plt
from numpy import loadtxt, arange, log, delete, array, ones, concatenate, vstack, corrcoef
from numpy.linalg import lstsq
from scipy.stats import f
from scipy.stats.mstats import normaltest
from pandas.stats.moments import ewma
from pandas import Series
class TimeSeries:
def __init__(self, data, lag=2):
self.data = data
self.span = get_best_span(self.data)
self.smoothed = data_smoother(self.data, self.span)
self.trend_errors = self.data - self.smoothed
self.autocorrelation = array(
[Series(self.trend_errors).autocorr(i)
for i in range(self.trend_errors.size // 4)])
errors_coefficients, errors_estimate = get_errors_estimate(
self.trend_errors, lag)
self.errors_coefficients = errors_coefficients
self.errors_estimate = errors_estimate
self.data_estimate = (self.smoothed
+ concatenate(([0] * lag, self.errors_estimate)))
self.estimate_errors = self.data - self.data_estimate
self.alpha = 2. / (self.span + 1)
S1 = get_S(self.smoothed, self.alpha)
S2 = get_S(S1, self.alpha)
S3 = get_S(S2, self.alpha)
a0 = get_a0(S1, S2, S3, self.alpha)
a1 = get_a1(S1, S2, S3, self.alpha)
a2 = get_a2(S1, S2, S3, self.alpha)
trend_forecast = [forecast(a0, a1, a2, steps) for steps in (1, 2, 3)]
self.trend_forecasted = concatenate((self.smoothed, trend_forecast))
self.errors_forecasted = concatenate(
(self.errors_estimate,
get_errors_forecast(
self.errors_estimate, self.errors_coefficients, 3)))
self.data_forecasted = (
self.trend_forecasted
+ concatenate(([0] * lag, self.errors_forecasted)))
test_head = array(self.data_forecasted[:6])
test_tail = [forecast(a0, a1, a2, 3, time)
+ get_errors_forecast(self.errors_estimate, self.errors_coefficients, 3, time - 3)[2]
for time in range(3, self.data.size)]
self.forecast_test = concatenate((test_head, test_tail))
def data_smoother(data, span):
smoothed = ewma(data, span=span)
errors = data - smoothed
    return smoothed  # + errors.mean()
def get_fixed_std(data_series, position, span):
data_series = delete(data_series, position)
return (data_series - data_smoother(data_series, span=span)).std()
# Anomalies
def get_f_test_values(data_series, span):
smoothed_series = data_smoother(data_series, span=span)
anomalies_stds = [get_fixed_std(data_series, i, span) for i in range(data_series.size)]
anomalies = array([(data_series - smoothed_series).std() / std for std in anomalies_stds])**2
return anomalies
def get_f_weights(anomalies):
return array([1 - f.cdf(anomaly, anomalies.size - 1, anomalies.size - 2) for anomaly in anomalies])
def display_variances_without_anomalies(time, data, span, anomaly_points=()):
data = data.copy()
for anomaly_point in anomaly_points:
data[anomaly_point] = .5 * (
data[anomaly_point - 1] + data[anomaly_point + 1])
anomalies = get_f_test_values(data, span)
f_test = get_f_weights(anomalies)
plt.plot(time, f_test)
plt.suptitle('F-test tail weights', fontweight='bold')
if len(anomaly_points) > 0:
plt.title('Removed anomalies ' + ', '.join(['$%d$'%p for p in anomaly_points]))
plt.xlabel('Anomaly point')
plt.ylabel('Tail weight')
plt.show()
return f_test.min(), f_test.argmin()
# Errors
def get_smoothing_errors_normality(data, span):
return normaltest(data - data_smoother(data, span=span))
def get_best_span(data, min_span=2, max_span=10):
return (min_span
+ array([get_smoothing_errors_normality(data, s).statistic
for s in range(min_span, max_span + 1)]).argmin())
def get_errors_estimate(errors, lag):
result_errors = errors[lag:]
regression_errors = array([concatenate((errors[i - lag:i], [1])) for i in range(lag, errors.size)])
coefficients = lstsq(regression_errors, result_errors)[0]
autoregressors = (coefficients*regression_errors).sum(axis=1)
return coefficients, autoregressors
# Forecast
# Trend
def get_S(data, alpha):
result = [alpha * data[0]]
for value in data[1:]:
result.append(alpha * value + (1 - alpha) * result[-1])
return result
def get_a0(S1, S2, S3, alpha):
return [3 * (s1 - s2) + s3 for s1, s2, s3 in zip(S1, S2, S3)]
def get_a1(S1, S2, S3, alpha):
return [(alpha / (2 * ((1 - alpha)**2))) * (
(6 - 5 * alpha) * s1
- 2 * (5 - 4 * alpha) * s2
+ (4 - 3 * alpha) * s3) for s1, s2, s3 in zip(S1, S2, S3)]
def get_a2(S1, S2, S3, alpha):
return [((alpha**2) / ((1 - alpha)**2)) * (s1 - 2 * s2) + s3 for s1, s2, s3 in zip(S1, S2, S3)]
def forecast(a0, a1, a2, steps, time=-1):
return a0[time] + a1[time] * steps # + a2[time] * (steps**2) * .5
# Errors
def get_errors_forecast(regression_errors, coefficients, steps=1, time=-1):
errors = regression_errors[time - 1], regression_errors[time]
forecast = []
for _ in range(steps):
forecast.append(
coefficients[0] * errors[0]
+ coefficients[1] * errors[1]
+ coefficients[2])
errors = errors[1], forecast[-1]
return forecast
# Regressors model
def get_residuum(y, regressors, coefficients):
return y - array([c * r for c, r in zip(regressors, coefficients)]).sum(axis=0)[:y.size]
def get_coeffs(y, regressors, coefficients, sequences):
residuum = get_residuum(y, regressors, coefficients)
A = vstack([[residuum], sequences[:, :y.size]])
best = abs(corrcoef(A)[0][1:]).argmax()
regressor = sequences[best]
sequences = delete(sequences, best, axis=0)
regressors += [regressor]
cut_regressors = array(regressors)[..., :y.size]
transposed_regressors = cut_regressors.T
coefficients = lstsq(transposed_regressors, y)[0]
new_residuum = get_residuum(y, regressors, coefficients)
S = (residuum**2).sum(), (new_residuum**2).sum()
F = (y.size - len(regressors)) * (S[0] - S[1]) / S[1]
return regressors, coefficients, sequences, F, best
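if __name__ == '__main__':
    # Illustrative sketch only: the series below is a synthetic placeholder
    # (a linear trend plus a small deterministic wiggle), not the course data.
    demo_data = array([10.0 + 0.5 * t + 0.3 * (t % 7) for t in range(60)])
    demo_series = TimeSeries(demo_data, lag=2)
    # The last three values of data_forecasted are the 3-step-ahead forecast.
    print(demo_series.data_forecasted[-3:])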
| mit |
akrherz/idep | scripts/util/diagnose_huc12.py | 2 | 2245 | """Do some diagnostics on what the raw DEP files are telling us"""
import datetime
import sys
import glob
import pandas as pd
from pandas.io.sql import read_sql
from pyiem import dep
from pyiem.util import get_dbconn
YEARS = datetime.date.today().year - 2007 + 1
CONV = 4.463
def get_lengths(huc12, scenario):
"""Figure out our slope lengths."""
pgconn = get_dbconn("idep")
return read_sql(
"SELECT fpath, ST_Length(geom) as length from flowpaths "
"where scenario = %s and huc_12 = %s",
pgconn,
params=(scenario, huc12),
index_col="fpath",
)
def summarize_hillslopes(huc12, scenario):
"""Print out top hillslopes"""
lengths = get_lengths(huc12, scenario)
envs = glob.glob(
"/i/%s/env/%s/%s/*.env" % (scenario, huc12[:8], huc12[8:])
)
dfs = []
for env in envs:
df = dep.read_env(env)
df["flowpath"] = int(env.split("/")[-1].split("_")[1][:-4])
dfs.append(df)
df = pd.concat(dfs)
df = df.join(lengths, on="flowpath")
df["delivery_ta"] = df["sed_del"] / df["length"] * CONV / YEARS
flowpath = 424 # df2.index[0]
print(
df[df["flowpath"] == flowpath]
.groupby("year")
.sum()
.sort_index(ascending=True)
)
df2 = (
df[["delivery_ta", "flowpath"]]
.groupby("flowpath")
.sum()
.sort_values("delivery_ta", ascending=False)
)
print("==== TOP 5 HIGHEST SEDIMENT DELIVERY TOTALS")
print(df2.head())
print("==== TOP 5 LOWEST SEDIMENT DELIVERY TOTALS")
print(df2.tail())
print(df2.loc[flowpath])
df2 = df[df["flowpath"] == flowpath].sort_values(
"sed_del", ascending=False
)
print("==== TOP 5 HIGHEST SEDIMENT DELIVERY FOR %s" % (flowpath,))
print(df2[["date", "sed_del", "precip", "runoff", "av_det"]].head())
df3 = df2.groupby("year").sum().sort_values("sed_del", ascending=False)
print("==== TOP 5 HIGHEST SEDIMENT DELIVERY EVENTS FOR %s" % (flowpath,))
print(df3[["sed_del", "precip", "runoff", "av_det"]].head())
def main(argv):
"""Go Main"""
huc12 = argv[1]
scenario = argv[2]
summarize_hillslopes(huc12, scenario)
if __name__ == "__main__":
main(sys.argv)
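# Example invocation; both arguments are placeholder values, not real ones:
#   python diagnose_huc12.py 070801050306 0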
| mit |
mbayon/TFG-MachineLearning | vbig/lib/python2.7/site-packages/pandas/tests/series/test_quantile.py | 7 | 7083 | # coding=utf-8
# pylint: disable-msg=E1101,W0612
import pytest
import numpy as np
import pandas as pd
from pandas import (Index, Series, _np_version_under1p9)
from pandas.core.indexes.datetimes import Timestamp
from pandas.core.dtypes.common import is_integer
import pandas.util.testing as tm
from .common import TestData
class TestSeriesQuantile(TestData):
def test_quantile(self):
q = self.ts.quantile(0.1)
assert q == np.percentile(self.ts.valid(), 10)
q = self.ts.quantile(0.9)
assert q == np.percentile(self.ts.valid(), 90)
# object dtype
q = Series(self.ts, dtype=object).quantile(0.9)
assert q == np.percentile(self.ts.valid(), 90)
# datetime64[ns] dtype
dts = self.ts.index.to_series()
q = dts.quantile(.2)
assert q == Timestamp('2000-01-10 19:12:00')
# timedelta64[ns] dtype
tds = dts.diff()
q = tds.quantile(.25)
assert q == pd.to_timedelta('24:00:00')
# GH7661
result = Series([np.timedelta64('NaT')]).sum()
assert result is pd.NaT
msg = 'percentiles should all be in the interval \\[0, 1\\]'
for invalid in [-1, 2, [0.5, -1], [0.5, 2]]:
with tm.assert_raises_regex(ValueError, msg):
self.ts.quantile(invalid)
def test_quantile_multi(self):
qs = [.1, .9]
result = self.ts.quantile(qs)
expected = pd.Series([np.percentile(self.ts.valid(), 10),
np.percentile(self.ts.valid(), 90)],
index=qs, name=self.ts.name)
tm.assert_series_equal(result, expected)
dts = self.ts.index.to_series()
dts.name = 'xxx'
result = dts.quantile((.2, .2))
expected = Series([Timestamp('2000-01-10 19:12:00'),
Timestamp('2000-01-10 19:12:00')],
index=[.2, .2], name='xxx')
tm.assert_series_equal(result, expected)
result = self.ts.quantile([])
expected = pd.Series([], name=self.ts.name, index=Index(
[], dtype=float))
tm.assert_series_equal(result, expected)
@pytest.mark.skipif(_np_version_under1p9,
reason="Numpy version is under 1.9")
def test_quantile_interpolation(self):
# see gh-10174
# interpolation = linear (default case)
q = self.ts.quantile(0.1, interpolation='linear')
assert q == np.percentile(self.ts.valid(), 10)
q1 = self.ts.quantile(0.1)
assert q1 == np.percentile(self.ts.valid(), 10)
# test with and without interpolation keyword
assert q == q1
@pytest.mark.skipif(_np_version_under1p9,
reason="Numpy version is under 1.9")
def test_quantile_interpolation_dtype(self):
# GH #10174
# interpolation = linear (default case)
q = pd.Series([1, 3, 4]).quantile(0.5, interpolation='lower')
assert q == np.percentile(np.array([1, 3, 4]), 50)
assert is_integer(q)
q = pd.Series([1, 3, 4]).quantile(0.5, interpolation='higher')
assert q == np.percentile(np.array([1, 3, 4]), 50)
assert is_integer(q)
@pytest.mark.skipif(not _np_version_under1p9,
reason="Numpy version is greater 1.9")
def test_quantile_interpolation_np_lt_1p9(self):
# GH #10174
# interpolation = linear (default case)
q = self.ts.quantile(0.1, interpolation='linear')
assert q == np.percentile(self.ts.valid(), 10)
q1 = self.ts.quantile(0.1)
assert q1 == np.percentile(self.ts.valid(), 10)
# interpolation other than linear
msg = "Interpolation methods other than "
with tm.assert_raises_regex(ValueError, msg):
self.ts.quantile(0.9, interpolation='nearest')
# object dtype
with tm.assert_raises_regex(ValueError, msg):
Series(self.ts, dtype=object).quantile(0.7, interpolation='higher')
def test_quantile_nan(self):
# GH 13098
s = pd.Series([1, 2, 3, 4, np.nan])
result = s.quantile(0.5)
expected = 2.5
assert result == expected
# all nan/empty
cases = [Series([]), Series([np.nan, np.nan])]
for s in cases:
res = s.quantile(0.5)
assert np.isnan(res)
res = s.quantile([0.5])
tm.assert_series_equal(res, pd.Series([np.nan], index=[0.5]))
res = s.quantile([0.2, 0.3])
tm.assert_series_equal(res, pd.Series([np.nan, np.nan],
index=[0.2, 0.3]))
def test_quantile_box(self):
cases = [[pd.Timestamp('2011-01-01'), pd.Timestamp('2011-01-02'),
pd.Timestamp('2011-01-03')],
[pd.Timestamp('2011-01-01', tz='US/Eastern'),
pd.Timestamp('2011-01-02', tz='US/Eastern'),
pd.Timestamp('2011-01-03', tz='US/Eastern')],
[pd.Timedelta('1 days'), pd.Timedelta('2 days'),
pd.Timedelta('3 days')],
# NaT
[pd.Timestamp('2011-01-01'), pd.Timestamp('2011-01-02'),
pd.Timestamp('2011-01-03'), pd.NaT],
[pd.Timestamp('2011-01-01', tz='US/Eastern'),
pd.Timestamp('2011-01-02', tz='US/Eastern'),
pd.Timestamp('2011-01-03', tz='US/Eastern'), pd.NaT],
[pd.Timedelta('1 days'), pd.Timedelta('2 days'),
pd.Timedelta('3 days'), pd.NaT]]
for case in cases:
s = pd.Series(case, name='XXX')
res = s.quantile(0.5)
assert res == case[1]
res = s.quantile([0.5])
exp = pd.Series([case[1]], index=[0.5], name='XXX')
tm.assert_series_equal(res, exp)
def test_datetime_timedelta_quantiles(self):
# covers #9694
assert pd.isnull(Series([], dtype='M8[ns]').quantile(.5))
assert pd.isnull(Series([], dtype='m8[ns]').quantile(.5))
def test_quantile_nat(self):
res = Series([pd.NaT, pd.NaT]).quantile(0.5)
assert res is pd.NaT
res = Series([pd.NaT, pd.NaT]).quantile([0.5])
tm.assert_series_equal(res, pd.Series([pd.NaT], index=[0.5]))
def test_quantile_empty(self):
# floats
s = Series([], dtype='float64')
res = s.quantile(0.5)
assert np.isnan(res)
res = s.quantile([0.5])
exp = Series([np.nan], index=[0.5])
tm.assert_series_equal(res, exp)
# int
s = Series([], dtype='int64')
res = s.quantile(0.5)
assert np.isnan(res)
res = s.quantile([0.5])
exp = Series([np.nan], index=[0.5])
tm.assert_series_equal(res, exp)
# datetime
s = Series([], dtype='datetime64[ns]')
res = s.quantile(0.5)
assert res is pd.NaT
res = s.quantile([0.5])
exp = Series([pd.NaT], index=[0.5])
tm.assert_series_equal(res, exp)
| mit |
NixaSoftware/CVis | venv/lib/python2.7/site-packages/traitlets/config/loader.py | 13 | 28772 | # encoding: utf-8
"""A simple configuration system."""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
import argparse
import copy
import logging
import os
import re
import sys
import json
from ast import literal_eval
from ipython_genutils.path import filefind
from ipython_genutils import py3compat
from ipython_genutils.encoding import DEFAULT_ENCODING
from six import text_type
from traitlets.traitlets import HasTraits, List, Any
#-----------------------------------------------------------------------------
# Exceptions
#-----------------------------------------------------------------------------
class ConfigError(Exception):
pass
class ConfigLoaderError(ConfigError):
pass
class ConfigFileNotFound(ConfigError):
pass
class ArgumentError(ConfigLoaderError):
pass
#-----------------------------------------------------------------------------
# Argparse fix
#-----------------------------------------------------------------------------
# Unfortunately argparse by default prints help messages to stderr instead of
# stdout. This makes it annoying to capture long help screens at the command
# line, since one must know how to pipe stderr, which many users don't know how
# to do. So we override the print_help method with one that defaults to
# stdout and use our class instead.
class ArgumentParser(argparse.ArgumentParser):
"""Simple argparse subclass that prints help to stdout by default."""
def print_help(self, file=None):
if file is None:
file = sys.stdout
return super(ArgumentParser, self).print_help(file)
print_help.__doc__ = argparse.ArgumentParser.print_help.__doc__
#-----------------------------------------------------------------------------
# Config class for holding config information
#-----------------------------------------------------------------------------
class LazyConfigValue(HasTraits):
"""Proxy object for exposing methods on configurable containers
Exposes:
- append, extend, insert on lists
- update on dicts
- update, add on sets
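    Illustrative sketch of how recorded edits are applied by ``get_value``::

        >>> lzy = LazyConfigValue()
        >>> lzy.append(4)
        >>> lzy.prepend([0])
        >>> lzy.get_value([1, 2, 3])
        [0, 1, 2, 3, 4]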
"""
_value = None
# list methods
_extend = List()
_prepend = List()
def append(self, obj):
self._extend.append(obj)
def extend(self, other):
self._extend.extend(other)
def prepend(self, other):
"""like list.extend, but for the front"""
self._prepend[:0] = other
_inserts = List()
def insert(self, index, other):
if not isinstance(index, int):
raise TypeError("An integer is required")
self._inserts.append((index, other))
# dict methods
# update is used for both dict and set
_update = Any()
def update(self, other):
if self._update is None:
if isinstance(other, dict):
self._update = {}
else:
self._update = set()
self._update.update(other)
# set methods
def add(self, obj):
self.update({obj})
def get_value(self, initial):
"""construct the value from the initial one
after applying any insert / extend / update changes
"""
if self._value is not None:
return self._value
value = copy.deepcopy(initial)
if isinstance(value, list):
for idx, obj in self._inserts:
value.insert(idx, obj)
value[:0] = self._prepend
value.extend(self._extend)
elif isinstance(value, dict):
if self._update:
value.update(self._update)
elif isinstance(value, set):
if self._update:
value.update(self._update)
self._value = value
return value
def to_dict(self):
"""return JSONable dict form of my data
Currently update as dict or set, extend, prepend as lists, and inserts as list of tuples.
"""
d = {}
if self._update:
d['update'] = self._update
if self._extend:
d['extend'] = self._extend
if self._prepend:
d['prepend'] = self._prepend
elif self._inserts:
d['inserts'] = self._inserts
return d
def _is_section_key(key):
"""Is a Config key a section name (does it start with a capital)?"""
if key and key[0].upper()==key[0] and not key.startswith('_'):
return True
else:
return False
class Config(dict):
"""An attribute based dict that can do smart merges."""
def __init__(self, *args, **kwds):
dict.__init__(self, *args, **kwds)
self._ensure_subconfig()
def _ensure_subconfig(self):
"""ensure that sub-dicts that should be Config objects are
casts dicts that are under section keys to Config objects,
which is necessary for constructing Config objects from dict literals.
"""
for key in self:
obj = self[key]
if _is_section_key(key) \
and isinstance(obj, dict) \
and not isinstance(obj, Config):
setattr(self, key, Config(obj))
def _merge(self, other):
"""deprecated alias, use Config.merge()"""
self.merge(other)
def merge(self, other):
"""merge another config object into this one"""
to_update = {}
for k, v in other.items():
if k not in self:
to_update[k] = v
else: # I have this key
if isinstance(v, Config) and isinstance(self[k], Config):
# Recursively merge common sub Configs
self[k].merge(v)
else:
# Plain updates for non-Configs
to_update[k] = v
self.update(to_update)
def collisions(self, other):
"""Check for collisions between two config objects.
Returns a dict of the form {"Class": {"trait": "collision message"}}`,
indicating which values have been ignored.
An empty dict indicates no collisions.
"""
collisions = {}
for section in self:
if section not in other:
continue
mine = self[section]
theirs = other[section]
for key in mine:
if key in theirs and mine[key] != theirs[key]:
collisions.setdefault(section, {})
collisions[section][key] = "%r ignored, using %r" % (mine[key], theirs[key])
return collisions
def __contains__(self, key):
# allow nested contains of the form `"Section.key" in config`
if '.' in key:
first, remainder = key.split('.', 1)
if first not in self:
return False
return remainder in self[first]
return super(Config, self).__contains__(key)
# .has_key is deprecated for dictionaries.
has_key = __contains__
def _has_section(self, key):
return _is_section_key(key) and key in self
def copy(self):
return type(self)(dict.copy(self))
def __copy__(self):
return self.copy()
def __deepcopy__(self, memo):
new_config = type(self)()
for key, value in self.items():
if isinstance(value, (Config, LazyConfigValue)):
# deep copy config objects
value = copy.deepcopy(value, memo)
elif type(value) in {dict, list, set, tuple}:
# shallow copy plain container traits
value = copy.copy(value)
new_config[key] = value
return new_config
def __getitem__(self, key):
try:
return dict.__getitem__(self, key)
except KeyError:
if _is_section_key(key):
c = Config()
dict.__setitem__(self, key, c)
return c
elif not key.startswith('_'):
# undefined, create lazy value, used for container methods
v = LazyConfigValue()
dict.__setitem__(self, key, v)
return v
else:
raise KeyError
def __setitem__(self, key, value):
if _is_section_key(key):
if not isinstance(value, Config):
raise ValueError('values whose keys begin with an uppercase '
'char must be Config instances: %r, %r' % (key, value))
dict.__setitem__(self, key, value)
def __getattr__(self, key):
if key.startswith('__'):
return dict.__getattr__(self, key)
try:
return self.__getitem__(key)
except KeyError as e:
raise AttributeError(e)
def __setattr__(self, key, value):
if key.startswith('__'):
return dict.__setattr__(self, key, value)
try:
self.__setitem__(key, value)
except KeyError as e:
raise AttributeError(e)
def __delattr__(self, key):
if key.startswith('__'):
return dict.__delattr__(self, key)
try:
dict.__delitem__(self, key)
except KeyError as e:
raise AttributeError(e)
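# A small illustrative sketch of the merge behaviour defined above
# (the section and key names are hypothetical):
#
# >>> c1 = Config({'Foo': Config({'bar': 1})})
# >>> c2 = Config({'Foo': Config({'baz': 2})})
# >>> c1.merge(c2)
# >>> c1.Foo.bar, c1.Foo.baz
# (1, 2)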
#-----------------------------------------------------------------------------
# Config loading classes
#-----------------------------------------------------------------------------
class ConfigLoader(object):
"""A object for loading configurations from just about anywhere.
The resulting configuration is packaged as a :class:`Config`.
Notes
-----
    A :class:`ConfigLoader` does one thing: load a config from a source
    (file, command line arguments) and return the data as a :class:`Config` object.
There are lots of things that :class:`ConfigLoader` does not do. It does
not implement complex logic for finding config files. It does not handle
default values or merge multiple configs. These things need to be
handled elsewhere.
"""
def _log_default(self):
from traitlets.log import get_logger
return get_logger()
def __init__(self, log=None):
"""A base class for config loaders.
log : instance of :class:`logging.Logger` to use.
              By default the logger of :meth:`traitlets.config.application.Application.instance()`
will be used
Examples
--------
>>> cl = ConfigLoader()
>>> config = cl.load_config()
>>> config
{}
"""
self.clear()
if log is None:
self.log = self._log_default()
self.log.debug('Using default logger')
else:
self.log = log
def clear(self):
self.config = Config()
def load_config(self):
"""Load a config from somewhere, return a :class:`Config` instance.
Usually, this will cause self.config to be set and then returned.
However, in most cases, :meth:`ConfigLoader.clear` should be called
to erase any previous state.
"""
self.clear()
return self.config
class FileConfigLoader(ConfigLoader):
"""A base class for file based configurations.
As we add more file based config loaders, the common logic should go
here.
"""
def __init__(self, filename, path=None, **kw):
"""Build a config loader for a filename and path.
Parameters
----------
filename : str
The file name of the config file.
path : str, list, tuple
The path to search for the config file on, or a sequence of
paths to try in order.
"""
super(FileConfigLoader, self).__init__(**kw)
self.filename = filename
self.path = path
self.full_filename = ''
def _find_file(self):
"""Try to find the file by searching the paths."""
self.full_filename = filefind(self.filename, self.path)
class JSONFileConfigLoader(FileConfigLoader):
"""A JSON file loader for config
Can also act as a context manager that rewrite the configuration file to disk on exit.
Example::
with JSONFileConfigLoader('myapp.json','/home/jupyter/configurations/') as c:
c.MyNewConfigurable.new_value = 'Updated'
"""
def load_config(self):
"""Load the config from a file and return it as a Config object."""
self.clear()
try:
self._find_file()
except IOError as e:
raise ConfigFileNotFound(str(e))
dct = self._read_file_as_dict()
self.config = self._convert_to_config(dct)
return self.config
def _read_file_as_dict(self):
with open(self.full_filename) as f:
return json.load(f)
def _convert_to_config(self, dictionary):
if 'version' in dictionary:
version = dictionary.pop('version')
else:
version = 1
if version == 1:
return Config(dictionary)
else:
raise ValueError('Unknown version of JSON config file: {version}'.format(version=version))
def __enter__(self):
self.load_config()
return self.config
def __exit__(self, exc_type, exc_value, traceback):
"""
Exit the context manager but do not handle any errors.
In case of any error, we do not want to write the potentially broken
configuration to disk.
"""
self.config.version = 1
json_config = json.dumps(self.config, indent=2)
with open(self.full_filename, 'w') as f:
f.write(json_config)
class PyFileConfigLoader(FileConfigLoader):
"""A config loader for pure python files.
This is responsible for locating a Python config file by filename and
path, then executing it to construct a Config object.
"""
def load_config(self):
"""Load the config from a file and return it as a Config object."""
self.clear()
try:
self._find_file()
except IOError as e:
raise ConfigFileNotFound(str(e))
self._read_file_as_dict()
return self.config
def load_subconfig(self, fname, path=None):
"""Injected into config file namespace as load_subconfig"""
if path is None:
path = self.path
loader = self.__class__(fname, path)
try:
sub_config = loader.load_config()
except ConfigFileNotFound:
# Pass silently if the sub config is not there,
# treat it as an empty config file.
pass
else:
self.config.merge(sub_config)
def _read_file_as_dict(self):
"""Load the config file into self.config, with recursive loading."""
def get_config():
"""Unnecessary now, but a deprecation warning is more trouble than it's worth."""
return self.config
namespace = dict(
c=self.config,
load_subconfig=self.load_subconfig,
get_config=get_config,
__file__=self.full_filename,
)
fs_encoding = sys.getfilesystemencoding() or 'ascii'
conf_filename = self.full_filename.encode(fs_encoding)
py3compat.execfile(conf_filename, namespace)
class CommandLineConfigLoader(ConfigLoader):
"""A config loader for command line arguments.
As we add more command line based loaders, the common logic should go
here.
"""
def _exec_config_str(self, lhs, rhs):
"""execute self.config.<lhs> = <rhs>
* expands ~ with expanduser
* tries to assign with literal_eval, otherwise assigns with just the string,
allowing `--C.a=foobar` and `--C.a="foobar"` to be equivalent. *Not*
equivalent are `--C.a=4` and `--C.a='4'`.
"""
rhs = os.path.expanduser(rhs)
try:
# Try to see if regular Python syntax will work. This
# won't handle strings as the quote marks are removed
# by the system shell.
value = literal_eval(rhs)
except (NameError, SyntaxError, ValueError):
# This case happens if the rhs is a string.
value = rhs
exec(u'self.config.%s = value' % lhs)
def _load_flag(self, cfg):
"""update self.config from a flag, which can be a dict or Config"""
if isinstance(cfg, (dict, Config)):
# don't clobber whole config sections, update
# each section from config:
for sec,c in cfg.items():
self.config[sec].update(c)
else:
raise TypeError("Invalid flag: %r" % cfg)
# raw --identifier=value pattern
# but *also* accept '-' as wordsep, for aliases
# accepts: --foo=a
# --Class.trait=value
# --alias-name=value
# rejects: -foo=value
# --foo
# --Class.trait
kv_pattern = re.compile(r'\-\-[A-Za-z][\w\-]*(\.[\w\-]+)*\=.*')
# just flags, no assignments, with two *or one* leading '-'
# accepts: --foo
# -foo-bar-again
# rejects: --anything=anything
# --two.word
flag_pattern = re.compile(r'\-\-?\w+[\-\w]*$')
class KeyValueConfigLoader(CommandLineConfigLoader):
"""A config loader that loads key value pairs from the command line.
    This allows command line options to be given in the following form::
ipython --profile="foo" --InteractiveShell.autocall=False
"""
def __init__(self, argv=None, aliases=None, flags=None, **kw):
"""Create a key value pair config loader.
Parameters
----------
argv : list
A list that has the form of sys.argv[1:] which has unicode
elements of the form u"key=value". If this is None (default),
then sys.argv[1:] will be used.
aliases : dict
A dict of aliases for configurable traits.
Keys are the short aliases, Values are the resolved trait.
Of the form: `{'alias' : 'Configurable.trait'}`
flags : dict
            A dict of flags, keyed by str name. Values can be Config objects,
            dicts, or "key=value" strings. If Config or dict, when the flag
            is triggered, the config is loaded as `self.config.update(cfg)`.
Returns
-------
config : Config
The resulting Config object.
Examples
--------
>>> from traitlets.config.loader import KeyValueConfigLoader
>>> cl = KeyValueConfigLoader()
>>> d = cl.load_config(["--A.name='brian'","--B.number=0"])
>>> sorted(d.items())
[('A', {'name': 'brian'}), ('B', {'number': 0})]
"""
super(KeyValueConfigLoader, self).__init__(**kw)
if argv is None:
argv = sys.argv[1:]
self.argv = argv
self.aliases = aliases or {}
self.flags = flags or {}
def clear(self):
super(KeyValueConfigLoader, self).clear()
self.extra_args = []
def _decode_argv(self, argv, enc=None):
"""decode argv if bytes, using stdin.encoding, falling back on default enc"""
uargv = []
if enc is None:
enc = DEFAULT_ENCODING
for arg in argv:
if not isinstance(arg, text_type):
# only decode if not already decoded
arg = arg.decode(enc)
uargv.append(arg)
return uargv
def load_config(self, argv=None, aliases=None, flags=None):
"""Parse the configuration and generate the Config object.
After loading, any arguments that are not key-value or
flags will be stored in self.extra_args - a list of
unparsed command-line arguments. This is used for
arguments such as input files or subcommands.
Parameters
----------
argv : list, optional
A list that has the form of sys.argv[1:] which has unicode
elements of the form u"key=value". If this is None (default),
then self.argv will be used.
aliases : dict
A dict of aliases for configurable traits.
Keys are the short aliases, Values are the resolved trait.
Of the form: `{'alias' : 'Configurable.trait'}`
flags : dict
A dict of flags, keyed by str name. Values can be Config objects
            or dicts. When the flag is triggered, the config is loaded as
`self.config.update(cfg)`.
"""
self.clear()
if argv is None:
argv = self.argv
if aliases is None:
aliases = self.aliases
if flags is None:
flags = self.flags
# ensure argv is a list of unicode strings:
uargv = self._decode_argv(argv)
for idx,raw in enumerate(uargv):
# strip leading '-'
item = raw.lstrip('-')
if raw == '--':
# don't parse arguments after '--'
# this is useful for relaying arguments to scripts, e.g.
# ipython -i foo.py --matplotlib=qt -- args after '--' go-to-foo.py
self.extra_args.extend(uargv[idx+1:])
break
if kv_pattern.match(raw):
lhs,rhs = item.split('=',1)
# Substitute longnames for aliases.
if lhs in aliases:
lhs = aliases[lhs]
if '.' not in lhs:
# probably a mistyped alias, but not technically illegal
self.log.warning("Unrecognized alias: '%s', it will probably have no effect.", raw)
try:
self._exec_config_str(lhs, rhs)
except Exception:
raise ArgumentError("Invalid argument: '%s'" % raw)
elif flag_pattern.match(raw):
if item in flags:
cfg,help = flags[item]
self._load_flag(cfg)
else:
raise ArgumentError("Unrecognized flag: '%s'"%raw)
elif raw.startswith('-'):
kv = '--'+item
if kv_pattern.match(kv):
raise ArgumentError("Invalid argument: '%s', did you mean '%s'?"%(raw, kv))
else:
raise ArgumentError("Invalid argument: '%s'"%raw)
else:
# keep all args that aren't valid in a list,
# in case our parent knows what to do with them.
self.extra_args.append(item)
return self.config
class ArgParseConfigLoader(CommandLineConfigLoader):
"""A loader that uses the argparse module to load from the command line."""
def __init__(self, argv=None, aliases=None, flags=None, log=None, *parser_args, **parser_kw):
"""Create a config loader for use with argparse.
Parameters
----------
argv : optional, list
If given, used to read command-line arguments from, otherwise
sys.argv[1:] is used.
parser_args : tuple
A tuple of positional arguments that will be passed to the
constructor of :class:`argparse.ArgumentParser`.
parser_kw : dict
            A dict of keyword arguments that will be passed to the
constructor of :class:`argparse.ArgumentParser`.
Returns
-------
config : Config
The resulting Config object.
"""
super(CommandLineConfigLoader, self).__init__(log=log)
self.clear()
if argv is None:
argv = sys.argv[1:]
self.argv = argv
self.aliases = aliases or {}
self.flags = flags or {}
self.parser_args = parser_args
self.version = parser_kw.pop("version", None)
kwargs = dict(argument_default=argparse.SUPPRESS)
kwargs.update(parser_kw)
self.parser_kw = kwargs
def load_config(self, argv=None, aliases=None, flags=None):
"""Parse command line arguments and return as a Config object.
Parameters
----------
args : optional, list
If given, a list with the structure of sys.argv[1:] to parse
arguments from. If not given, the instance's self.argv attribute
(given at construction time) is used."""
self.clear()
if argv is None:
argv = self.argv
if aliases is None:
aliases = self.aliases
if flags is None:
flags = self.flags
self._create_parser(aliases, flags)
self._parse_args(argv)
self._convert_to_config()
return self.config
def get_extra_args(self):
if hasattr(self, 'extra_args'):
return self.extra_args
else:
return []
def _create_parser(self, aliases=None, flags=None):
self.parser = ArgumentParser(*self.parser_args, **self.parser_kw)
self._add_arguments(aliases, flags)
def _add_arguments(self, aliases=None, flags=None):
raise NotImplementedError("subclasses must implement _add_arguments")
def _parse_args(self, args):
"""self.parser->self.parsed_data"""
# decode sys.argv to support unicode command-line options
enc = DEFAULT_ENCODING
uargs = [py3compat.cast_unicode(a, enc) for a in args]
self.parsed_data, self.extra_args = self.parser.parse_known_args(uargs)
def _convert_to_config(self):
"""self.parsed_data->self.config"""
for k, v in vars(self.parsed_data).items():
exec("self.config.%s = v"%k, locals(), globals())
class KVArgParseConfigLoader(ArgParseConfigLoader):
"""A config loader that loads aliases and flags with argparse,
but will use KVLoader for the rest. This allows better parsing
of common args, such as `ipython -c 'print 5'`, but still gets
arbitrary config with `ipython --InteractiveShell.use_readline=False`"""
def _add_arguments(self, aliases=None, flags=None):
self.alias_flags = {}
# print aliases, flags
if aliases is None:
aliases = self.aliases
if flags is None:
flags = self.flags
paa = self.parser.add_argument
for key,value in aliases.items():
if key in flags:
# flags
nargs = '?'
else:
nargs = None
            if len(key) == 1:
paa('-'+key, '--'+key, type=text_type, dest=value, nargs=nargs)
else:
paa('--'+key, type=text_type, dest=value, nargs=nargs)
for key, (value, help) in flags.items():
if key in self.aliases:
#
self.alias_flags[self.aliases[key]] = value
continue
            if len(key) == 1:
paa('-'+key, '--'+key, action='append_const', dest='_flags', const=value)
else:
paa('--'+key, action='append_const', dest='_flags', const=value)
def _convert_to_config(self):
"""self.parsed_data->self.config, parse unrecognized extra args via KVLoader."""
# remove subconfigs list from namespace before transforming the Namespace
if '_flags' in self.parsed_data:
subcs = self.parsed_data._flags
del self.parsed_data._flags
else:
subcs = []
for k, v in vars(self.parsed_data).items():
if v is None:
# it was a flag that shares the name of an alias
subcs.append(self.alias_flags[k])
else:
# eval the KV assignment
self._exec_config_str(k, v)
for subc in subcs:
self._load_flag(subc)
if self.extra_args:
sub_parser = KeyValueConfigLoader(log=self.log)
sub_parser.load_config(self.extra_args)
self.config.merge(sub_parser.config)
self.extra_args = sub_parser.extra_args
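# Illustrative sketch (alias target and values are hypothetical): registered
# aliases/flags are handled by argparse, and any leftover '--Class.trait=value'
# arguments are re-parsed by the KeyValueConfigLoader above and merged in.
#
#   loader = KVArgParseConfigLoader(
#       argv=['-c', 'print(5)', '--InteractiveShell.autoindent=False'],
#       aliases={'c': 'InteractiveShellApp.code_to_run'}, flags={})
#   cfg = loader.load_config()
#   # cfg.InteractiveShellApp.code_to_run -> 'print(5)'
#   # cfg.InteractiveShell.autoindent     -> False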
def load_pyconfig_files(config_files, path):
"""Load multiple Python config files, merging each of them in turn.
Parameters
==========
config_files : list of str
List of config files names to load and merge into the config.
path : unicode
The full path to the location of the config files.
"""
config = Config()
for cf in config_files:
loader = PyFileConfigLoader(cf, path=path)
try:
next_config = loader.load_config()
except ConfigFileNotFound:
pass
except:
raise
else:
config.merge(next_config)
return config
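# Illustrative usage sketch (file names and path are hypothetical): files later
# in the list win on conflicting keys, since each file's Config is merged over
# the previously accumulated one.
#
#   config = load_pyconfig_files(['ipython_config.py', 'extra_config.py'],
#                                '/home/user/.ipython/profile_default')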
| apache-2.0 |
PatrickOReilly/scikit-learn | sklearn/tree/tree.py | 1 | 44306 | """
This module gathers tree-based methods, including decision, regression and
randomized trees. Single and multi-output problems are both handled.
"""
# Authors: Gilles Louppe <[email protected]>
# Peter Prettenhofer <[email protected]>
# Brian Holt <[email protected]>
# Noel Dawe <[email protected]>
# Satrajit Gosh <[email protected]>
# Joly Arnaud <[email protected]>
# Fares Hedayati <[email protected]>
# Nelson Liu <[email protected]>
#
# License: BSD 3 clause
from __future__ import division
import numbers
from abc import ABCMeta
from abc import abstractmethod
from math import ceil
import numpy as np
from scipy.sparse import issparse
from ..base import BaseEstimator
from ..base import ClassifierMixin
from ..base import RegressorMixin
from ..externals import six
from ..feature_selection.from_model import _LearntSelectorMixin
from ..utils import check_array
from ..utils import check_random_state
from ..utils import compute_sample_weight
from ..utils.multiclass import check_classification_targets
from ..exceptions import NotFittedError
from ._criterion import Criterion
from ._splitter import Splitter
from ._tree import DepthFirstTreeBuilder
from ._tree import BestFirstTreeBuilder
from ._tree import Tree
from . import _tree, _splitter, _criterion
__all__ = ["DecisionTreeClassifier",
"DecisionTreeRegressor",
"ExtraTreeClassifier",
"ExtraTreeRegressor"]
# =============================================================================
# Types and constants
# =============================================================================
DTYPE = _tree.DTYPE
DOUBLE = _tree.DOUBLE
CRITERIA_CLF = {"gini": _criterion.Gini, "entropy": _criterion.Entropy}
CRITERIA_REG = {"mse": _criterion.MSE, "friedman_mse": _criterion.FriedmanMSE,
"mae": _criterion.MAE}
DENSE_SPLITTERS = {"best": _splitter.BestSplitter,
"random": _splitter.RandomSplitter}
SPARSE_SPLITTERS = {"best": _splitter.BestSparseSplitter,
"random": _splitter.RandomSparseSplitter}
# =============================================================================
# Base decision tree
# =============================================================================
class BaseDecisionTree(six.with_metaclass(ABCMeta, BaseEstimator,
_LearntSelectorMixin)):
"""Base class for decision trees.
Warning: This class should not be used directly.
Use derived classes instead.
"""
@abstractmethod
def __init__(self,
criterion,
splitter,
max_depth,
min_samples_split,
min_samples_leaf,
min_weight_fraction_leaf,
max_features,
max_leaf_nodes,
random_state,
min_impurity_split,
class_weight=None,
presort=False,
increasing=None,
decreasing=None):
self.criterion = criterion
self.splitter = splitter
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = max_features
self.random_state = random_state
self.max_leaf_nodes = max_leaf_nodes
self.min_impurity_split = min_impurity_split
self.class_weight = class_weight
self.presort = presort
self.increasing = increasing
self.decreasing = decreasing
self.n_features_ = None
self.n_outputs_ = None
self.classes_ = None
self.n_classes_ = None
self.tree_ = None
self.max_features_ = None
def fit(self, X, y, sample_weight=None, check_input=True,
X_idx_sorted=None):
"""Build a decision tree from the training set (X, y).
Parameters
----------
X : array-like or sparse matrix, shape = [n_samples, n_features]
The training input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csc_matrix``.
y : array-like, shape = [n_samples] or [n_samples, n_outputs]
The target values (class labels in classification, real numbers in
regression). In the regression case, use ``dtype=np.float64`` and
``order='C'`` for maximum efficiency.
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted. Splits
that would create child nodes with net zero or negative weight are
ignored while searching for a split in each node. In the case of
classification, splits are also ignored if they would result in any
single class carrying a negative weight in either child node.
        check_input : boolean, (default=True)
            Allow to bypass several input checks.
            Don't use this parameter unless you know what you are doing.
        X_idx_sorted : array-like, shape = [n_samples, n_features], optional
            The indexes of the sorted training input samples. If many trees
            are grown on the same dataset, this allows the ordering to be
            cached between trees. If None, the data will be sorted here.
            Don't use this parameter unless you know what you are doing.
Returns
-------
self : object
Returns self.
"""
random_state = check_random_state(self.random_state)
if check_input:
X = check_array(X, dtype=DTYPE, accept_sparse="csc")
y = check_array(y, ensure_2d=False, dtype=None)
if issparse(X):
X.sort_indices()
if X.indices.dtype != np.intc or X.indptr.dtype != np.intc:
raise ValueError("No support for np.int64 index based "
"sparse matrices")
# Determine output settings
n_samples, self.n_features_ = X.shape
is_classification = isinstance(self, ClassifierMixin)
y = np.atleast_1d(y)
expanded_class_weight = None
if y.ndim == 1:
# reshape is necessary to preserve the data contiguity against vs
# [:, np.newaxis] that does not.
y = np.reshape(y, (-1, 1))
self.n_outputs_ = y.shape[1]
if is_classification:
check_classification_targets(y)
y = np.copy(y)
self.classes_ = []
self.n_classes_ = []
if self.class_weight is not None:
y_original = np.copy(y)
y_encoded = np.zeros(y.shape, dtype=np.int)
for k in range(self.n_outputs_):
classes_k, y_encoded[:, k] = np.unique(y[:, k],
return_inverse=True)
self.classes_.append(classes_k)
self.n_classes_.append(classes_k.shape[0])
y = y_encoded
if self.class_weight is not None:
expanded_class_weight = compute_sample_weight(
self.class_weight, y_original)
else:
self.classes_ = [None] * self.n_outputs_
self.n_classes_ = [1] * self.n_outputs_
self.n_classes_ = np.array(self.n_classes_, dtype=np.intp)
if getattr(y, "dtype", None) != DOUBLE or not y.flags.contiguous:
y = np.ascontiguousarray(y, dtype=DOUBLE)
# Check parameters
max_depth = ((2 ** 31) - 1 if self.max_depth is None
else self.max_depth)
max_leaf_nodes = (-1 if self.max_leaf_nodes is None
else self.max_leaf_nodes)
if isinstance(self.min_samples_leaf, (numbers.Integral, np.integer)):
min_samples_leaf = self.min_samples_leaf
else: # float
min_samples_leaf = int(ceil(self.min_samples_leaf * n_samples))
if isinstance(self.min_samples_split, (numbers.Integral, np.integer)):
min_samples_split = self.min_samples_split
else: # float
min_samples_split = int(ceil(self.min_samples_split * n_samples))
min_samples_split = max(2, min_samples_split)
min_samples_split = max(min_samples_split, 2 * min_samples_leaf)
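        # Worked example of the conversions above (numbers are illustrative):
        # with n_samples=1000, min_samples_split=0.01 gives ceil(0.01 * 1000) = 10,
        # and min_samples_leaf=5 then enforces
        # min_samples_split = max(10, 2 * 5) = 10.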
if isinstance(self.max_features, six.string_types):
if self.max_features == "auto":
if is_classification:
max_features = max(1, int(np.sqrt(self.n_features_)))
else:
max_features = self.n_features_
elif self.max_features == "sqrt":
max_features = max(1, int(np.sqrt(self.n_features_)))
elif self.max_features == "log2":
max_features = max(1, int(np.log2(self.n_features_)))
else:
raise ValueError(
'Invalid value for max_features. Allowed string '
'values are "auto", "sqrt" or "log2".')
elif self.max_features is None:
max_features = self.n_features_
elif isinstance(self.max_features, (numbers.Integral, np.integer)):
max_features = self.max_features
else: # float
if self.max_features > 0.0:
max_features = max(1,
int(self.max_features * self.n_features_))
else:
max_features = 0
self.max_features_ = max_features
if len(y) != n_samples:
raise ValueError("Number of labels=%d does not match "
"number of samples=%d" % (len(y), n_samples))
        if not (0. < self.min_samples_split <= 1. or
                2 <= self.min_samples_split):
            raise ValueError("min_samples_split must be at least 2 "
                             "or in (0, 1], got %s" % min_samples_split)
        if not (0. < self.min_samples_leaf <= 0.5 or
                1 <= self.min_samples_leaf):
            raise ValueError("min_samples_leaf must be at least 1 "
                             "or in (0, 0.5], got %s" % min_samples_leaf)
if not 0 <= self.min_weight_fraction_leaf <= 0.5:
raise ValueError("min_weight_fraction_leaf must in [0, 0.5]")
if max_depth <= 0:
raise ValueError("max_depth must be greater than zero. ")
if not (0 < max_features <= self.n_features_):
raise ValueError("max_features must be in (0, n_features]")
if not isinstance(max_leaf_nodes, (numbers.Integral, np.integer)):
raise ValueError("max_leaf_nodes must be integral number but was "
"%r" % max_leaf_nodes)
if -1 < max_leaf_nodes < 2:
raise ValueError(("max_leaf_nodes {0} must be either smaller than "
"0 or larger than 1").format(max_leaf_nodes))
if sample_weight is not None:
if (getattr(sample_weight, "dtype", None) != DOUBLE or
not sample_weight.flags.contiguous):
sample_weight = np.ascontiguousarray(
sample_weight, dtype=DOUBLE)
if len(sample_weight.shape) > 1:
raise ValueError("Sample weights array has more "
"than one dimension: %d" %
len(sample_weight.shape))
if len(sample_weight) != n_samples:
raise ValueError("Number of weights=%d does not match "
"number of samples=%d" %
(len(sample_weight), n_samples))
if expanded_class_weight is not None:
if sample_weight is not None:
sample_weight = sample_weight * expanded_class_weight
else:
sample_weight = expanded_class_weight
# Set min_weight_leaf from min_weight_fraction_leaf
if self.min_weight_fraction_leaf != 0. and sample_weight is not None:
min_weight_leaf = (self.min_weight_fraction_leaf *
np.sum(sample_weight))
else:
min_weight_leaf = 0.
if self.min_impurity_split < 0.:
raise ValueError("min_impurity_split must be greater than or equal "
"to 0")
presort = self.presort
# Allow presort to be 'auto', which means True if the dataset is dense,
# otherwise it will be False.
if self.presort == 'auto' and issparse(X):
presort = False
elif self.presort == 'auto':
presort = True
if presort is True and issparse(X):
raise ValueError("Presorting is not supported for sparse "
"matrices.")
# If multiple trees are built on the same dataset, we only want to
# presort once. Splitters now can accept presorted indices if desired,
# but do not handle any presorting themselves. Ensemble algorithms
# which desire presorting must do presorting themselves and pass that
# matrix into each tree.
if X_idx_sorted is None and presort:
X_idx_sorted = np.asfortranarray(np.argsort(X, axis=0),
dtype=np.int32)
if presort and X_idx_sorted.shape != X.shape:
raise ValueError("The shape of X (X.shape = {}) doesn't match "
"the shape of X_idx_sorted (X_idx_sorted"
".shape = {})".format(X.shape,
X_idx_sorted.shape))
# Build tree
criterion = self.criterion
if not isinstance(criterion, Criterion):
if is_classification:
criterion = CRITERIA_CLF[self.criterion](self.n_outputs_,
self.n_classes_)
else:
criterion = CRITERIA_REG[self.criterion](self.n_outputs_,
n_samples)
SPLITTERS = SPARSE_SPLITTERS if issparse(X) else DENSE_SPLITTERS
def _encode_monotonic(increasing, decreasing):
if increasing is None: increasing = []
if decreasing is None: decreasing = []
def is_int_in_range(feature):
return isinstance(feature, int) and 0 <= feature < self.n_features_
def is_valid(features):
return (isinstance(features, list) and
all(is_int_in_range(feature) for feature in features))
if not is_valid(increasing):
raise ValueError("increasing should be a list of ints in the range [0,n_features].")
if not is_valid(decreasing):
raise ValueError("decreasing should be a list of ints in the range [0,n_features].")
if increasing and decreasing:
intersection = set(increasing) & set(decreasing)
if intersection:
raise ValueError("The following features cannot be both increasing and decreasing: " + str(list(intersection)))
monotonic = np.zeros(self.n_features_, dtype=np.int32)
if increasing:
for feature in increasing:
monotonic[feature] = 1
if decreasing:
for feature in decreasing:
monotonic[feature] = -1
return monotonic
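        # Example of the encoding returned above (indices are illustrative): with
        # n_features_=4, increasing=[0, 2] and decreasing=[3], the splitter is
        # given monotonic = [1, 0, 1, -1]; a 0 leaves that feature unconstrained.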
monotonic = _encode_monotonic(self.increasing, self.decreasing)
splitter = self.splitter
if not isinstance(self.splitter, Splitter):
splitter = SPLITTERS[self.splitter](criterion,
self.max_features_,
min_samples_leaf,
min_weight_leaf,
random_state,
self.presort,
monotonic)
self.tree_ = Tree(self.n_features_, self.n_classes_, self.n_outputs_)
# Use BestFirst if max_leaf_nodes given; use DepthFirst otherwise
if max_leaf_nodes < 0:
builder = DepthFirstTreeBuilder(splitter, min_samples_split,
min_samples_leaf,
min_weight_leaf,
max_depth, self.min_impurity_split)
else:
builder = BestFirstTreeBuilder(splitter, min_samples_split,
min_samples_leaf,
min_weight_leaf,
max_depth,
max_leaf_nodes, self.min_impurity_split)
builder.build(self.tree_, X, y, sample_weight, X_idx_sorted)
if self.n_outputs_ == 1:
self.n_classes_ = self.n_classes_[0]
self.classes_ = self.classes_[0]
return self
def _validate_X_predict(self, X, check_input):
"""Validate X whenever one tries to predict, apply, predict_proba"""
if self.tree_ is None:
raise NotFittedError("Estimator not fitted, "
"call `fit` before exploiting the model.")
if check_input:
X = check_array(X, dtype=DTYPE, accept_sparse="csr")
if issparse(X) and (X.indices.dtype != np.intc or
X.indptr.dtype != np.intc):
raise ValueError("No support for np.int64 index based "
"sparse matrices")
n_features = X.shape[1]
if self.n_features_ != n_features:
raise ValueError("Number of features of the model must "
"match the input. Model n_features is %s and "
"input n_features is %s "
% (self.n_features_, n_features))
return X
def predict(self, X, check_input=True):
"""Predict class or regression value for X.
For a classification model, the predicted class for each sample in X is
returned. For a regression model, the predicted value based on X is
returned.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
        check_input : boolean, (default=True)
            Allow to bypass several input checks.
            Don't use this parameter unless you know what you are doing.
Returns
-------
y : array of shape = [n_samples] or [n_samples, n_outputs]
The predicted classes, or the predict values.
"""
X = self._validate_X_predict(X, check_input)
proba = self.tree_.predict(X)
n_samples = X.shape[0]
# Classification
if isinstance(self, ClassifierMixin):
if self.n_outputs_ == 1:
return self.classes_.take(np.argmax(proba, axis=1), axis=0)
else:
predictions = np.zeros((n_samples, self.n_outputs_))
for k in range(self.n_outputs_):
predictions[:, k] = self.classes_[k].take(
np.argmax(proba[:, k], axis=1),
axis=0)
return predictions
# Regression
else:
if self.n_outputs_ == 1:
return proba[:, 0]
else:
return proba[:, :, 0]
def apply(self, X, check_input=True):
"""
Returns the index of the leaf that each sample is predicted as.
.. versionadded:: 0.17
Parameters
----------
X : array_like or sparse matrix, shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
        check_input : boolean, (default=True)
            Allow to bypass several input checks.
            Don't use this parameter unless you know what you are doing.
Returns
-------
X_leaves : array_like, shape = [n_samples,]
For each datapoint x in X, return the index of the leaf x
ends up in. Leaves are numbered within
``[0; self.tree_.node_count)``, possibly with gaps in the
numbering.
"""
X = self._validate_X_predict(X, check_input)
return self.tree_.apply(X)
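    # Illustrative sketch (assumes a fitted tree `clf` and a feature matrix X
    # with matching n_features):
    #
    #   leaf_ids = clf.apply(X)                   # shape (n_samples,)
    #   n_distinct_leaves = np.unique(leaf_ids).size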
def decision_path(self, X, check_input=True):
"""Return the decision path in the tree
Parameters
----------
X : array_like or sparse matrix, shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
        check_input : boolean, (default=True)
            Allow to bypass several input checks.
            Don't use this parameter unless you know what you are doing.
Returns
-------
indicator : sparse csr array, shape = [n_samples, n_nodes]
            Return a node indicator matrix where non-zero elements
            indicate that the sample goes through the corresponding nodes.
"""
X = self._validate_X_predict(X, check_input)
return self.tree_.decision_path(X)
@property
def feature_importances_(self):
"""Return the feature importances.
The importance of a feature is computed as the (normalized) total
reduction of the criterion brought by that feature.
It is also known as the Gini importance.
Returns
-------
feature_importances_ : array, shape = [n_features]
"""
if self.tree_ is None:
raise NotFittedError("Estimator not fitted, call `fit` before"
" `feature_importances_`.")
return self.tree_.compute_feature_importances()
# =============================================================================
# Public estimators
# =============================================================================
class DecisionTreeClassifier(BaseDecisionTree, ClassifierMixin):
"""A decision tree classifier.
Read more in the :ref:`User Guide <tree>`.
Parameters
----------
criterion : string, optional (default="gini")
The function to measure the quality of a split. Supported criteria are
"gini" for the Gini impurity and "entropy" for the information gain.
splitter : string, optional (default="best")
The strategy used to choose the split at each node. Supported
strategies are "best" to choose the best split and "random" to choose
the best random split.
max_features : int, float, string or None, optional (default=None)
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=sqrt(n_features)`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
max_depth : int or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
min_samples_split : int, float, optional (default=2)
The minimum number of samples required to split an internal node:
- If int, then consider `min_samples_split` as the minimum number.
- If float, then `min_samples_split` is a percentage and
`ceil(min_samples_split * n_samples)` are the minimum
number of samples for each split.
min_samples_leaf : int, float, optional (default=1)
The minimum number of samples required to be at a leaf node:
- If int, then consider `min_samples_leaf` as the minimum number.
- If float, then `min_samples_leaf` is a percentage and
`ceil(min_samples_leaf * n_samples)` are the minimum
number of samples for each node.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
max_leaf_nodes : int or None, optional (default=None)
Grow a tree with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
class_weight : dict, list of dicts, "balanced" or None, optional (default=None)
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one. For
multi-output problems, a list of dicts can be provided in the same
order as the columns of y.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
For multi-output, the weights of each column of y will be multiplied.
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
min_impurity_split : float, optional (default=1e-7)
Threshold for early stopping in tree growth. A node will split
if its impurity is above the threshold, otherwise it is a leaf.
.. versionadded:: 0.18
presort : bool, optional (default=False)
Whether to presort the data to speed up the finding of best splits in
fitting. For the default settings of a decision tree on large
datasets, setting this to true may slow down the training process.
When using either a smaller dataset or a restricted depth, this may
speed up the training.
increasing : list of ints, optional (default=None)
Indices of features to have a monotonically increasing effect.
decreasing : list of ints, optional (default=None)
Indices of features to have a monotonically decreasing effect.
Attributes
----------
classes_ : array of shape = [n_classes] or a list of such arrays
The classes labels (single output problem),
or a list of arrays of class labels (multi-output problem).
feature_importances_ : array of shape = [n_features]
The feature importances. The higher, the more important the
feature. The importance of a feature is computed as the (normalized)
total reduction of the criterion brought by that feature. It is also
known as the Gini importance [4]_.
max_features_ : int,
The inferred value of max_features.
n_classes_ : int or list
The number of classes (for single output problems),
or a list containing the number of classes for each
output (for multi-output problems).
n_features_ : int
The number of features when ``fit`` is performed.
n_outputs_ : int
The number of outputs when ``fit`` is performed.
tree_ : Tree object
The underlying Tree object.
See also
--------
DecisionTreeRegressor
References
----------
.. [1] https://en.wikipedia.org/wiki/Decision_tree_learning
.. [2] L. Breiman, J. Friedman, R. Olshen, and C. Stone, "Classification
and Regression Trees", Wadsworth, Belmont, CA, 1984.
.. [3] T. Hastie, R. Tibshirani and J. Friedman. "Elements of Statistical
Learning", Springer, 2009.
.. [4] L. Breiman, and A. Cutler, "Random Forests",
http://www.stat.berkeley.edu/~breiman/RandomForests/cc_home.htm
Examples
--------
>>> from sklearn.datasets import load_iris
>>> from sklearn.model_selection import cross_val_score
>>> from sklearn.tree import DecisionTreeClassifier
>>> clf = DecisionTreeClassifier(random_state=0)
>>> iris = load_iris()
>>> cross_val_score(clf, iris.data, iris.target, cv=10)
... # doctest: +SKIP
...
array([ 1. , 0.93..., 0.86..., 0.93..., 0.93...,
0.93..., 0.93..., 1. , 0.93..., 1. ])
"""
def __init__(self,
criterion="gini",
splitter="best",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features=None,
random_state=None,
max_leaf_nodes=None,
min_impurity_split=1e-7,
class_weight=None,
presort=False,
increasing=None,
decreasing=None):
super(DecisionTreeClassifier, self).__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
class_weight=class_weight,
random_state=random_state,
min_impurity_split=min_impurity_split,
presort=presort,
increasing=increasing,
decreasing=decreasing)
def predict_proba(self, X, check_input=True):
"""Predict class probabilities of the input samples X.
The predicted class probability is the fraction of samples of the same
class in a leaf.
        Parameters
        ----------
        X : array-like or sparse matrix of shape = [n_samples, n_features]
            The input samples. Internally, it will be converted to
            ``dtype=np.float32`` and if a sparse matrix is provided
            to a sparse ``csr_matrix``.
        check_input : boolean, (default=True)
            Allow to bypass several input checks.
            Don't use this parameter unless you know what you are doing.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
such arrays if n_outputs > 1.
The class probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
X = self._validate_X_predict(X, check_input)
proba = self.tree_.predict(X)
if self.n_outputs_ == 1:
proba = proba[:, :self.n_classes_]
normalizer = proba.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
proba /= normalizer
return proba
else:
all_proba = []
for k in range(self.n_outputs_):
proba_k = proba[:, k, :self.n_classes_[k]]
normalizer = proba_k.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
proba_k /= normalizer
all_proba.append(proba_k)
return all_proba
def predict_log_proba(self, X):
"""Predict class log-probabilities of the input samples X.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
such arrays if n_outputs > 1.
The class log-probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
proba = self.predict_proba(X)
if self.n_outputs_ == 1:
return np.log(proba)
else:
for k in range(self.n_outputs_):
proba[k] = np.log(proba[k])
return proba
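# Illustrative sketch of the monotonic-constraint extension defined above
# (the data and the constrained feature index are hypothetical):
#
#   import numpy as np
#   rng = np.random.RandomState(0)
#   X = rng.rand(200, 3)
#   y = (X[:, 0] > 0.5).astype(int)
#   clf = DecisionTreeClassifier(increasing=[0], random_state=0).fit(X, y)
#   # splits on feature 0 are constrained so predictions never decrease as
#   # feature 0 increases; features 1 and 2 remain unconstrained.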
class DecisionTreeRegressor(BaseDecisionTree, RegressorMixin):
"""A decision tree regressor.
Read more in the :ref:`User Guide <tree>`.
Parameters
----------
criterion : string, optional (default="mse")
The function to measure the quality of a split. Supported criteria
are "mse" for the mean squared error, which is equal to variance
reduction as feature selection criterion, and "mae" for the mean
absolute error.
.. versionadded:: 0.18
Mean Absolute Error (MAE) criterion.
splitter : string, optional (default="best")
The strategy used to choose the split at each node. Supported
strategies are "best" to choose the best split and "random" to choose
the best random split.
max_features : int, float, string or None, optional (default=None)
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=n_features`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
max_depth : int or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
min_samples_split : int, float, optional (default=2)
The minimum number of samples required to split an internal node:
- If int, then consider `min_samples_split` as the minimum number.
- If float, then `min_samples_split` is a percentage and
`ceil(min_samples_split * n_samples)` are the minimum
number of samples for each split.
min_samples_leaf : int, float, optional (default=1)
The minimum number of samples required to be at a leaf node:
- If int, then consider `min_samples_leaf` as the minimum number.
- If float, then `min_samples_leaf` is a percentage and
`ceil(min_samples_leaf * n_samples)` are the minimum
number of samples for each node.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
max_leaf_nodes : int or None, optional (default=None)
Grow a tree with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
min_impurity_split : float, optional (default=1e-7)
Threshold for early stopping in tree growth. If the impurity
of a node is below the threshold, the node is a leaf.
.. versionadded:: 0.18
presort : bool, optional (default=False)
Whether to presort the data to speed up the finding of best splits in
fitting. For the default settings of a decision tree on large
datasets, setting this to true may slow down the training process.
When using either a smaller dataset or a restricted depth, this may
speed up the training.
increasing : list of ints, optional (default=None)
Indices of features to have a monotonically increasing effect.
decreasing : list of ints, optional (default=None)
Indices of features to have a monotonically decreasing effect.
Attributes
----------
feature_importances_ : array of shape = [n_features]
The feature importances.
The higher, the more important the feature.
The importance of a feature is computed as the
(normalized) total reduction of the criterion brought
by that feature. It is also known as the Gini importance [4]_.
max_features_ : int,
The inferred value of max_features.
n_features_ : int
The number of features when ``fit`` is performed.
n_outputs_ : int
The number of outputs when ``fit`` is performed.
tree_ : Tree object
The underlying Tree object.
See also
--------
DecisionTreeClassifier
References
----------
.. [1] https://en.wikipedia.org/wiki/Decision_tree_learning
.. [2] L. Breiman, J. Friedman, R. Olshen, and C. Stone, "Classification
and Regression Trees", Wadsworth, Belmont, CA, 1984.
.. [3] T. Hastie, R. Tibshirani and J. Friedman. "Elements of Statistical
Learning", Springer, 2009.
.. [4] L. Breiman, and A. Cutler, "Random Forests",
http://www.stat.berkeley.edu/~breiman/RandomForests/cc_home.htm
Examples
--------
>>> from sklearn.datasets import load_boston
>>> from sklearn.model_selection import cross_val_score
>>> from sklearn.tree import DecisionTreeRegressor
>>> boston = load_boston()
>>> regressor = DecisionTreeRegressor(random_state=0)
>>> cross_val_score(regressor, boston.data, boston.target, cv=10)
... # doctest: +SKIP
...
array([ 0.61..., 0.57..., -0.34..., 0.41..., 0.75...,
0.07..., 0.29..., 0.33..., -1.42..., -1.77...])
"""
def __init__(self,
criterion="mse",
splitter="best",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features=None,
random_state=None,
max_leaf_nodes=None,
min_impurity_split=1e-7,
presort=False,
increasing=None,
decreasing=None):
super(DecisionTreeRegressor, self).__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
random_state=random_state,
min_impurity_split=min_impurity_split,
presort=presort,
increasing=increasing,
decreasing=decreasing)
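# Illustrative sketch (X_train/y_train/X_test are hypothetical arrays): the
# regressor takes the same increasing/decreasing lists as the classifier.
#
#   reg = DecisionTreeRegressor(max_depth=4, decreasing=[1], random_state=0)
#   reg.fit(X_train, y_train)
#   y_hat = reg.predict(X_test)   # predictions are non-increasing in feature 1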
class ExtraTreeClassifier(DecisionTreeClassifier):
"""An extremely randomized tree classifier.
Extra-trees differ from classic decision trees in the way they are built.
When looking for the best split to separate the samples of a node into two
groups, random splits are drawn for each of the `max_features` randomly
selected features and the best split among those is chosen. When
    `max_features` is set to 1, this amounts to building a totally random
decision tree.
Warning: Extra-trees should only be used within ensemble methods.
Read more in the :ref:`User Guide <tree>`.
See also
--------
ExtraTreeRegressor, ExtraTreesClassifier, ExtraTreesRegressor
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
"""
def __init__(self,
criterion="gini",
splitter="random",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
random_state=None,
max_leaf_nodes=None,
min_impurity_split=1e-7,
class_weight=None,
increasing=None,
decreasing=None):
super(ExtraTreeClassifier, self).__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
class_weight=class_weight,
min_impurity_split=min_impurity_split,
random_state=random_state,
increasing=increasing,
decreasing=decreasing)
class ExtraTreeRegressor(DecisionTreeRegressor):
"""An extremely randomized tree regressor.
Extra-trees differ from classic decision trees in the way they are built.
When looking for the best split to separate the samples of a node into two
groups, random splits are drawn for each of the `max_features` randomly
selected features and the best split among those is chosen. When
    `max_features` is set to 1, this amounts to building a totally random
decision tree.
Warning: Extra-trees should only be used within ensemble methods.
Read more in the :ref:`User Guide <tree>`.
See also
--------
ExtraTreeClassifier, ExtraTreesClassifier, ExtraTreesRegressor
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
"""
def __init__(self,
criterion="mse",
splitter="random",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
random_state=None,
min_impurity_split=1e-7,
max_leaf_nodes=None,
increasing=None,
decreasing=None):
super(ExtraTreeRegressor, self).__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
min_impurity_split=min_impurity_split,
random_state=random_state,
increasing=increasing,
decreasing=decreasing)
| bsd-3-clause |
ua-snap/downscale | old/old_bin/sort_files_by_rcp.py | 2 | 1564 | import os, glob, shutil
from pathos import multiprocessing as mp
import pandas as pd
import numpy as np
base_path = '/Data/malindgren/cru_november_final/IEM/ar5'
output_base_path = '/Data/malindgren/cru_november_final/IEM/ar5'
models = [ 'IPSL-CM5A-LR', 'GISS-E2-R', 'MRI-CGCM3', 'CCSM4', 'GFDL-CM3' ]
# variables = ['rsds', 'vap' ]
for model in models:
variables = os.listdir( os.path.join( base_path, model ) )
_ = [ os.makedirs( os.path.join( base_path, model, variable ) ) for variable in variables if not os.path.exists( os.path.join( base_path, model, variable ) ) ]
for variable in variables:
print( ' '.join([model, variable]) )
output_path = os.path.join( output_base_path, model, variable, 'downscaled' )
cur_path = os.path.join( base_path, model, variable, 'downscaled' )
l = pd.Series( glob.glob( os.path.join( cur_path, '*.tif' ) ) )
grouper = [ os.path.basename(i).split( '_' )[ 5 ] for i in l ]
rcp_groups = l.groupby( grouper )
name_group = [ group for group in rcp_groups ]
names = [ i[0] for i in name_group ]
_ = [ os.makedirs( os.path.join( output_path, name ) ) for name in names if not os.path.exists( os.path.join( output_path, name ) ) ]
for count, name in enumerate( names ):
   print( count )
group = name_group[ count ]
out_group = [ os.path.join( output_path, name, os.path.basename( i ) ) for i in group[1] ]
def run( x, y ):
import shutil
return shutil.move( x, y )
pool = mp.Pool( 15 )
out = pool.map( lambda x: run(x[0], x[1]), zip( group[1], out_group ) )
pool.close()
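# Illustrative sketch (the filename is hypothetical but matches the indexing
# above): a file such as 'tas_mean_C_ar5_GFDL-CM3_rcp60_01_2006.tif' is grouped
# by its 6th underscore-separated token (index 5, here 'rcp60') and moved into
# downscaled/rcp60/ under the output path.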
| mit |
lukas/ml-class | examples/scikit/cross-validation-log.py | 2 | 1041 | import pandas as pd
import numpy as np
import wandb
run = wandb.init(job_type='eval')
config = run.config
config.lowercase=True
config.ngram_min=1
config.ngram_max=1
df = pd.read_csv('tweets.csv')
target = df['is_there_an_emotion_directed_at_a_brand_or_product']
text = df['tweet_text']
fixed_text = text[pd.notnull(text)]
fixed_target = target[pd.notnull(text)]
from sklearn.feature_extraction.text import CountVectorizer
count_vect = CountVectorizer(lowercase=config.lowercase,
ngram_range=(config.ngram_min,
config.ngram_max)
)
count_vect.fit(fixed_text)
counts = count_vect.transform(fixed_text)
from sklearn.naive_bayes import MultinomialNB
nb = MultinomialNB()
from sklearn.model_selection import cross_val_score, cross_val_predict
scores = cross_val_score(nb, counts, fixed_target)
print(scores)
print(scores.mean())
predictions = cross_val_predict(nb, counts, fixed_target)
wandb.log({"Accuracy": scores.mean()})
| gpl-2.0 |
tperol/ConvNetQuake | bin/viz/misclassified_loc.py | 1 | 6824 | #!/usr/bin/env python
# -------------------------------------------------------------------
# File: misclassified_loc
# Author: Thibaut Perol <[email protected]>
# Created: 2016-11-16
# ------------------------------------------------------------------#
""" plot misclassified windows with their predicted
and true labels on a risk map
e.g.,
./bin/viz/misclassified_loc --dataset data/6_clusters/see/mseed
--checkpoint_dir output/odyssey/6_clusters/BestOne_100det_71loc --model
ClusterPredictionBest --output here/wrong --n_clusters 6
Note: In this case we have store the test event traces in mseed format in
data/6_clusters/see/mseed using
./bin/data/create_dataset_enents.py \
--stream_dir data/eval_streams/ --catalog
data/6_clusters/catalog_with_cluster_ids.csv --output_dir data/6_clusters/see
--save_mseed True
misclassified_loc read the mseed file with the name that indicates the label,
the lat and long in order to plot the test event on the risk map produced by
ConvNetQuake
"""
import os
import setproctitle
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
import shutil
import quakenet.models as models
from quakenet.data_pipeline import DataPipeline
import quakenet.config as config
from quakenet.data_io import load_stream
flags = tf.flags
flags.DEFINE_string('dataset',
None, 'path to the records to validate on.')
flags.DEFINE_string('checkpoint_dir',
None, 'path to the directory of checkpoints')
flags.DEFINE_integer('step', None, 'step to load')
flags.DEFINE_integer('n_clusters', None, 'n of clusters')
flags.DEFINE_string('model',
None, 'model name to load')
flags.DEFINE_string('output', 'output/misclassified',
'dir of plotted misclassified windows')
args = flags.FLAGS
# TODO: Allow for variable length of window
print "ATTENTION: FOR NOW WINDOWS' LENGTH = 10 SECONDS"
def fetch_window_and_label(stream_name):
"""Load window stream, extract data and label"""
stream = load_stream(stream_name)
data = np.empty((1001, 3))
for i in range(3):
data[:, i] = stream[i].data.astype(np.float32)
data = np.expand_dims(data, 0)
stream_name = os.path.split(stream_name)[-1]
label = np.empty((1,))
label[0] = stream_name.split("_")[1]
return data, label
def fetch_lat_and_lon(stream):
lat = stream.split("_")[4]
lon = stream.split(".mseed")[0].split("_")[-1]
return float(lat), float(lon)
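# Illustrative sketch (the filename is hypothetical, but follows the token
# positions used by the helpers above): label = token 1, latitude = token 4,
# longitude = last token before '.mseed'.
#
#   name = 'event_3_win_lat_35.79_lon_-97.45.mseed'
#   # fetch_window_and_label -> label[0] = 3.0
#   # fetch_lat_and_lon      -> (35.79, -97.45)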
def fetch_streams_list(datadir):
"""Get the list of streams to analyze"""
fnames = []
for root, dirs, files in os.walk(datadir):
for f in files:
if f.endswith(".mseed"):
fnames.append(os.path.join(root, f))
return fnames
def plot_proba_map(i, lat,lon, clusters, class_prob, label,
lat_event, lon_event):
plt.clf()
class_prob = class_prob / np.sum(class_prob)
assert np.isclose(np.sum(class_prob),1)
risk_map = np.zeros_like(clusters,dtype=np.float64)
for cluster_id in range(len(class_prob)):
x,y = np.where(clusters == cluster_id)
risk_map[x,y] = class_prob[cluster_id]
plt.contourf(lon,lat,risk_map,cmap='YlOrRd',alpha=0.9,
origin='lower',vmin=0.0,vmax=1.0)
plt.colorbar()
plt.plot(lon_event, lat_event, marker='+',c='k',lw='5')
plt.contour(lon,lat,clusters,colors='k',hold='on')
plt.xlim((min(lon),max(lon)))
plt.ylim((min(lat),max(lat)))
png_name = os.path.join(args.output,
'{}_pred_{}_label_{}.eps'.format(i,np.argmax(class_prob),
label))
plt.savefig(png_name)
plt.close()
def main(_):
setproctitle.setproctitle('quakenet_viz')
ckpt = tf.train.get_checkpoint_state(args.checkpoint_dir)
cfg = config.Config()
cfg.batch_size = 1
cfg.n_clusters = args.n_clusters
cfg.add = 1
cfg.n_clusters += 1
# Remove previous output directory
if os.path.exists(args.output):
shutil.rmtree(args.output)
os.makedirs(args.output)
windows_list = fetch_streams_list(args.dataset)
# stream data with a placeholder
samples = {
'data': tf.placeholder(tf.float32,
shape=(cfg.batch_size, 1001, 3),
name='input_data'),
'cluster_id': tf.placeholder(tf.int64,
shape=(cfg.batch_size,),
name='input_label')
}
# set up model and validation metrics
model = models.get(args.model, samples, cfg,
args.checkpoint_dir,
is_training=False)
metrics = model.validation_metrics()
with tf.Session() as sess:
model.load(sess, args.step)
print 'Evaluating at step {}'.format(sess.run(model.global_step))
step = tf.train.global_step(sess, model.global_step)
mean_metrics = {}
for key in metrics:
mean_metrics[key] = 0
for n in range(len(windows_list)):
# Get One stream and label from the list
stream, cluster_id = fetch_window_and_label(windows_list[n])
# Get coordinates of the event
lat_event, lon_event = fetch_lat_and_lon(windows_list[n])
# Fetch class_proba and label
to_fetch = [samples['data'],
metrics,
model.layers['class_prob']]
feed_dict = {samples['data']: stream,
samples['cluster_id']: cluster_id}
sample, metrics_, class_prob_= sess.run(to_fetch,
feed_dict)
# Keep only clusters proba, remove noise proba
clusters_prob = class_prob_[0,1::]
# Print Misclassified window
if metrics_['localization_accuracy'] >= 1.0:
map_file ='cluster_ids_{}_comp.npy'.format(args.n_clusters)
clusters_map = np.load(map_file)
lat = np.load("cluster_ids_{}_comp_lat.npy".format(args.n_clusters))
lon = np.load("cluster_ids_{}_comp_lon.npy".format(args.n_clusters))
plot_proba_map(n, lat, lon, clusters_map, clusters_prob,
cluster_id, lat_event, lon_event)
for key in metrics:
mean_metrics[key] += cfg.batch_size * metrics_[key]
mess = model.validation_metrics_message(metrics_)
print '{:03d} | '.format(n) + mess
for key in metrics:
mean_metrics[key] /= len(windows_list)
mess = model.validation_metrics_message(mean_metrics)
print 'Average | ' + mess
if __name__ == '__main__':
tf.app.run()
| mit |
Changliu52/rpg_svo | svo_analysis/src/svo_analysis/analyse_logs.py | 17 | 3497 | #!/usr/bin/python
import os
import yaml
import numpy as np
import matplotlib.pyplot as plt
def analyse_logs(D, trace_dir):
# identify measurements which result from normal frames and which from keyframes
is_kf = np.argwhere( (D['dropout'] == 1) & (D['repr_n_mps'] >= 0))
is_frame = np.argwhere(D['repr_n_mps'] >= 0)
is_nokf = np.argwhere( (D['dropout'] == 0) & (D['repr_n_mps'] >= 0))
# set initial time to zero
D['timestamp'] = D['timestamp'] - D['timestamp'][0]
# ----------------------------------------------------------------------------
# plot number of reprojected points
mean_n_reproj_points = np.mean(D['repr_n_mps'][is_frame]);
mean_n_reproj_matches = np.mean(D['repr_n_new_references'][is_frame]);
mean_n_edges_final = np.mean(D['sfba_n_edges_final'][is_frame]);
fig = plt.figure(figsize=(8,3))
ax = fig.add_subplot(111, xlabel='time [s]')
ax.plot(D['timestamp'][is_frame], D['repr_n_mps'][is_frame], 'r-',
label='Reprojected Points, avg = %.2f'%mean_n_reproj_points)
ax.plot(D['timestamp'][is_frame], D['repr_n_new_references'][is_frame], 'b-',
label='Feature Matches, avg = %.2f'%mean_n_reproj_matches)
ax.plot(D['timestamp'][is_frame], D['sfba_n_edges_final'][is_frame], 'g-',
label='Points after Optimization, avg = %.2f'%mean_n_edges_final)
ax.set_ylim(bottom=0)
ax.legend(loc='lower right')
fig.tight_layout()
fig.savefig(os.path.join(trace_dir,'num_reprojected.pdf'), bbox_inches="tight")
# ----------------------------------------------------------------------------
# plot median error before and after pose-optimzation and bundle adjustment
init_error_avg = np.mean(D['sfba_error_init'][is_frame])
opt1_avg = np.mean(D['sfba_error_final'][is_frame])
fig = plt.figure(figsize=(8,2))
ax = fig.add_subplot(111, xlabel='time [s]', ylabel='error [px]')
ax.plot(D['timestamp'][is_frame], D['sfba_error_init'][is_frame], 'r-', label='Initial error')
ax.plot(D['timestamp'][is_frame], D['sfba_error_final'][is_frame], 'b-', label='Final error')
ax.legend(ncol=2)
fig.tight_layout()
fig.savefig(os.path.join(trace_dir,'reprojection_error.pdf'), bbox_inches="tight")
print 'average reprojection error improvement: ' + str(init_error_avg - opt1_avg)
# ----------------------------------------------------------------------------
# plot number of candidate points
fig = plt.figure(figsize=(8,3))
ax = fig.add_subplot(111, xlabel='time [s]')
ax.plot(D['timestamp'][is_frame], D['n_candidates'][is_frame], 'r-', label='Candidate Points')
fig.tight_layout()
fig.savefig(os.path.join(trace_dir,'candidate_points.pdf'), bbox_inches="tight")
# ----------------------------------------------------------------------------
# plot number of candidate points
fig = plt.figure(figsize=(8,2))
ax = fig.add_subplot(111, xlabel='time [s]', ylabel='px')
ax.plot(D['timestamp'][is_frame], D['sfba_thresh'][is_frame], 'r-', label='Threshold')
fig.tight_layout()
fig.savefig(os.path.join(trace_dir,'optimization_thresh.pdf'), bbox_inches="tight")
# ----------------------------------------------------------------------------
# write other statistics to file
stat = {'num_frames': len(is_frame),
'num_kfs': len(is_kf),
'reproj_error_avg_improvement': float(init_error_avg - opt1_avg)}
with open(os.path.join(trace_dir,'dataset_stats.yaml'),'w') as outfile:
outfile.write(yaml.dump(stat, default_flow_style=False))
| gpl-3.0 |
zorojean/scikit-learn | examples/calibration/plot_compare_calibration.py | 241 | 5008 | """
========================================
Comparison of Calibration of Classifiers
========================================
Well calibrated classifiers are probabilistic classifiers for which the output
of the predict_proba method can be directly interpreted as a confidence level.
For instance a well calibrated (binary) classifier should classify the samples
such that among the samples to which it gave a predict_proba value close to
0.8, approx. 80% actually belong to the positive class.
LogisticRegression returns well calibrated predictions as it directly
optimizes log-loss. In contrast, the other methods return biased probabilities,
with different biases per method:
* GaussianNaiveBayes tends to push probabilities to 0 or 1 (note the counts in
the histograms). This is mainly because it makes the assumption that features
are conditionally independent given the class, which is not the case in this
dataset which contains 2 redundant features.
* RandomForestClassifier shows the opposite behavior: the histograms show
peaks at approx. 0.2 and 0.9 probability, while probabilities close to 0 or 1
are very rare. An explanation for this is given by Niculescu-Mizil and Caruana
[1]: "Methods such as bagging and random forests that average predictions from
a base set of models can have difficulty making predictions near 0 and 1
because variance in the underlying base models will bias predictions that
should be near zero or one away from these values. Because predictions are
restricted to the interval [0,1], errors caused by variance tend to be one-
sided near zero and one. For example, if a model should predict p = 0 for a
case, the only way bagging can achieve this is if all bagged trees predict
zero. If we add noise to the trees that bagging is averaging over, this noise
will cause some trees to predict values larger than 0 for this case, thus
moving the average prediction of the bagged ensemble away from 0. We observe
this effect most strongly with random forests because the base-level trees
trained with random forests have relatively high variance due to feature
subseting." As a result, the calibration curve shows a characteristic sigmoid
shape, indicating that the classifier could trust its "intuition" more and
return probabilties closer to 0 or 1 typically.
* Support Vector Classification (SVC) shows an even more sigmoid curve as
the RandomForestClassifier, which is typical for maximum-margin methods
(compare Niculescu-Mizil and Caruana [1]), which focus on hard samples
that are close to the decision boundary (the support vectors).
.. topic:: References:
.. [1] Predicting Good Probabilities with Supervised Learning,
A. Niculescu-Mizil & R. Caruana, ICML 2005
"""
print(__doc__)
# Author: Jan Hendrik Metzen <[email protected]>
# License: BSD Style.
import numpy as np
np.random.seed(0)
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.naive_bayes import GaussianNB
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import LinearSVC
from sklearn.calibration import calibration_curve
X, y = datasets.make_classification(n_samples=100000, n_features=20,
n_informative=2, n_redundant=2)
train_samples = 100 # Samples used for training the models
X_train = X[:train_samples]
X_test = X[train_samples:]
y_train = y[:train_samples]
y_test = y[train_samples:]
# Create classifiers
lr = LogisticRegression()
gnb = GaussianNB()
svc = LinearSVC(C=1.0)
rfc = RandomForestClassifier(n_estimators=100)
###############################################################################
# Plot calibration plots
plt.figure(figsize=(10, 10))
ax1 = plt.subplot2grid((3, 1), (0, 0), rowspan=2)
ax2 = plt.subplot2grid((3, 1), (2, 0))
ax1.plot([0, 1], [0, 1], "k:", label="Perfectly calibrated")
for clf, name in [(lr, 'Logistic'),
(gnb, 'Naive Bayes'),
(svc, 'Support Vector Classification'),
(rfc, 'Random Forest')]:
clf.fit(X_train, y_train)
if hasattr(clf, "predict_proba"):
prob_pos = clf.predict_proba(X_test)[:, 1]
else: # use decision function
prob_pos = clf.decision_function(X_test)
prob_pos = \
(prob_pos - prob_pos.min()) / (prob_pos.max() - prob_pos.min())
fraction_of_positives, mean_predicted_value = \
calibration_curve(y_test, prob_pos, n_bins=10)
ax1.plot(mean_predicted_value, fraction_of_positives, "s-",
label="%s" % (name, ))
ax2.hist(prob_pos, range=(0, 1), bins=10, label=name,
histtype="step", lw=2)
ax1.set_ylabel("Fraction of positives")
ax1.set_ylim([-0.05, 1.05])
ax1.legend(loc="lower right")
ax1.set_title('Calibration plots (reliability curve)')
ax2.set_xlabel("Mean predicted value")
ax2.set_ylabel("Count")
ax2.legend(loc="upper center", ncol=2)
plt.tight_layout()
plt.show()
| bsd-3-clause |