repo_name | path | copies | size | content | license
---|---|---|---|---|---
DarainS/texas-holdem-tools | ai/tfpreflop/run_MountainCar.py | 1 | 2021 | """
Policy Gradient, Reinforcement Learning.
The MountainCar example
View more on my tutorial page: https://morvanzhou.github.io/tutorials/
Using:
Tensorflow: 1.0
gym: 0.8.0
"""
import gym
from RL_brain import PolicyGradient
import matplotlib.pyplot as plt
DISPLAY_REWARD_THRESHOLD = -2000 # renders environment if total episode reward is greater than this threshold
# episode: 154 reward: -10667
# episode: 387 reward: -2009
# episode: 489 reward: -1006
# episode: 628 reward: -502
RENDER = False # rendering wastes time
env = gym.make('MountainCar-v0')
env.seed(1) # reproducible, general Policy gradient has high variance
env = env.unwrapped
print(env.action_space)
print(env.observation_space)
print(env.observation_space.high)
print(env.observation_space.low)
RL = PolicyGradient(
n_actions=env.action_space.n,
n_features=env.observation_space.shape[0],
learning_rate=0.02,
reward_decay=0.995,
# output_graph=True,
)
for i_episode in range(1000):
observation = env.reset()
while True:
if RENDER: env.render()
action = RL.choose_action(observation)
observation_, reward, done, info = env.step(action) # reward = -1 in all cases
RL.store_transition(observation, action, reward)
if done:
# calculate running reward
ep_rs_sum = sum(RL.ep_rs)
if 'running_reward' not in globals():
running_reward = ep_rs_sum
else:
running_reward = running_reward * 0.99 + ep_rs_sum * 0.01
if running_reward > DISPLAY_REWARD_THRESHOLD: RENDER = True # rendering
print("episode:", i_episode, " reward:", int(running_reward))
vt = RL.learn() # train
if i_episode == 30:
plt.plot(vt) # plot the episode vt
plt.xlabel('episode steps')
plt.ylabel('normalized state-action value')
plt.show()
break
observation = observation_ | mit |
yipenggao/moose | python/mooseutils/ImageDiffer.py | 7 | 5535 | #!/usr/bin/env python
import os
class ImageDiffer(object):
"""
A class for comparing images using the structural similarity index (SSIM).
https://en.wikipedia.org/wiki/Structural_similarity
Args:
file1[str]: The base file to compare against (gold).
file2[str]: The file to be compared.
Kwargs:
allowed[float]: (Default: 0.95) The allowed lower limit of the SSIM (1 is identical images).
"""
def __init__(self, file1, file2, **kwargs):
# Store the file names
self.__files = [file1, file2]
# Extract the optional arguments
self.__allowed = float(kwargs.pop('allowed', 0.95))
# Storage for error messages, each stored as a tuple: (error, message)
self.__error = 0 # The computed error
self.__errors = []
# Read the image files
self.__data = []
self.__data.append(self.__readImage(self.__files[0]))
self.__data.append(self.__readImage(self.__files[1]))
# Perform comparison
self.__compare()
def fail(self):
"""
Check the comparison status. (public)
Returns:
bool: True when the test failed.
"""
return len(self.__errors) > 0
def message(self, **kwargs):
"""
Print the error message(s). (public)
Returns:
str: The output message as a single string.
"""
# Header
output = []
output.append('Running ImageDiffer.py')
output.append(' File 1: ' + self.__files[0])
output.append(' File 2: ' + self.__files[1])
output.append(' Allowed (SSIM): ' + str(self.__allowed))
output.append(' Computed (SSIM): ' + str(self.__error))
output.append(' No. of errors: ' + str(len(self.__errors)))
# Errors
cnt = 0
for e in self.__errors:
cnt += 1
output.append('')
output.append('ERROR ' + str(cnt) + ':')
output.append(' ' + e[0])
if e[1]:
output.append(' ' + e[1])
# Print the output
if kwargs.pop('output', False):
print('\n'.join(output))
# Return the text, as a single string
return '\n'.join(output)
def __compare(self):
"""
Perform image comparison. (private)
"""
# Do nothing if something failed to open
if len(self.__errors) > 0:
return
# Check sizes
if (self.__data[0].size != self.__data[1].size):
err = 'The two images are different sizes'
msg = [' File 1: ' + self.__files[0]]
msg += [' size: ' + str(self.__data[0].size)]
msg += [' File 2: ' + self.__files[1]]
msg += [' size: ' + str(self.__data[1].size)]
self.__addError(err, msg)
return
# Compute the error
import skimage.measure
self.__error = skimage.measure.compare_ssim(self.__data[0], self.__data[1], multichannel=True)
# Report the error
if self.__error < self.__allowed:
err = 'The files are different.'
msg = ['The difference of the images exceeds the "allowed" SSIM.']
msg += [' Allowed (SSIM): ' + str(self.__allowed)]
msg += [' Computed (SSIM): ' + str(self.__error)]
msg += [' Rel. difference: ' + str( abs(self.__allowed - self.__error) / self.__error)]
self.__addError(err, msg)
return
def __readImage(self, filename):
"""
A read function that appends an error message if the read fails. (private)
"""
if not os.path.exists(filename):
self.__addError('Failed to open ' + filename + ', the file does not exist.')
return None
import matplotlib.pyplot as plt
return plt.imread(filename)
def __addError(self, err, msg=[]):
"""
Add an ImageError object to the storage vector (private)
Args:
err[str]: A string containing the error message.
msg[list]: A detailed message for the error.
"""
self.__errors.append((err, '\n'.join(msg)))
# This file is executable and allows for running the ImageDiffer via the command line
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(description='Perform comparison of images.')
parser.add_argument('files', type=str, nargs='+', help="The image(s) to compare. If a single image is provided the 'gold' version is used.")
args = parser.parse_args()
# Test files
n = len(args.files)
if n == 1:
file1 = args.files[0]
file0 = os.path.join(os.path.dirname(file1), 'gold', os.path.basename(file1))
elif n == 2:
file0 = args.files[0]
file1 = args.files[1]
else:
print "You must specify one or two files for comparison, see -h"
d = ImageDiffer(file0, file1)
print('\n\n' + d.message())
| lgpl-2.1 |
theoryno3/pylearn2 | pylearn2/cross_validation/tests/test_cross_validation.py | 49 | 6767 | """
Tests for cross-validation module.
"""
import os
import tempfile
from pylearn2.config import yaml_parse
from pylearn2.testing.skip import skip_if_no_sklearn
def test_train_cv():
"""Test TrainCV class."""
skip_if_no_sklearn()
handle, layer0_filename = tempfile.mkstemp()
handle, layer1_filename = tempfile.mkstemp()
handle, layer2_filename = tempfile.mkstemp()
# train the first hidden layer (unsupervised)
# (test for TrainCV)
trainer = yaml_parse.load(test_yaml_layer0 %
{'layer0_filename': layer0_filename})
trainer.main_loop()
# train the second hidden layer (unsupervised)
# (test for TransformerDatasetCV)
trainer = yaml_parse.load(test_yaml_layer1 %
{'layer0_filename': layer0_filename,
'layer1_filename': layer1_filename})
trainer.main_loop()
# train the third hidden layer (unsupervised)
# (test for StackedBlocksCV)
trainer = yaml_parse.load(test_yaml_layer2 %
{'layer0_filename': layer0_filename,
'layer1_filename': layer1_filename,
'layer2_filename': layer2_filename})
trainer.main_loop()
# train the full model (supervised)
# (test for PretrainedLayerCV)
trainer = yaml_parse.load(test_yaml_layer3 %
{'layer0_filename': layer0_filename,
'layer1_filename': layer1_filename,
'layer2_filename': layer2_filename})
trainer.main_loop()
# clean up
os.remove(layer0_filename)
os.remove(layer1_filename)
os.remove(layer2_filename)
test_yaml_layer0 = """
!obj:pylearn2.cross_validation.TrainCV {
dataset_iterator:
!obj:pylearn2.cross_validation.dataset_iterators.DatasetKFold {
dataset:
!obj:pylearn2.testing.datasets.random_one_hot_dense_design_matrix
{
rng: !obj:numpy.random.RandomState { seed: 1 },
num_examples: 100,
dim: 10,
num_classes: 2,
},
},
model: !obj:pylearn2.models.autoencoder.Autoencoder {
nvis: 10,
nhid: 8,
act_enc: 'sigmoid',
act_dec: 'linear'
},
algorithm: !obj:pylearn2.training_algorithms.bgd.BGD {
batch_size: 50,
line_search_mode: 'exhaustive',
conjugate: 1,
termination_criterion:
!obj:pylearn2.termination_criteria.EpochCounter {
max_epochs: 1,
},
cost: !obj:pylearn2.costs.autoencoder.MeanSquaredReconstructionError {
},
},
save_path: %(layer0_filename)s,
}
"""
test_yaml_layer1 = """
!obj:pylearn2.cross_validation.TrainCV {
dataset_iterator:
!obj:pylearn2.cross_validation.dataset_iterators.TransformerDatasetCV {
dataset_iterator:
!obj:pylearn2.cross_validation.dataset_iterators.DatasetKFold {
dataset:
!obj:pylearn2.testing.datasets.random_one_hot_dense_design_matrix
{
rng: !obj:numpy.random.RandomState { seed: 1 },
num_examples: 100,
dim: 10,
num_classes: 2,
},
},
transformers: !pkl: %(layer0_filename)s,
},
model: !obj:pylearn2.models.autoencoder.Autoencoder {
nvis: 8,
nhid: 6,
act_enc: 'sigmoid',
act_dec: 'linear'
},
algorithm: !obj:pylearn2.training_algorithms.bgd.BGD {
batch_size: 50,
line_search_mode: 'exhaustive',
conjugate: 1,
termination_criterion:
!obj:pylearn2.termination_criteria.EpochCounter {
max_epochs: 1,
},
cost: !obj:pylearn2.costs.autoencoder.MeanSquaredReconstructionError {
},
},
save_path: %(layer1_filename)s,
}
"""
test_yaml_layer2 = """
!obj:pylearn2.cross_validation.TrainCV {
dataset_iterator:
!obj:pylearn2.cross_validation.dataset_iterators.TransformerDatasetCV {
dataset_iterator:
!obj:pylearn2.cross_validation.dataset_iterators.DatasetKFold {
dataset:
!obj:pylearn2.testing.datasets.random_one_hot_dense_design_matrix
{
rng: !obj:numpy.random.RandomState { seed: 1 },
num_examples: 100,
dim: 10,
num_classes: 2,
},
},
transformers: !obj:pylearn2.cross_validation.blocks.StackedBlocksCV {
layers: [
!pkl: %(layer0_filename)s,
!pkl: %(layer1_filename)s,
],
},
},
model: !obj:pylearn2.models.autoencoder.Autoencoder {
nvis: 6,
nhid: 4,
act_enc: 'sigmoid',
act_dec: 'linear'
},
algorithm: !obj:pylearn2.training_algorithms.bgd.BGD {
batch_size: 50,
line_search_mode: 'exhaustive',
conjugate: 1,
termination_criterion:
!obj:pylearn2.termination_criteria.EpochCounter {
max_epochs: 1,
},
cost: !obj:pylearn2.costs.autoencoder.MeanSquaredReconstructionError {
},
},
save_path: %(layer2_filename)s,
}
"""
test_yaml_layer3 = """
!obj:pylearn2.cross_validation.TrainCV {
dataset_iterator:
!obj:pylearn2.cross_validation.dataset_iterators.DatasetKFold {
dataset:
&train !obj:pylearn2.testing.datasets.random_one_hot_dense_design_matrix
{
rng: !obj:numpy.random.RandomState { seed: 1 },
num_examples: 100,
dim: 10,
num_classes: 2,
},
},
model: !obj:pylearn2.models.mlp.MLP {
nvis: 10,
layers: [
!obj:pylearn2.cross_validation.mlp.PretrainedLayerCV {
layer_name: 'h0',
layer_content: !pkl: %(layer0_filename)s,
},
!obj:pylearn2.cross_validation.mlp.PretrainedLayerCV {
layer_name: 'h1',
layer_content: !pkl: %(layer1_filename)s,
},
!obj:pylearn2.cross_validation.mlp.PretrainedLayerCV {
layer_name: 'h2',
layer_content: !pkl: %(layer2_filename)s,
},
!obj:pylearn2.models.mlp.Softmax {
layer_name: 'y',
n_classes: 2,
irange: 0.,
},
],
},
algorithm: !obj:pylearn2.training_algorithms.bgd.BGD {
batch_size: 50,
line_search_mode: 'exhaustive',
conjugate: 1,
termination_criterion:
!obj:pylearn2.termination_criteria.EpochCounter {
max_epochs: 1,
},
},
}
"""
| bsd-3-clause |
leezu/mxnet | python/mxnet/numpy/multiarray.py | 1 | 408331 | #!/usr/bin/env python
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=too-many-lines, unused-argument
"""numpy ndarray and util functions."""
try:
from __builtin__ import all as py_all
from __builtin__ import slice as py_slice
except ImportError:
from builtins import all as py_all
from builtins import slice as py_slice
from array import array as native_array
import functools
import ctypes
import warnings
import numpy as _np
from .. import _deferred_compute as dc
from ..autograd import is_recording
from ..ndarray import NDArray, _DTYPE_NP_TO_MX, _GRAD_REQ_MAP
from ..ndarray import indexing_key_expand_implicit_axes, get_indexing_dispatch_code,\
get_oshape_of_gather_nd_op
from ..ndarray._internal import _set_np_ndarray_class
from . import _op as _mx_np_op
from ..base import check_call, _LIB, NDArrayHandle, c_array, mx_int, mx_int64
from ..base import mx_real_t, c_array_buf, mx_uint, numeric_types, integer_types
from ..runtime import Features
from ..context import Context
from ..util import set_module, wrap_np_unary_func, wrap_np_binary_func,\
is_np_default_dtype
from ..context import current_context
from ..ndarray import numpy as _mx_nd_np
from ..ndarray.numpy import _internal as _npi
from ..ndarray.ndarray import _storage_type
from ..dlpack import ndarray_from_numpy
from .utils import _get_np_op
from .fallback import * # pylint: disable=wildcard-import,unused-wildcard-import
from . import fallback
__all__ = ['ndarray', 'empty', 'empty_like', 'array', 'shape', 'median',
'zeros', 'zeros_like', 'ones', 'ones_like', 'full', 'full_like', 'all', 'any', 'broadcast_to',
'add', 'subtract', 'multiply', 'divide', 'mod', 'remainder', 'fmod', 'power', 'bitwise_not',
'delete', 'trace', 'transpose', 'copy', 'moveaxis', 'reshape', 'dot',
'arctan2', 'sin', 'cos', 'tan', 'sinh', 'cosh', 'tanh', 'log10', 'invert',
'sqrt', 'cbrt', 'abs', 'absolute', 'fabs', 'exp', 'expm1', 'arcsin', 'arccos', 'arctan', 'sign', 'log',
'degrees', 'log2', 'log1p', 'rint', 'radians', 'reciprocal', 'square', 'negative', 'histogram',
'fix', 'ceil', 'floor', 'trunc', 'logical_not', 'arcsinh', 'arccosh', 'arctanh', 'append', 'argsort',
'sort', 'tensordot', 'eye', 'linspace', 'logspace', 'expand_dims', 'tile', 'arange',
'array_split', 'split', 'hsplit', 'vsplit', 'dsplit', 'flatnonzero', 'tril_indices',
'concatenate', 'stack', 'vstack', 'row_stack', 'column_stack', 'hstack', 'dstack',
'average', 'mean', 'maximum', 'fmax', 'minimum', 'fmin', 'amax', 'amin', 'max', 'min',
'swapaxes', 'clip', 'argmax', 'argmin', 'std', 'var', 'insert',
'indices', 'copysign', 'ravel', 'unravel_index', 'diag_indices_from', 'hanning', 'hamming', 'blackman',
'logical_and', 'logical_or', 'logical_xor',
'flip', 'flipud', 'fliplr', 'around', 'round', 'round_', 'arctan2', 'hypot',
'triu_indices_from', 'triu_indices', 'tri',
'bitwise_and', 'bitwise_xor', 'bitwise_or', 'rad2deg', 'deg2rad',
'unique', 'lcm', 'tril', 'triu', 'identity', 'take', 'ldexp', 'vdot', 'inner', 'outer',
'cross', 'kron', 'equal', 'not_equal', 'interp',
'greater', 'less', 'greater_equal', 'less_equal', 'roll', 'rot90', 'einsum', 'true_divide', 'nonzero',
'quantile', 'percentile', 'shares_memory', 'may_share_memory', 'diff', 'ediff1d', 'resize', 'matmul',
'nan_to_num', 'isnan', 'isinf', 'isposinf', 'isneginf', 'isfinite', 'polyval', 'where', 'bincount',
'atleast_1d', 'atleast_2d', 'atleast_3d', 'fill_diagonal', 'squeeze',
'diagflat', 'repeat', 'prod', 'pad', 'cumsum', 'sum', 'rollaxis', 'diag', 'diagonal']
__all__ += fallback.__all__
# Return code for dispatching indexing function call
_NDARRAY_UNSUPPORTED_INDEXING = -1
_NDARRAY_BASIC_INDEXING = 0
_NDARRAY_ADVANCED_INDEXING = 1
_NDARRAY_EMPTY_TUPLE_INDEXING = 2
# Return code for 0-d boolean array handler
_NDARRAY_NO_ZERO_DIM_BOOL_ARRAY = -1
_NDARRAY_ZERO_DIM_BOOL_ARRAY_FALSE = 0
_NDARRAY_ZERO_DIM_BOOL_ARRAY_TRUE = 1
_SIGNED_INT32_UPPER_LIMIT = (2**31 - 1)
# Caching whether MXNet was built with INT64 support or not
_INT64_TENSOR_SIZE_ENABLED = None
def _int64_enabled():
global _INT64_TENSOR_SIZE_ENABLED
if _INT64_TENSOR_SIZE_ENABLED is None:
_INT64_TENSOR_SIZE_ENABLED = Features().is_enabled('INT64_TENSOR_SIZE')
return _INT64_TENSOR_SIZE_ENABLED
# This function is copied from ndarray.py since pylint
# keeps giving false alarm error of undefined-all-variable
def _new_alloc_handle(shape, ctx, delay_alloc, dtype=mx_real_t): # pylint: disable=redefined-outer-name
"""Return a new handle with specified shape and context.
Empty handle is only used to hold results.
Returns
-------
handle
A new empty `ndarray` handle.
"""
hdl = NDArrayHandle()
if _int64_enabled():
check_call(_LIB.MXNDArrayCreate64(
c_array_buf(mx_int64, native_array('q', shape)),
ctypes.c_int(len(shape)),
ctypes.c_int(ctx.device_typeid),
ctypes.c_int(ctx.device_id),
ctypes.c_int(int(delay_alloc)),
ctypes.c_int(int(_DTYPE_NP_TO_MX[_np.dtype(dtype).type])),
ctypes.byref(hdl)))
else:
# When shape is larger than uint32 then there is an overflow error at python end itself.
# It needs to be caught here since the call doesn't even reach backend.
array_size = 1
for idx in shape:
array_size = array_size * idx
if array_size > _SIGNED_INT32_UPPER_LIMIT:
raise Exception("[_new_alloc_handle] Size of tensor you are trying to allocate is " +
"larger than 2^31 elements. Please build with flag " +
"USE_INT64_TENSOR_SIZE=1")
if _np.dtype(dtype) == _np.dtype([('bfloat16', _np.uint16)]):
dtype_type = _np.dtype(dtype)
else:
dtype_type = _np.dtype(dtype).type
check_call(_LIB.MXNDArrayCreate(
c_array_buf(mx_uint, native_array('I', shape)),
mx_uint(len(shape)),
ctypes.c_int(ctx.device_typeid),
ctypes.c_int(ctx.device_id),
ctypes.c_int(int(delay_alloc)),
ctypes.c_int(int(_DTYPE_NP_TO_MX[dtype_type])),
ctypes.byref(hdl)))
return hdl
def _reshape_view(a, *shape): # pylint: disable=redefined-outer-name
"""Returns a **view** of this array with a new shape without altering any data.
Parameters
----------
shape : tuple of int, or n ints
The new shape should not change the array size, namely
``np.prod(new_shape)`` should be equal to ``np.prod(a.shape)``.
Some dimensions of the shape can take special value -1, which
infers the dimension of the output shape by using the remainder of the
input dimensions keeping the size of the new array same as that of the input array.
At most one dimension of shape can be -1.
Returns
-------
ndarray
An array with desired shape that shares data with this array.
"""
if len(shape) == 1 and isinstance(shape[0], (list, tuple)):
shape = shape[0]
handle = NDArrayHandle()
check_call(_LIB.MXNDArrayReshape64(a.handle,
len(shape),
c_array(ctypes.c_int64, shape),
False,
ctypes.byref(handle)))
return ndarray(handle=handle, writable=a.writable)
def _as_mx_np_array(object, ctx=None, zero_copy=False):
"""Convert arrays or any array member of container to mxnet.numpy.ndarray on ctx."""
if object is None or isinstance(object, ndarray):
return object
elif isinstance(object, _np.ndarray):
from_numpy = ndarray_from_numpy(ndarray, array)
return from_numpy(object, zero_copy and object.flags['C_CONTIGUOUS'])
elif isinstance(object, (integer_types, numeric_types)):
return object
elif isinstance(object, (_np.bool_, _np.bool)):
return array(object, dtype=_np.bool_, ctx=ctx)
elif isinstance(object, (list, tuple)):
tmp = [_as_mx_np_array(arr, ctx=ctx, zero_copy=zero_copy) for arr in object]
return object.__class__(tmp)
else:
raise TypeError('Does not support converting {} to mx.np.ndarray.'.format(str(type(object))))
def _as_onp_array(object):
"""Convert object to mxnet.numpy.ndarray."""
cur_ctx = None
if isinstance(object, ndarray):
return object.asnumpy(), object.ctx
elif isinstance(object, (list, tuple)):
tmp = []
for arr in object:
arr, tmp_ctx = _as_onp_array(arr)
# if isinstance(arr, (list, tuple)):
# raise TypeError('type {} not supported'.format(str(type(arr))))
tmp.append(arr)
if cur_ctx is None:
cur_ctx = tmp_ctx
elif tmp_ctx is not None and cur_ctx != tmp_ctx:
raise ValueError('Ambiguous to set the context for the output ndarray since'
' input ndarrays are allocated on different devices: {} and {}'
.format(str(cur_ctx), str(tmp_ctx)))
return object.__class__(tmp), cur_ctx
else:
return object, cur_ctx
# Have to use 0 as default value for stype since pylint does not allow
# importing _STORAGE_TYPE_DEFAULT from ndarray.py.
def _np_ndarray_cls(handle, writable=True, stype=0):
if stype == -1:
stype = _storage_type(handle)
if stype != 0:
raise ValueError('_np_ndarray_cls currently only supports default storage '
'type, while received stype = {}'.format(stype))
return ndarray(handle, writable=writable)
_set_np_ndarray_class(_np_ndarray_cls)
_NUMPY_ARRAY_FUNCTION_DICT = {}
_NUMPY_ARRAY_UFUNC_DICT = {}
_FALLBACK_ARRAY_FUNCTION_WARNED_RECORD = {}
_FALLBACK_ARRAY_UFUNC_WARNED_RECORD = {}
def wrap_mxnp_np_ufunc(func):
"""
A convenience decorator for wrapping for python overload-able ops to provide type
casting for mixed use of mx_np and onp inputs.
Parameters
----------
func : a python overload-able binary function to be wrapped for type casting.
Returns
-------
Function
A function wrapped with type casted.
"""
@functools.wraps(func)
def _wrap_mxnp_np_ufunc(x1, x2):
if isinstance(x2, _np.ndarray):
x2 = _as_mx_np_array(x2, ctx=x1.ctx)
return func(x1, x2)
return _wrap_mxnp_np_ufunc
@set_module('mxnet.numpy')
class ndarray(NDArray): # pylint: disable=invalid-name
"""
ndarray(handle, writable=True):
An array object represents a multidimensional, homogeneous array of fixed-size items.
An associated data-type object describes the format of each element in the array
(its byte-order, how many bytes it occupies in memory, whether it is an integer, a
floating point number, or something else, etc.). Arrays should be constructed using
`array`, `zeros` or `empty`. Currently, only c-contiguous arrays are supported.
Arrays should be constructed using `array`, `zeros` or `empty` (refer
to the See Also section below). The parameters given here refer to
a low-level method (`ndarray(...)`) for instantiating an array.
For more information, refer to the `mxnet.numpy` module and examine the
methods and attributes of an array.
Parameters
----------
handle: int
The ndarray handle in backend (C++).
writable: bool
Indicates whether inplace-assignment is allowed for the array.
Attributes
----------
T : ndarray
Transpose of the array.
dtype : dtype object
Describes the format of the elements in the array.
size : int
Number of elements in the array.
ndim : int
The array's number of dimensions.
shape : tuple of ints
Shape of the array.
See Also
--------
array : Construct an array.
zeros : Create an array, each element of which is zero.
empty : Create an array, but leave its allocated memory unchanged (i.e.,
it contains "garbage").
"""
@staticmethod
def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): # pylint: disable=bad-staticmethod-argument
"""
Dispatch official NumPy unary/binary operator calls on mxnet.numpy.ndarray
to this function. The operators must comply with the ufunc definition in NumPy.
The following code is adapted from CuPy.
Casting rules for operators mixing mx_np and onp (an in-place op keeps its own type):
| Expression | a type | b type | out type|
| --- | --- | --- | --- |
| `a += b` | onp | mx_np | onp |
| `a += b` | mx_np | onp | mx_np |
| `c = a + b` | onp | mx_np | mx_np |
| `c = a + b` | mx_np | onp | mx_np |
"""
ufunc_list = ["add", "subtract", "multiply", "divide", "true_divide", "floor_divide", "power",
"remainder", "bitwise_and", "bitwise_or", "bitwise_xor", "left_shift", "right_shift",
"greater", "greater_equal", "less", "less_equal", "not_equal", "equal", "matmul"]
if 'out' in kwargs:
# need to unfold tuple argument in kwargs
out = kwargs['out']
if len(out) != 1:
raise ValueError('The `out` parameter must have exactly one ndarray')
kwargs['out'] = out[0]
if method == '__call__':
name = ufunc.__name__
mx_ufunc = _NUMPY_ARRAY_UFUNC_DICT.get(name, None)
onp_op = _get_np_op(name)
if mx_ufunc is None:
# try to fallback to official NumPy op
if is_recording():
raise ValueError("Falling back to NumPy operator {} with autograd active is not supported."
"Please consider moving the operator to the outside of the autograd scope.")\
.format(name)
new_inputs = [arg.asnumpy() if isinstance(arg, ndarray) else arg for arg in inputs]
if onp_op not in _FALLBACK_ARRAY_UFUNC_WARNED_RECORD:
import logging
logging.warning("np.%s is a fallback operator, "
"which is actually using official numpy's implementation", name)
_FALLBACK_ARRAY_UFUNC_WARNED_RECORD[onp_op] = True
out = onp_op(*new_inputs, **kwargs)
return _as_mx_np_array(out, ctx=inputs[0].ctx)
# ops with np mx_np
elif name in ufunc_list and isinstance(inputs[0], _np.ndarray):
# inplace
if 'out' in kwargs:
new_inputs = [arg.asnumpy() if isinstance(arg, ndarray) else arg for arg in inputs]
return onp_op(*new_inputs, **kwargs)
else:
new_inputs = [_as_mx_np_array(arg, ctx=inputs[1].ctx)
if isinstance(arg, _np.ndarray) else arg for arg in inputs]
return mx_ufunc(*new_inputs, **kwargs)
else:
return mx_ufunc(*inputs, **kwargs)
else:
return NotImplemented
@staticmethod
def __array_function__(self, func, types, args, kwargs): # pylint: disable=bad-staticmethod-argument
"""
Dispatch official NumPy operators that comply with the array function protocol to
this function.
"""
mx_np_func = _NUMPY_ARRAY_FUNCTION_DICT.get(func, None)
func_name = func.__name__
if mx_np_func is None:
# try to fallback to official NumPy op
if is_recording():
raise ValueError("Falling back to NumPy operator {} with autograd active is not supported."
"Please consider moving the operator to the outside of the autograd scope.")\
.format(func)
new_args, cur_ctx = _as_onp_array(args)
if cur_ctx is None:
raise ValueError('Unknown context for the input ndarrays. It is probably a bug. Please'
' create an issue on GitHub.')
new_kwargs = {}
for k, v in kwargs.items():
new_kwargs[k] = v.asnumpy() if isinstance(v, ndarray) else v
if func not in _FALLBACK_ARRAY_FUNCTION_WARNED_RECORD:
import logging
logging.warning("np.%s is a fallback operator, "
"which is actually using official numpy's implementation.", func_name)
_FALLBACK_ARRAY_FUNCTION_WARNED_RECORD[func] = True
out = func(*new_args, **new_kwargs)
return _as_mx_np_array(out, ctx=cur_ctx)
else:
if py_all(issubclass(t, ndarray) for t in types):
return mx_np_func(*args, **kwargs)
else:
try:
cur_ctx = next(a.ctx for a in args if hasattr(a, 'ctx'))
except StopIteration:
cur_ctx = next(a.ctx for a in kwargs.values() if hasattr(a, 'ctx'))
new_args = _as_mx_np_array(args, ctx=cur_ctx,
zero_copy=func_name in {'may_share_memory', 'shares_memory'})
new_kwargs = {k: _as_mx_np_array(v, cur_ctx) for k, v in kwargs.items()}
return mx_np_func(*new_args, **new_kwargs)
def _get_np_basic_indexing(self, key):
"""
This function indexes ``self`` with a tuple of `slice` objects only.
"""
key_nd = tuple(idx for idx in key if idx is not None)
if len(key_nd) < self.ndim:
raise RuntimeError(
'too few indices after normalization: expected `ndim` ({}) '
'but got {}. This is a bug, please report it!'
''.format(self.ndim, len(key_nd))
)
if len(key_nd) > self.ndim:
raise IndexError(
'too many indices ({}) for array with {} dimensions'
''.format(len(key_nd), self.ndim)
)
none_axes = [ax for ax in range(len(key)) if key[ax] is None] # pylint: disable=invalid-name
slc_key, int_axes = self._basic_indexing_key_int_to_slice(key_nd)
new_axes = self._new_axes_after_basic_indexing(none_axes, key)
# Check bounds for integer axes
for ax in int_axes: # pylint: disable=invalid-name
if not -self.shape[ax] <= key_nd[ax] < self.shape[ax]:
raise IndexError(
'index {} is out of bounds for axis {} with size {}'
''.format(key_nd[ax], ax, self.shape[ax]))
if self._basic_indexing_slice_is_contiguous(slc_key, self.shape):
# Create a shared-memory view by using low-level flat slicing
flat_begin, flat_end = self._basic_indexing_contiguous_flat_begin_end(
slc_key, self.shape
)
handle = NDArrayHandle()
flat_self = self.reshape_view(-1)
if _int64_enabled():
check_call(
_LIB.MXNDArraySlice64(
flat_self.handle,
ctypes.c_int64(flat_begin),
ctypes.c_int64(flat_end),
ctypes.byref(handle),
)
)
else:
check_call(
_LIB.MXNDArraySlice(
flat_self.handle,
ctypes.c_uint32(flat_begin),
ctypes.c_uint32(flat_end),
ctypes.byref(handle),
)
)
sliced_shape = self._basic_indexing_sliced_shape(slc_key, self.shape)
sliced = self.__class__(handle=handle, writable=self.writable)
if 0 in sliced_shape:
sliced = sliced.reshape(sliced_shape)
else:
sliced = sliced.reshape_view(sliced_shape)
else:
begin, end, step = self._basic_indexing_key_to_begin_end_step(
slc_key, self.shape, keep_none=True
)
sliced = _npi.slice(self, begin, end, step)
# Reshape to final shape due to integer and `None` entries in `key`.
final_shape = [sliced.shape[i] for i in range(sliced.ndim) if i not in int_axes]
for ax in new_axes: # pylint: disable=invalid-name
final_shape.insert(ax, 1)
if sliced.size == 0:
return sliced.reshape(tuple(final_shape))
else:
return sliced.reshape_view(tuple(final_shape))
def _get_np_empty_tuple_indexing(self, key):
new_shape = []
num_none = 0
for i, idx in enumerate(key):
if idx is None:
new_shape.append(1) # expand dimension
num_none += 1
elif idx == ():
new_shape.append(0) # 0 shape
elif idx == slice(None, None, None):
new_shape.append(self.shape[i - num_none])
return empty(new_shape, dtype=self.dtype)
def _get_np_advanced_indexing(self, key):
idcs, new_axes = self._get_index_nd(key)
if type(idcs) == NDArray: # pylint: disable=unidiomatic-typecheck
idcs = idcs.as_np_ndarray()
else:
idcs = _npi.stack(*[i if isinstance(i, self.__class__) else i.as_np_ndarray() for i in idcs])
sliced = _npi.gather_nd(self, idcs)
# Reshape due to `None` entries in `key`.
if new_axes:
final_shape = [sliced.shape[i] for i in range(sliced.ndim)]
for ax in new_axes: # pylint: disable=invalid-name
final_shape.insert(ax, 1)
return sliced.reshape(tuple(final_shape))
else:
return sliced
def _set_np_advanced_indexing(self, key, value):
"""This function is called by __setitem__ when key is an advanced index."""
idcs, new_axes = self._get_index_nd(key)
if type(idcs) == NDArray: # pylint: disable=unidiomatic-typecheck
idcs = idcs.as_np_ndarray()
else:
idcs = _npi.stack(*[i if isinstance(i, self.__class__) else i.as_np_ndarray() for i in idcs])
vshape = get_oshape_of_gather_nd_op(self.shape, idcs.shape)
value_nd = self._prepare_value_nd(value, bcast_shape=vshape, squeeze_axes=new_axes)
self._scatter_set_nd(value_nd, idcs)
# pylint: disable=redefined-outer-name
def _get_np_boolean_indexing(self, key, ndim, shape):
"""
There are two types of boolean indices (which are, for the most part, equivalent).
This function handles single boolean indexing for higher speed.
If this is not the case, it is instead expanded into (multiple)
integer array indices and will be handled by advanced indexing.
"""
key_shape = key.shape
key_ndim = len(key_shape)
if ndim < key_ndim:
raise IndexError('too many indices, whose ndim = {}, for array with ndim = {}'
.format(key_ndim, ndim))
for i in range(key_ndim):
if key_shape[i] != shape[i]:
raise IndexError('boolean index did not match indexed array along dimension {};'
' dimension is {} but corresponding boolean dimension is {}'
.format(i, shape[i], key_shape[i]))
remaining_dims = shape[key_ndim:]
data = _reshape_view(self, -1, *remaining_dims)
key = _reshape_view(key, -1)
return _reshape_view(_npi.boolean_mask(data, key), -1, *remaining_dims)
def _set_np_boolean_indexing(self, key, value):
"""
There are two types of boolean indices (which are, for the most part, equivalent).
This function handles single boolean assignment for higher speed.
If this is not the case, it is instead expanded into (multiple)
integer array indices and will be handled by advanced assign.
"""
if isinstance(value, numeric_types):
_npi.boolean_mask_assign_scalar(data=self, mask=key,
value=int(value) if isinstance(value, bool) else value,
start_axis=0, out=self)
elif isinstance(value, ndarray):
_npi.boolean_mask_assign_tensor(data=self, mask=key, value=value, start_axis=0, out=self)
else:
raise NotImplementedError('type %s is not supported.'%(type(value)))
# pylint: disable=too-many-return-statements
def __getitem__(self, key):
"""Return self[key].
Returns a sliced view of this array if the elements fetched are contiguous in memory;
otherwise, returns a newly created NDArray.
This functions supports advanced indexing defined in the following reference with
some restrictions. Boolean indexing is supported only for a single boolean ndarray
as a key. Mixing boolean ndarray with other index types is not supported in ``advanced``
indexing.
For basic indexing, i.e., if ``key`` consists only of integers,
``slice``, ``Ellipsis`` (``...``) and ``None``, a mutable view is
returned that shares memory with this array if the accessed portion is
contiguous in memory.
Otherwise, a newly created ``ndarray`` is returned.
This functions supports advanced indexing as defined in `the NumPy
advanced indexing documentation
<https://docs.scipy.org/doc/numpy/reference/arrays.indexing.html#advanced-indexing>`_.
Parameters
----------
key : int, slice, list, np.ndarray, mx.np.ndarray, or tuple of all previous types
Indexing key.
Examples
--------
The default is to give explicit indices for all axes:
>>> x = np.arange(6).reshape(2, 3)
>>> x
array([[0., 1., 2.],
[3., 4., 5.]])
>>> x[0, :2]
array([0., 1.])
>>> x[:, :-1]
array([[0., 1.],
[3., 4.]])
If fewer indices are given, they are automatically supplemented by an
appropriate number of ``slice(None)`` ("``:``") to the right. For
instance, a single integer indexes along the first axis:
>>> x[0]
array([0., 1., 2.])
>>> x[1:]
array([[3., 4., 5.]])
To omit a range of axes that should be kept as-is, an `Ellipsis`
("``...``") can be used:
>>> x = np.arange(16).reshape(2, 2, 2, 2)
>>> x[0, ..., 1]
array([[1., 3.],
[5., 7.]])
>>> x[0, :, :, 1] # equivalent
array([[1., 3.],
[5., 7.]])
New axes of length 1 can be created by inserting ``None``
(`numpy.newaxis`) in the index:
>>> x = np.arange(6).reshape(2, 3)
>>> x[None, :, :]
array([[[0., 1., 2.],
[3., 4., 5.]]])
>>> x[None, :, :].shape
(1, 2, 3)
If the indexed portion of the array is contiguous in memory, no data
is copied. Instead, a shared-memory view of the original array is
returned, and changes to that view affect the original array:
>>> x = np.arange(8).reshape(2, 2, 2)
>>> y = x[0] # contiguous
>>> y
array([[0., 1.],
[2., 3.]])
>>> y[:] = -1
>>> x
array([[[-1., -1.],
[-1., -1.]],
[[ 4., 5.],
[ 6., 7.]]])
>>> x = np.arange(8).reshape(2, 2, 2)
>>> y = x[1, :1, :] # contiguous
>>> y
array([[4., 5.]])
>>> y[:] = -1
>>> x
array([[[ 0., 1.],
[ 2., 3.]],
[[-1., -1.],
[ 6., 7.]]])
>>> x = np.arange(0, 8).reshape(2, 2, 2)
>>> y = x[:, :, 1] # not contiguous
>>> y
array([[1., 3.],
[5., 7.]])
>>> y[:] = -1
>>> x
array([[[0., 1.],
[2., 3.]],
[[4., 5.],
[6., 7.]]])
If the indexing key contains `list`, `numpy.ndarray` or `NDArray`
objects, advanced indexing is triggered, which always returns a
copy:
>>> x = np.arange(8).reshape(2, 2, 2)
>>> x[[0, 1]]
array([[[0., 1.],
[2., 3.]],
[[4., 5.],
[6., 7.]]])
>>> x[[0, 1], :] # equivalent
array([[[0., 1.],
[2., 3.]],
[[4., 5.],
[6., 7.]]])
>>> y = np.array([0, 1], dtype='int32')
>>> x[1:, y]
array([[[4., 5.],
[6., 7.]]])
>>> y = np.array([0, 1], dtype='int32')
>>> x[1:, y]
array([[[4., 5.],
[6., 7.]]])
Get negative elements in an ndarray through boolean array indexing
>>> x = np.array([1., -1., -2., 3])
>>> x[x < 0]
array([-1., -2.])
For more information related to boolean indexing, please refer to
https://docs.scipy.org/doc/numpy-1.17.0/reference/arrays.indexing.html.
"""
ndim = self.ndim # pylint: disable=redefined-outer-name
shape = self.shape # pylint: disable=redefined-outer-name
if isinstance(key, bool): # otherwise will be treated as 0 and 1
key = array(key, dtype=_np.bool, ctx=self.ctx)
if isinstance(key, list):
try:
new_key = _np.array(key)
if new_key.dtype == _np.bool_:
key = new_key
except Exception as err:
raise TypeError('{}'.format(str(err)))
if isinstance(key, _np.ndarray):
if dc.is_deferred_compute():
raise TypeError('Indexing with a numpy array is not supported in HybridBlock.')
if key.dtype == _np.bool_:
key = array(key, dtype='bool', ctx=self.ctx)
# Handle single boolean index of matching dimensionality and size first for higher speed
# If the boolean array is mixed with other indices, it is instead expanded into (multiple)
# integer array indices and will be handled by advanced indexing.
# This comes before the check of self.ndim == 0 as it also handles the 0-dim case.
if isinstance(key, ndarray) and key.dtype == _np.bool_:
return self._get_np_boolean_indexing(key, ndim, shape)
all = __builtins__['all'] # `def all` below shadows the all builtin
if ndim == 0 and key != ():
raise IndexError('scalar tensor can only accept `()` as index')
# Handle simple cases for higher speed
if isinstance(key, tuple) and len(key) == 0:
return self
if isinstance(key, tuple) and len(key) == ndim\
and py_all(isinstance(idx, integer_types) for idx in key):
out = self
for idx in key:
out = out[idx]
return out
if isinstance(key, integer_types):
# Equivalent to isinstance(key, integer_types) case in numpy/_symbol.py
if key > shape[0] - 1:
raise IndexError(
'index {} is out of bounds for axis 0 with size {}'.format(
key, shape[0]))
return self._at(key)
elif isinstance(key, py_slice):
# Unlike numpy/_symbol.py, calls MXNDArraySlice64 for writable memory
# sharing if key.step is in [None, 1]. Otherwise equivalent to the
# isinstance(key, py_slice) case in _symbol.py.
if key.step is None or key.step == 1:
if key.start is not None or key.stop is not None:
return self._slice(key.start, key.stop)
else:
return self
elif key.step != 0:
start = [None] if key.start is None else key.start
stop = [None] if key.stop is None else key.stop
return _npi.slice(self, start, stop, key.step)
else:
raise ValueError("slice step cannot be zero")
elif isinstance(key, tuple) and \
all((isinstance(arr, NDArray) and _np.issubdtype(arr.dtype, _np.integer) and \
arr.ndim > 0) for arr in key):
# Equivalent case in numpy/_symbol.py
return _npi.advanced_indexing_multiple(self, _npi.stack(*key))
elif isinstance(key, tuple) and dc.is_deferred_compute():
# Equivalent to isinstance(key, tuple) case in numpy/_symbol.py
# Only enabled in deferred compute mode, as this codepath prevents
# memory sharing which may be desired in non-deferred compute
# imperative mode.
begin = []
end = []
step = []
new_shape = ()
assert len(key) # len(key) == 0 is handled above
unsupported = False
for index in key:
if isinstance(index, py_slice):
if index.step is not None and index.step == 0:
raise ValueError("slice step cannot be zero")
begin.append(index.start)
end.append(index.stop)
step.append(index.step)
new_shape += (-2,)
elif isinstance(index, integer_types):
if index >= 0:
begin.append(index)
end.append(index+1)
step.append(1)
else:
begin.append(index)
end.append(index - 1)
step.append(-1)
new_shape += (-3,)
else:
unsupported = True
break
if not unsupported:
new_shape += (-4,)
sliced = _npi.slice(self, begin, end, step)
return _npi.reshape(sliced, new_shape)
# Special handling for cases only supported in imperative mode
if dc.is_deferred_compute():
raise TypeError('The type of indexing used is not supported in HybridBlock.')
# For 0-d boolean indices: A new axis is added,
# but at the same time no axis is "used". So if we have True,
# we add a new axis (a bit like with np.newaxis). If it is
# False, we add a new axis, but this axis has 0 entries.
# prepend is defined to handle this case.
# prepend = _NDARRAY_NO_ZERO_DIM_BOOL_ARRAY/-1 means there is no 0-d boolean scalar
# prepend = _NDARRAY_ZERO_DIM_BOOL_ARRAY_FALSE/0 means a zero-size dim must be expanded
# prepend = _NDARRAY_ZERO_DIM_BOOL_ARRAY_TRUE/1 means a new axis must be prepended
key, prepend = indexing_key_expand_implicit_axes(key, self.shape)
indexing_dispatch_code = get_indexing_dispatch_code(key)
if indexing_dispatch_code == _NDARRAY_EMPTY_TUPLE_INDEXING:
# won't be affected by zero-dim boolean indices
return self._get_np_empty_tuple_indexing(key)
elif indexing_dispatch_code == _NDARRAY_BASIC_INDEXING:
if prepend == _NDARRAY_ZERO_DIM_BOOL_ARRAY_FALSE:
return empty((0,) + self._get_np_basic_indexing(key).shape,
dtype=self.dtype, ctx=self.ctx)
if prepend == _NDARRAY_ZERO_DIM_BOOL_ARRAY_TRUE:
key = (_np.newaxis,) + key
return self._get_np_basic_indexing(key)
elif indexing_dispatch_code == _NDARRAY_ADVANCED_INDEXING:
if prepend == _NDARRAY_ZERO_DIM_BOOL_ARRAY_FALSE:
return empty((0,) + self._get_np_advanced_indexing(key).shape,
dtype=self.dtype, ctx=self.ctx)
if prepend == _NDARRAY_ZERO_DIM_BOOL_ARRAY_TRUE:
key = (_np.newaxis,) + key
return self._get_np_advanced_indexing(key)
else:
raise RuntimeError
# pylint: disable=inconsistent-return-statements
def __setitem__(self, key, value):
"""Sets ``self[key]`` to ``value``.
This functions supports advanced indexing as defined in `the NumPy
advanced indexing documentation
<https://docs.scipy.org/doc/numpy/reference/arrays.indexing.html#advanced-indexing>`_,
with the restriction that boolean array indexing is not supported.
Parameters
----------
key : int, slice, list, np.ndarray, mx.np.ndarray, or tuple of all previous types
The indexing key.
value : scalar or array-like object that can be broadcast to the shape of self[key]
The value to set.
Examples
--------
>>> x = np.zeros((2, 3))
>>> x[:] = 1
>>> x
array([[ 1., 1., 1.],
[ 1., 1., 1.]])
>>> x[:, 1:2] = 2
>>> x
array([[ 1., 2., 1.],
[ 1., 2., 1.]])
>>> x[1:2, 1:] = 3
>>> x
array([[ 1., 2., 1.],
[ 1., 3., 3.]])
>>> x[1:, 0:2] = np.zeros((1, 2))
>>> x
array([[ 1., 2., 1.],
[ 0., 0., 3.]])
>>> x[1, 2] = 4
>>> x
array([[ 1., 2., 1.],
[ 0., 0., 4.]])
>>> x[[0], [1, 2]] = 5
>>> x
array([[ 1., 5., 5.],
[ 0., 0., 4.]])
>>> x[::-1, 0:2:2] = [6]
>>> x
array([[ 6., 5., 5.],
[ 6., 0., 4.]])
For information related to boolean indexing, please refer to
https://docs.scipy.org/doc/numpy-1.17.0/reference/arrays.indexing.html.
"""
if isinstance(value, NDArray) and not isinstance(value, ndarray):
raise TypeError('Cannot assign mx.nd.NDArray to mxnet.numpy.ndarray')
if isinstance(key, bool): # otherwise will be treated as 0 and 1
key = array(key, dtype=_np.bool)
# Handle single boolean assign of matching dimensionality and size first for higher speed
# If the boolean array is mixed with other indices, it is instead expanded into (multiple)
# integer array indices and will be handled by advanced assign.
# This comes before the check of self.ndim == 0 as it also handles the 0-dim case.
if isinstance(key, ndarray) and key.dtype == _np.bool:
return self._set_np_boolean_indexing(key, value)
# handle basic and advanced indexing
if self.ndim == 0:
if not isinstance(key, tuple) or len(key) != 0:
raise IndexError('scalar tensor can only accept `()` as index')
if isinstance(value, numeric_types):
self._full(value)
elif isinstance(value, ndarray) and value.size == 1:
if value.shape != self.shape:
value = value.reshape(self.shape)
value.copyto(self)
elif isinstance(value, (_np.ndarray, _np.generic)) and value.size == 1:
if isinstance(value, _np.generic) or value.shape != self.shape:
value = value.reshape(self.shape)
self._sync_copyfrom(value)
else:
raise ValueError('setting an array element with a sequence.')
else:
# For 0-d boolean indices: A new axis is added,
# but at the same time no axis is "used". So if we have True,
# we add a new axis (a bit like with np.newaxis). If it is
# False, we add a new axis, but this axis has 0 entries.
# prepend is defined to handle this case.
# prepend == _NDARRAY_NO_ZERO_DIM_BOOL_ARRAY/-1 means there is no 0-d boolean scalar
# prepend == _NDARRAY_ZERO_DIM_BOOL_ARRAY_FALSE/0 means a zero-size dim must be expanded
# prepend == _NDARRAY_ZERO_DIM_BOOL_ARRAY_TRUE/1 means a new axis must be prepended
# prepend actually has no influence on __setitem__
key, prepend = indexing_key_expand_implicit_axes(key, self.shape)
if prepend == _NDARRAY_ZERO_DIM_BOOL_ARRAY_FALSE:
return # no action is needed
slc_key = tuple(idx for idx in key if idx is not None)
if len(slc_key) < self.ndim:
raise RuntimeError(
'too few indices after normalization: expected `ndim` ({}) '
'but got {}. This is a bug, please report it!'
''.format(self.ndim, len(slc_key))
)
if len(slc_key) > self.ndim and self.ndim != 0:
raise IndexError(
'too many indices ({}) for array with {} dimensions'
''.format(len(slc_key), self.ndim)
)
indexing_dispatch_code = get_indexing_dispatch_code(slc_key)
if indexing_dispatch_code == _NDARRAY_BASIC_INDEXING:
self._set_nd_basic_indexing(key, value) # function is inherited from the NDArray class
elif indexing_dispatch_code == _NDARRAY_EMPTY_TUPLE_INDEXING:
pass # no action needed
elif indexing_dispatch_code == _NDARRAY_ADVANCED_INDEXING:
self._set_np_advanced_indexing(key, value)
else:
raise ValueError(
'Indexing NDArray with index {} of type {} is not supported'
''.format(key, type(key))
)
def _prepare_value_nd(self, value, bcast_shape, squeeze_axes=None):
"""Return a broadcast `ndarray` with same context and dtype as ``self``.
For setting item, The returned `ndarray` is squeezed according to squeeze_axes since the
value_nd is assigned to not yet expanded space in original array.
`value`: numeric types or array like.
`bcast_shape`: a shape tuple.
`squeeze_axes`: a sequence of axes to squeeze in the value array.
Note: mxnet.numpy.ndarray does not support NDArray as an assigned value.
"""
if isinstance(value, numeric_types):
value_nd = full(bcast_shape, value, ctx=self.ctx, dtype=self.dtype)
elif isinstance(value, self.__class__):
value_nd = value.as_in_ctx(self.ctx)
if value_nd.dtype != self.dtype:
value_nd = value_nd.astype(self.dtype)
else:
try:
value_nd = array(value, ctx=self.ctx, dtype=self.dtype)
except:
raise TypeError('mxnet.np.ndarray does not support assignment with non-array-like '
'object {} of type {}'.format(value, type(value)))
# For advanced indexing setitem, if there is None in indices, we need to squeeze the
# assigned value_nd since None is also ignored in slicing the original array.
if squeeze_axes and value_nd.ndim > len(bcast_shape):
squeeze_axes = tuple([ax for ax in squeeze_axes if ax < len(value_nd.shape)])
value_nd = value_nd.squeeze(axis=tuple(squeeze_axes))
# handle the cases like the following
# a = np.zeros((3, 3)), b = np.ones((1, 1, 1, 1, 3)), a[0] = b
# b cannot broadcast directly to a[0].shape unless its leading 1-size axes are trimmed
if value_nd.ndim > len(bcast_shape):
squeeze_axes = []
for i in range(value_nd.ndim - len(bcast_shape)):
if value_nd.shape[i] == 1:
squeeze_axes.append(i)
else:
break
if squeeze_axes:
value_nd = value_nd.squeeze(squeeze_axes)
if value_nd.shape != bcast_shape:
if value_nd.size == 0:
value_nd = value_nd.reshape(bcast_shape)
else:
value_nd = value_nd.broadcast_to(bcast_shape)
return value_nd
@wrap_mxnp_np_ufunc
def __add__(self, other):
"""x.__add__(y) <=> x + y"""
return add(self, other)
@wrap_mxnp_np_ufunc
def __iadd__(self, other):
"""x.__iadd__(y) <=> x += y"""
if not self.writable:
raise ValueError('trying to add to a readonly ndarray')
return add(self, other, out=self)
def __invert__(self):
"""x.__invert__() <=> ~x"""
return invert(self)
@wrap_mxnp_np_ufunc
def __and__(self, other):
"""x.__and__(y) <=> x & y"""
return bitwise_and(self, other)
@wrap_mxnp_np_ufunc
def __or__(self, other):
"""x.__or__(y) <=> x | y"""
return bitwise_or(self, other)
@wrap_mxnp_np_ufunc
def __xor__(self, other):
"""x.__xor__(y) <=> x ^ y"""
return bitwise_xor(self, other)
@wrap_mxnp_np_ufunc
def __iand__(self, other):
"""x.__iand__(y) <=> x &= y"""
return bitwise_and(self, other, out=self)
@wrap_mxnp_np_ufunc
def __ior__(self, other):
r"""x.__ior__(y) <=> x \|= y"""
return bitwise_or(self, other, out=self)
@wrap_mxnp_np_ufunc
def __ixor__(self, other):
"""x.__ixor__(y) <=> x ^= y"""
return bitwise_xor(self, other, out=self)
def __round__(self, n=0):
"""x.__round__(n)"""
return round(self, decimals=n)
def __abs__(self):
"""x.__abs__()"""
return absolute(self)
def __ceil__(self):
"""x.__ceil__()"""
return ceil(self)
def __floor__(self):
"""x.__floor__()"""
return floor(self)
def __trunc__(self):
"""x.__trunc__()"""
return trunc(self)
@wrap_mxnp_np_ufunc
def __sub__(self, other):
"""x.__sub__(y) <=> x - y"""
return subtract(self, other)
@wrap_mxnp_np_ufunc
def __isub__(self, other):
"""x.__isub__(y) <=> x -= y"""
if not self.writable:
raise ValueError('trying to subtract from a readonly ndarray')
return subtract(self, other, out=self)
@wrap_mxnp_np_ufunc
def __rsub__(self, other):
"""x.__rsub__(y) <=> y - x"""
return subtract(other, self)
@wrap_mxnp_np_ufunc
def __mul__(self, other):
"""x.__mul__(y) <=> x * y"""
return multiply(self, other)
def __neg__(self):
return negative(self)
@wrap_mxnp_np_ufunc
def __imul__(self, other):
r"""x.__imul__(y) <=> x \*= y"""
if not self.writable:
raise ValueError('trying to add to a readonly ndarray')
return multiply(self, other, out=self)
@wrap_mxnp_np_ufunc
def __rmul__(self, other):
"""x.__rmul__(y) <=> y * x"""
return self.__mul__(other)
@wrap_mxnp_np_ufunc
def __div__(self, other):
"""x.__div__(y) <=> x / y"""
return divide(self, other)
@wrap_mxnp_np_ufunc
def __rdiv__(self, other):
"""x.__rdiv__(y) <=> y / x"""
return divide(other, self)
@wrap_mxnp_np_ufunc
def __idiv__(self, other):
"""x.__idiv__(y) <=> x /= y"""
return divide(self, other, out=self)
@wrap_mxnp_np_ufunc
def __truediv__(self, other):
"""x.__truediv__(y) <=> x / y"""
return divide(self, other)
@wrap_mxnp_np_ufunc
def __rtruediv__(self, other):
"""x.__rtruediv__(y) <=> y / x"""
return divide(other, self)
@wrap_mxnp_np_ufunc
def __itruediv__(self, other):
"""x.__itruediv__(y) <=> x /= y"""
return divide(self, other, out=self)
@wrap_mxnp_np_ufunc
def __mod__(self, other):
"""x.__mod__(y) <=> x % y"""
return mod(self, other)
@wrap_mxnp_np_ufunc
def __rmod__(self, other):
"""x.__rmod__(y) <=> y % x"""
return mod(other, self)
@wrap_mxnp_np_ufunc
def __imod__(self, other):
"""x.__imod__(y) <=> x %= y"""
return mod(self, other, out=self)
@wrap_mxnp_np_ufunc
def __pow__(self, other):
"""x.__pow__(y) <=> x ** y"""
return power(self, other)
@wrap_mxnp_np_ufunc
def __rpow__(self, other):
"""x.__rpow__(y) <=> y ** x"""
return power(other, self)
@wrap_mxnp_np_ufunc
def __eq__(self, other):
"""x.__eq__(y) <=> x == y"""
return equal(self, other)
def __hash__(self):
raise NotImplementedError
@wrap_mxnp_np_ufunc
def __ne__(self, other):
"""x.__ne__(y) <=> x != y"""
return not_equal(self, other)
@wrap_mxnp_np_ufunc
def __gt__(self, other):
"""x.__gt__(y) <=> x > y"""
return greater(self, other)
@wrap_mxnp_np_ufunc
def __ge__(self, other):
"""x.__ge__(y) <=> x >= y"""
return greater_equal(self, other)
@wrap_mxnp_np_ufunc
def __lt__(self, other):
"""x.__lt__(y) <=> x < y"""
return less(self, other)
@wrap_mxnp_np_ufunc
def __le__(self, other):
"""x.__le__(y) <=> x <= y"""
return less_equal(self, other)
@wrap_mxnp_np_ufunc
def __matmul__(self, other):
"""x.__matmul__(y) <=> x @ y"""
return matmul(self, other)
@wrap_mxnp_np_ufunc
def __rmatmul__(self, other):
"""x.__rmatmul__(y) <=> y @ x"""
return matmul(other, self)
@wrap_mxnp_np_ufunc
def __imatmul__(self, other):
"""x.__imatmul__(y) <=> x @= y"""
return matmul(self, other, out=self)
def __bool__(self):
num_elements = self.size
if num_elements == 0:
warnings.simplefilter('default')
warnings.warn('The truth value of an empty array is ambiguous. Returning False, but in'
' future this will result in an error.', DeprecationWarning)
return False
elif num_elements == 1:
return bool(self.item())
else:
raise ValueError("The truth value of an ndarray with multiple elements is ambiguous.")
__nonzero__ = __bool__
def __float__(self):
num_elements = self.size
if num_elements != 1:
raise TypeError('only size-1 arrays can be converted to Python scalars')
return float(self.item())
def __int__(self):
num_elements = self.size
if num_elements != 1:
raise TypeError('only size-1 arrays can be converted to Python scalars')
return int(self.item())
def __len__(self):
"""Number of elements along the first axis."""
shape = self.shape # pylint: disable=redefined-outer-name
if len(shape) == 0:
raise TypeError('len() of unsized object')
return self.shape[0]
def __reduce__(self):
return ndarray, (None,), self.__getstate__()
def item(self, *args):
"""Copy an element of an array to a standard Python scalar and return it.
Parameters
----------
*args : Arguments (variable number and type)
none: in this case, the method only works for arrays with one element (a.size == 1),
which element is copied into a standard Python scalar object and returned.
int_type: this argument is interpreted as a flat index into the array, specifying which
element to copy and return.
tuple of int_types: functions as does a single int_type argument, except that the
argument is interpreted as an nd-index into the array.
Returns
-------
z : Standard Python scalar object
A copy of the specified element of the array as a suitable Python scalar.
"""
# TODO(junwu): no need to call asnumpy() on the whole array.
return self.asnumpy().item(*args)
def nonzero(self):
"""Return the indices of the elements that are non-zero.
Refer to `numpy.nonzero` for full documentation.
See Also
--------
numpy.nonzero : equivalent function
"""
return nonzero(self)
@property
# pylint: disable= invalid-name, undefined-variable
def T(self):
"""Same as self.transpose(). This always returns a copy of self."""
return self.transpose()
# pylint: enable= invalid-name, undefined-variable
def all(self, axis=None, out=None, keepdims=False):
return _mx_nd_np.all(self, axis=axis, out=out, keepdims=keepdims)
def any(self, axis=None, out=None, keepdims=False):
return _mx_nd_np.any(self, axis=axis, out=out, keepdims=keepdims)
def as_nd_ndarray(self):
"""Convert mxnet.numpy.ndarray to mxnet.ndarray.NDArray to use its fluent methods."""
hdl = NDArrayHandle()
check_call(_LIB.MXShallowCopyNDArray(self.handle, ctypes.byref(hdl)))
return NDArray(handle=hdl, writable=self.writable)
def as_np_ndarray(self):
"""A convenience function for creating a numpy ndarray from the current ndarray
with zero copy. For this class, it just returns itself since it's already a
numpy ndarray."""
return self
def __repr__(self):
"""
Returns a string representation of the array.
The dtype of the ndarray will be appended if it's inconsistent with the current default dtype.
The context of the ndarray will be appended for devices other than CPU.
Examples
--------
>>> from mxnet import np, npx
>>> a = np.random.uniform(size=(2, 3))
>>> a
array([[0.5488135 , 0.5928446 , 0.71518934],
[0.84426576, 0.60276335, 0.8579456 ]])
>>> print(a)
[[0.5488135 0.5928446 0.71518934]
[0.84426576 0.60276335 0.8579456 ]]
>>> a.dtype
dtype('float32')
>>> npx.set_np_float64()
>>> a
array([[0.5488135 , 0.5928446 , 0.71518934],
[0.84426576, 0.60276335, 0.8579456 ]], dtype=float32)
>>> npx.set_np_float64(default_float64=False)
>>> a
array([[0.5488135 , 0.5928446 , 0.71518934],
[0.84426576, 0.60276335, 0.8579456 ]])
>>> b = a.astype(np.float64)
>>> b
array([[0.54881352, 0.59284461, 0.71518934],
[0.84426576, 0.60276335, 0.85794562]], dtype=float64)
>>> print(b)
[[0.54881352 0.59284461 0.71518934]
[0.84426576 0.60276335 0.85794562]]
>>> b.dtype
dtype('float64')
>>> c = a.copyto(npx.gpu(0))
>>> c
array([[0.5488135 , 0.5928446 , 0.71518934],
[0.84426576, 0.60276335, 0.8579456 ]], ctx=gpu(0))
>>> print(c)
[[0.5488135 0.5928446 0.71518934]
[0.84426576 0.60276335 0.8579456 ]] @gpu(0)
>>> d = b.copyto(npx.gpu(0))
>>> d
array([[0.54881352, 0.59284461, 0.71518934],
[0.84426576, 0.60276335, 0.85794562]], dtype=float64, ctx=gpu(0))
>>> print(d)
[[0.54881352 0.59284461 0.71518934]
[0.84426576 0.60276335 0.85794562]] @gpu(0)
"""
if self._alive:
array_str = self.asnumpy().__repr__()
dtype = self.dtype
default_dtype = _np.float64 if is_np_default_dtype() else _np.float32
if 'dtype=' in array_str:
if dtype == default_dtype:
array_str = array_str[:array_str.rindex(',')] + ')'
elif dtype not in (default_dtype, _np.bool_):
array_str = array_str[:-1] + ', dtype={})'.format(dtype)
context = self.ctx
if context.device_type == 'cpu':
return array_str
return array_str[:-1] + ', ctx={})'.format(str(context))
else:
return '<FREED {}>'.format(self.__class__.__name__)
def __str__(self):
"""Returns a string representation of the array."""
array_str = self.asnumpy().__str__()
context = self.ctx
if context.device_type == 'cpu' or self.ndim == 0:
return array_str
return '{array} @{ctx}'.format(array=array_str, ctx=context)
def __format__(self, fmt):
"""Return value.__format__(format_spec). Overwrite to include 0-d array"""
if self.ndim == 0:
return self.item().__format__(fmt)
elif len(fmt) == 0:
return self.__str__().__format__(fmt)
else:
raise TypeError("Cannot format mxnet.numpy.ndarray with format_spec")
def attach_grad(self, grad_req='write'): # pylint: disable=arguments-differ
"""Attach a gradient buffer to this ndarray, so that `backward`
can compute gradient with respect to it.
Parameters
----------
grad_req : {'write', 'add', 'null'}
How gradient will be accumulated.
* 'write': gradient will be overwritten on every backward.
* 'add': gradient will be added to existing value on every backward.
* 'null': do not compute gradient for this NDArray.
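        Examples
        --------
        A minimal usage sketch (assumes gradient recording via ``mxnet.autograd``;
        the gradient of ``(x * x).sum()`` is ``2 * x``):

        >>> from mxnet import autograd, np
        >>> x = np.array([1., 2., 3.])
        >>> x.attach_grad()
        >>> with autograd.record():
        ...     y = (x * x).sum()
        >>> y.backward()
        >>> x.grad
        array([2., 4., 6.])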
"""
grad = _mx_nd_np.zeros_like(self) # pylint: disable=undefined-variable
grad_req = _GRAD_REQ_MAP[grad_req]
check_call(_LIB.MXAutogradMarkVariables(
1, ctypes.pointer(self.handle),
ctypes.pointer(mx_uint(grad_req)),
ctypes.pointer(grad.handle)))
@property
def grad(self):
"""Returns gradient buffer attached to this ndarray."""
hdl = NDArrayHandle()
check_call(_LIB.MXNDArrayGetGrad(self.handle, ctypes.byref(hdl)))
if hdl.value is None:
return None
return _np_ndarray_cls(hdl)
def detach(self):
"""Returns a new ndarray, detached from the current graph."""
hdl = NDArrayHandle()
check_call(_LIB.MXNDArrayDetach(self.handle, ctypes.byref(hdl)))
return _np_ndarray_cls(hdl)
def astype(self, dtype, order='K', casting='unsafe', subok=True, copy=True): # pylint: disable=arguments-differ,unused-argument, too-many-arguments
"""
Copy of the array, cast to a specified type.
Parameters
----------
dtype : str or dtype
Typecode or data-type to which the array is cast.
order : {'C', 'F', 'A', 'K'}, optional
Controls the memory layout order of the result.
'C' means C order, 'F' means Fortran order, 'A'
means 'F' order if all the arrays are Fortran contiguous,
'C' order otherwise, and 'K' means as close to the
order the array elements appear in memory as possible.
Default is 'K'.
casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
Controls what kind of data casting may occur. Defaults to 'unsafe'
for backwards compatibility.
* 'no' means the data types should not be cast at all.
* 'equiv' means only byte-order changes are allowed.
* 'safe' means only casts which can preserve values are allowed.
* 'same_kind' means only safe casts or casts within a kind,
like float64 to float32, are allowed.
* 'unsafe' means any data conversions may be done.
subok : bool, optional
If True, then sub-classes will be passed-through (default), otherwise
the returned array will be forced to be a base-class array.
copy : bool, optional
Default `True`. By default, astype always returns a newly
allocated ndarray on the same context. If this is set to
`False`, and the dtype requested is the same as the ndarray's
dtype, the ndarray is returned instead of a copy.
Returns
-------
arr_t : ndarray
Unless `copy` is False and the other conditions for returning the input
array are satisfied (see description for `copy` input parameter), `arr_t`
is a new array of the same shape as the input array with `dtype`.
Notes
-----
This function differs from the official `ndarray`'s ``astype`` function in the following
aspects:
* `order` only supports 'C' and 'K'.
* `casting` only supports 'unsafe'.
* `subok` only supports ``True``.
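        Examples
        --------
        A small illustrative sketch (the dtypes shown assume the default float32
        configuration; they may differ if ``npx.set_np(dtype=True)`` is active):

        >>> x = np.zeros((2, 2))
        >>> x.dtype
        dtype('float32')
        >>> y = x.astype(np.int32)
        >>> y.dtype
        dtype('int32')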
"""
if order is not None and order != 'K' and order != 'C':
raise ValueError('order must be either \'K\' or \'C\'')
if casting != 'unsafe':
raise ValueError('casting must be equal to \'unsafe\'')
if not subok:
raise ValueError('subok must be equal to True')
if dtype is None:
dtype = _np.float32
if not copy and _np.dtype(dtype) == self.dtype:
return self
return _npi.cast(self, dtype=dtype)
def copyto(self, other):
"""Copies the value of this array to another array.
If ``other`` is a ``ndarray`` object, then ``other.shape`` and
``self.shape`` should be the same. This function copies the value from
``self`` to ``other``.
If ``other`` is a context, a new ``np.ndarray`` will be first created on
the target context, and the value of ``self`` is copied.
Parameters
----------
other : ndarray or Context
The destination array or context.
Returns
-------
out: ndarray
The copied array. If ``other`` is an ``ndarray``, then the return value
and ``other`` will point to the same ``ndarray``.
Examples
--------
>>> x = np.ones((2, 3))
>>> y = np.zeros((2, 3), ctx=npx.gpu(0))
>>> z = x.copyto(y)
>>> z is y
True
>>> y
array([[ 1., 1., 1.],
[ 1., 1., 1.]])
"""
if isinstance(other, ndarray):
if other.handle is self.handle:
warnings.warn('You are attempting to copy an array to itself', RuntimeWarning)
return False
return _npi.copyto(self, out=other)
elif isinstance(other, Context):
hret = ndarray(_new_alloc_handle(self.shape, other, True, self.dtype))
return _npi.copyto(self, out=hret)
else:
raise TypeError('copyto does not support type ' + str(type(other)))
def asscalar(self):
raise AttributeError('mxnet.numpy.ndarray object has no attribute asscalar')
def argmax(self, axis=None, out=None): # pylint: disable=arguments-differ
"""Return indices of the maximum values along the given axis.
Refer to `mxnet.numpy.argmax` for full documentation."""
return argmax(self, axis, out)
def as_in_context(self, context):
"""This function has been deprecated. Please refer to ``ndarray.as_in_ctx``."""
warnings.warn('ndarray.as_in_context has been renamed to'
' ndarray.as_in_ctx', DeprecationWarning)
return self.as_nd_ndarray().as_in_context(context).as_np_ndarray()
def as_in_ctx(self, ctx):
"""Returns an array on the target device with the same value as this array.
If the target context is the same as ``self.context``, then ``self`` is
returned. Otherwise, a copy is made.
Parameters
----------
        ctx : Context
The target context.
Returns
-------
ndarray
The target array.
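        Examples
        --------
        A minimal sketch (assumes the array was created on the default ``cpu(0)`` context;
        the GPU line requires an available GPU):

        >>> x = np.ones((2, 3))
        >>> x.as_in_ctx(npx.cpu(0)) is x
        True
        >>> y = x.as_in_ctx(npx.gpu(0))  # copies the data to GPU 0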
"""
if self.ctx == ctx:
return self
return self.copyto(ctx)
@property
def ctx(self):
"""Device context of the array.
Examples
--------
>>> x = np.array([1, 2, 3, 4])
>>> x.ctx
cpu(0)
>>> type(x.ctx)
<class 'mxnet.context.Context'>
>>> y = np.zeros((2, 3), npx.gpu(0))
>>> y.ctx
gpu(0)
"""
dev_typeid = ctypes.c_int()
dev_id = ctypes.c_int()
check_call(_LIB.MXNDArrayGetContext(
self.handle, ctypes.byref(dev_typeid), ctypes.byref(dev_id)))
return Context(Context.devtype2str[dev_typeid.value], dev_id.value)
@property
def context(self):
"""This function has been deprecated. Please refer to ``ndarray.ctx``."""
warnings.warn('ndarray.context has been renamed to ndarray.ctx', DeprecationWarning)
return self.as_nd_ndarray().context
def copy(self, order='C'): # pylint: disable=arguments-differ
"""Return a coyp of the array, keeping the same context.
Parameters
----------
order : str
The memory layout of the copy. Currently, only c-contiguous memory
layout is supported.
Examples
--------
>>> x = np.ones((2, 3))
>>> y = x.copy()
>>> y
array([[ 1., 1., 1.],
[ 1., 1., 1.]])
"""
if order != 'C':
raise NotImplementedError('ndarray.copy only supports order=\'C\', while '
'received {}'.format(str(order)))
return self.copyto(self.ctx)
def dot(self, b, out=None):
"""Dot product of two arrays.
Refer to ``numpy.dot`` for full documentation."""
return dot(self, b, out=out)
def reshape(self, *args, **kwargs): # pylint: disable=arguments-differ
"""Returns a copy of the array with a new shape.
Notes
-----
Unlike the free function `numpy.reshape`, this method on `ndarray` allows
the elements of the shape parameter to be passed in as separate arguments.
For example, ``a.reshape(10, 11)`` is equivalent to
``a.reshape((10, 11))``.
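        Examples
        --------
        A short sketch of both calling conventions:

        >>> a = np.arange(6)
        >>> a.reshape(2, 3)
        array([[0., 1., 2.],
               [3., 4., 5.]])
        >>> a.reshape((3, 2))
        array([[0., 1.],
               [2., 3.],
               [4., 5.]])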
"""
order = 'C'
if len(kwargs) > 1:
raise TypeError('function takes at most 1 keyword argument')
if len(kwargs) == 1:
if 'order' not in kwargs:
raise TypeError("'{}' is an invalid keyword argument for this function"
.format(list(kwargs.keys())[0]))
order = kwargs.pop('order', 'C')
if order != 'C':
raise NotImplementedError('only supports C-order,'
' while received {}'.format(order))
if len(args) == 0:
raise TypeError('reshape() takes exactly 1 argument (0 given)')
if len(args) == 1 and isinstance(args[0], tuple):
return _mx_np_op.reshape(self, newshape=args[0], order=order)
else:
return _mx_np_op.reshape(self, newshape=args, order=order)
def reshape_like(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`reshape_like`.
The arguments are the same as for :py:func:`reshape_like`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute reshape_like')
def reshape_view(self, *shape, **kwargs): # pylint: disable=redefined-outer-name
"""Returns a **view** of this array with a new shape without altering any data.
        Inherited from NDArray.reshape.
"""
return super(ndarray, self).reshape(*shape, **kwargs)
def zeros_like(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`zeros_like`.
The arguments are the same as for :py:func:`zeros_like`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute zeros_like')
def ones_like(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`ones_like`.
The arguments are the same as for :py:func:`ones_like`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute ones_like')
def broadcast_axes(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`broadcast_axes`.
The arguments are the same as for :py:func:`broadcast_axes`, with
this array as data.
"""
        raise AttributeError('mxnet.numpy.ndarray object has no attribute broadcast_axes')
def repeat(self, repeats, axis=None): # pylint: disable=arguments-differ
"""Repeat elements of an array."""
return repeat(self, repeats=repeats, axis=axis)
def pad(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`pad`.
The arguments are the same as for :py:func:`pad`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute pad')
def swapaxes(self, axis1, axis2): # pylint: disable=arguments-differ
"""Return a copy of the array with axis1 and axis2 interchanged.
Refer to `mxnet.numpy.swapaxes` for full documentation.
"""
return swapaxes(self, axis1, axis2)
def split(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`split`.
The arguments are the same as for :py:func:`split`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute split')
def split_v2(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`split_v2`.
The arguments are the same as for :py:func:`split_v2`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute split_v2')
def slice(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`slice`.
The arguments are the same as for :py:func:`slice`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute slice')
def slice_axis(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`slice_axis`.
The arguments are the same as for :py:func:`slice_axis`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute slice_axis')
def slice_like(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`slice_like`.
The arguments are the same as for :py:func:`slice_like`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute slice_like')
def slice_assign_scalar(self, value, begin, end, step):
"""
        Assign the scalar to a cropped subset of this ndarray. The value is broadcast to the
        cropped shape and cast to the dtype of this ndarray.
Parameters
----------
value: numeric value
            Value and this ndarray should be of the same data type.
begin: tuple of begin indices
end: tuple of end indices
        step: tuple of step lengths
Returns
-------
This ndarray.
Examples
--------
>>> x = np.ones((2, 2, 2))
>>> y = x.slice_assign_scalar(0, (0, 0, None), (1, 1, None), (None, None, None))
>>> y
array([[[0., 0.],
[1., 1.]],
[[1., 1.],
[1., 1.]]])
>>> x
array([[[0., 0.],
[1., 1.]],
[[1., 1.],
[1., 1.]]])
"""
return _npi.slice_assign_scalar(self, value, begin=begin, end=end, step=step, out=self)
def slice_assign(self, rhs, begin, end, step):
"""
Assign the rhs to a cropped subset of this ndarray in place.
Returns the view of this ndarray.
Parameters
----------
rhs: ndarray.
rhs and this NDArray should be of the same data type, and on the same device.
The shape of rhs should be the same as the cropped shape of this ndarray.
begin: tuple of begin indices
end: tuple of end indices
        step: tuple of step lengths
Returns
-------
out : ndarray
This ndarray.
Examples
--------
>>> x = np.ones((2, 2, 2))
>>> assigned = np.zeros((1, 1, 2))
>>> y = x.slice_assign(assigned, (0, 0, None), (1, 1, None), (None, None, None))
>>> y
array([[[0., 0.],
[1., 1.]],
[[1., 1.],
[1., 1.]]])
>>> x
array([[[0., 0.],
[1., 1.]],
[[1., 1.],
[1., 1.]]])
"""
return _npi.slice_assign(self, rhs, begin=begin, end=end, step=step, out=self)
def take(self, indices, axis=None, mode='raise'): # pylint: disable=arguments-differ, redefined-outer-name
"""Convenience fluent method for :py:func:`take`.
The arguments are the same as for :py:func:`take`, with
this array as data.
"""
return take(self, indices, axis, mode=mode)
def one_hot(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`one_hot`.
The arguments are the same as for :py:func:`one_hot`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute one_hot')
def pick(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`pick`.
The arguments are the same as for :py:func:`pick`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute pick')
def sort(self, axis=-1, kind=None, order=None): # pylint: disable=arguments-differ
"""Convenience fluent method for :py:func:`sort`.
The arguments are the same as for :py:func:`sort`, with
this array as data.
"""
        return sort(self, axis=axis, kind=kind, order=order)
def topk(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`topk`.
The arguments are the same as for :py:func:`topk`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute topk')
def argsort(self, axis=-1, kind=None, order=None): # pylint: disable=arguments-differ
"""Convenience fluent method for :py:func:`argsort`.
The arguments are the same as for :py:func:`argsort`, with
this array as data.
"""
return argsort(self, axis=axis, kind=kind, order=order)
def argmax_channel(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`argmax_channel`.
The arguments are the same as for :py:func:`argmax_channel`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute argmax_channel')
def argmin(self, axis=None, out=None): # pylint: disable=arguments-differ
"""Return indices of the minium values along the given axis.
Refer to `mxnet.numpy.argmin` for full documentation."""
return argmin(self, axis, out)
def clip(self, min=None, max=None, out=None): # pylint: disable=arguments-differ
"""Return an array whose values are limited to [min, max].
One of max or min must be given.
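        Examples
        --------
        A brief sketch:

        >>> a = np.array([0., 1., 2., 3., 4.])
        >>> a.clip(1, 3)
        array([1., 1., 2., 3., 3.])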
"""
return clip(self, min, max, out=out)
def abs(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`abs`.
The arguments are the same as for :py:func:`abs`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute abs')
def sign(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`sign`.
The arguments are the same as for :py:func:`sign`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute sign')
def flatten(self, order='C'): # pylint: disable=arguments-differ
"""Return a copy of the array collapsed into one dimension."""
return self.reshape(-1, order=order)
def shape_array(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`shape_array`.
The arguments are the same as for :py:func:`shape_array`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute shape_array')
def size_array(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`size_array`.
The arguments are the same as for :py:func:`size_array`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute size_array')
def expand_dims(self, *args, **kwargs): # pylint: disable=arguments-differ,unused-argument
"""Convenience fluent method for :py:func:`expand_dims`.
The arguments are the same as for :py:func:`expand_dims`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute expand_dims')
def tile(self, reps): # pylint: disable=arguments-differ
"""Construct an array by repeating A the number of times given by reps.
Refer to `mxnet.numpy.tile` for full documentation."""
return tile(self, reps=reps)
def transpose(self, *axes): # pylint: disable=arguments-differ
"""Permute the dimensions of an array."""
if len(axes) == 0:
axes = None
elif len(axes) == 1:
if isinstance(axes[0], (tuple, list)):
axes = axes[0]
elif axes[0] is None:
axes = None
return transpose(self, axes=axes)
def flip(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`flip`.
The arguments are the same as for :py:func:`flip`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute flip')
def depth_to_space(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`depth_to_space`.
The arguments are the same as for :py:func:`depth_to_space`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute depth_to_space')
def space_to_depth(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`space_to_depth`.
The arguments are the same as for :py:func:`space_to_depth`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute space_to_depth')
def diag(self, k=0, **kwargs):
"""Convenience fluent method for :py:func:`diag`.
The arguments are the same as for :py:func:`diag`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute diag')
def diagonal(self, offset=0, axis1=0, axis2=1): # pylint: disable=arguments-differ
"""Return the diagonal with the given offset.
        If the array has more than two dimensions, then the axes specified by axis1 and
        axis2 are used to determine the 2-D sub-array whose diagonal is returned.
        Refer to `mxnet.numpy.diagonal` for full documentation.
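        Examples
        --------
        A brief sketch:

        >>> a = np.arange(4).reshape(2, 2)
        >>> a.diagonal()
        array([0., 3.])
        >>> a.diagonal(offset=1)
        array([1.])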
"""
return diagonal(self, offset=offset, axis1=axis1, axis2=axis2)
def sum(self, axis=None, dtype=None, out=None, keepdims=False): # pylint: disable=arguments-differ
"""Return the sum of the array elements over the given axis."""
return sum(self, axis=axis, dtype=dtype, out=out, keepdims=keepdims)
def nansum(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`nansum`.
The arguments are the same as for :py:func:`nansum`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute nansum')
def prod(self, axis=None, dtype=None, out=None, keepdims=False): # pylint: disable=arguments-differ
"""Return the product of the array elements over the given axis."""
return _mx_np_op.prod(self, axis=axis, dtype=dtype, keepdims=keepdims, out=out)
def nanprod(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`nanprod`.
The arguments are the same as for :py:func:`nanprod`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute nanprod')
def mean(self, axis=None, dtype=None, out=None, keepdims=False): # pylint: disable=arguments-differ
"""Returns the average of the array elements along given axis."""
return mean(self, axis=axis, dtype=dtype, out=out, keepdims=keepdims)
# pylint: disable=too-many-arguments, arguments-differ
def std(self, axis=None, dtype=None, out=None, ddof=0, keepdims=False):
"""Returns the standard deviation of the array elements along given axis."""
return std(self, axis=axis, dtype=dtype, ddof=ddof, keepdims=keepdims, out=out)
def var(self, axis=None, dtype=None, out=None, ddof=0, keepdims=False):
"""Returns the variance of the array elements, along given axis."""
return var(self, axis=axis, dtype=dtype, out=out, ddof=ddof, keepdims=keepdims)
# pylint: enable=too-many-arguments, arguments-differ
def cumsum(self, axis=None, dtype=None, out=None):
"""Return the cumulative sum of the elements along the given axis."""
return _mx_nd_np.cumsum(self, axis=axis, dtype=dtype, out=out)
def tolist(self):
return self.asnumpy().tolist()
def max(self, axis=None, out=None, keepdims=False): # pylint: disable=arguments-differ
"""Return the maximum along a given axis."""
return _mx_nd_np.max(self, axis=axis, out=out, keepdims=keepdims)
def min(self, axis=None, out=None, keepdims=False): # pylint: disable=arguments-differ
"""Convenience fluent method for :py:func:`min`.
The arguments are the same as for :py:func:`min`, with
this array as data.
"""
return _mx_nd_np.min(self, axis=axis, out=out, keepdims=keepdims)
def norm(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`norm`.
The arguments are the same as for :py:func:`norm`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute norm')
def round(self, decimals=0, out=None, **kwargs): # pylint: disable=arguments-differ
"""Convenience fluent method for :py:func:`round`.
The arguments are the same as for :py:func:`round`, with
this array as data.
"""
return round(self, decimals=decimals, out=out, **kwargs)
def rint(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`rint`.
The arguments are the same as for :py:func:`rint`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute rint')
def fix(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`fix`.
The arguments are the same as for :py:func:`fix`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute fix')
def floor(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`floor`.
The arguments are the same as for :py:func:`floor`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute floor')
def ceil(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`ceil`.
The arguments are the same as for :py:func:`ceil`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute ceil')
def trunc(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`trunc`.
The arguments are the same as for :py:func:`trunc`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute trunc')
def sin(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`sin`.
The arguments are the same as for :py:func:`sin`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute sin')
def cos(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`cos`.
The arguments are the same as for :py:func:`cos`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute cos')
def tan(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`tan`.
The arguments are the same as for :py:func:`tan`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute tan')
def arcsin(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`arcsin`.
The arguments are the same as for :py:func:`arcsin`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute arcsin')
def arccos(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`arccos`.
The arguments are the same as for :py:func:`arccos`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute arccos')
def arctan(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`arctan`.
The arguments are the same as for :py:func:`arctan`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute arctan')
def degrees(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`degrees`.
The arguments are the same as for :py:func:`degrees`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute degrees')
def radians(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`radians`.
The arguments are the same as for :py:func:`radians`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute radians')
def sinh(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`sinh`.
The arguments are the same as for :py:func:`sinh`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute sinh')
def cosh(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`cosh`.
The arguments are the same as for :py:func:`cosh`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute cosh')
def tanh(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`tanh`.
The arguments are the same as for :py:func:`tanh`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute tanh')
def arcsinh(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`arcsinh`.
The arguments are the same as for :py:func:`arcsinh`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute arcsinh')
def arccosh(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`arccosh`.
The arguments are the same as for :py:func:`arccosh`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute arccosh')
def arctanh(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`arctanh`.
The arguments are the same as for :py:func:`arctanh`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute arctanh')
def exp(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`exp`.
The arguments are the same as for :py:func:`exp`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute exp')
def expm1(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`expm1`.
The arguments are the same as for :py:func:`expm1`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute expm1')
def log(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`log`.
The arguments are the same as for :py:func:`log`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute log')
def log10(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`log10`.
The arguments are the same as for :py:func:`log10`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute log10')
def log2(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`log2`.
The arguments are the same as for :py:func:`log2`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute log2')
def log1p(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`log1p`.
The arguments are the same as for :py:func:`log1p`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute log1p')
def sqrt(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`sqrt`.
The arguments are the same as for :py:func:`sqrt`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute sqrt')
def rsqrt(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`rsqrt`.
The arguments are the same as for :py:func:`rsqrt`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute rsqrt')
def cbrt(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`cbrt`.
The arguments are the same as for :py:func:`cbrt`, with
this array as data.
"""
        raise AttributeError('mxnet.numpy.ndarray object has no attribute cbrt')
def rcbrt(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`rcbrt`.
The arguments are the same as for :py:func:`rcbrt`, with
this array as data.
"""
        raise AttributeError('mxnet.numpy.ndarray object has no attribute rcbrt')
def square(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`square`.
The arguments are the same as for :py:func:`square`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute square')
def reciprocal(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`reciprocal`.
The arguments are the same as for :py:func:`reciprocal`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute reciprocal')
def relu(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`relu`.
The arguments are the same as for :py:func:`relu`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute relu')
def sigmoid(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`sigmoid`.
The arguments are the same as for :py:func:`sigmoid`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute sigmoid')
def softmax(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`softmax`.
The arguments are the same as for :py:func:`softmax`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute softmax')
def log_softmax(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`log_softmax`.
The arguments are the same as for :py:func:`log_softmax`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute log_softmax')
def softmin(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`softmin`.
The arguments are the same as for :py:func:`softmin`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute softmin')
def squeeze(self, axis=None): # pylint: disable=arguments-differ
"""Remove single-dimensional entries from the shape of a."""
return squeeze(self, axis=axis)
def broadcast_to(self, shape): # pylint: disable=redefined-outer-name
return _mx_nd_np.broadcast_to(self, shape)
def broadcast_like(self, other):
raise AttributeError('mxnet.numpy.ndarray object has no attribute broadcast_like')
def _full(self, value):
"""
Currently for internal use only. Implemented for __setitem__.
Assign to self an array of self's same shape and type, filled with value.
"""
return _mx_nd_np.full(self.shape, value, ctx=self.ctx, dtype=self.dtype, out=self)
# pylint: disable=redefined-outer-name
def _scatter_set_nd(self, value_nd, indices):
"""
This is added as an ndarray class method in order to support polymorphism in NDArray and numpy.ndarray indexing
"""
return _npi.scatter_set_nd(
lhs=self, rhs=value_nd, indices=indices, shape=self.shape, out=self
)
# pylint: enable=redefined-outer-name
@property
def shape(self):
"""Tuple of array dimensions.
Examples
--------
>>> x = mx.np.array([1, 2, 3, 4])
>>> x.shape
        (4,)
>>> y = mx.np.zeros((2, 3, 4))
>>> y.shape
        (2, 3, 4)
>>> z = mx.np.array(3)
>>> z.shape
()
"""
num_dim = mx_int()
if _int64_enabled():
pdata = ctypes.POINTER(mx_int64)()
check_call(_LIB.MXNDArrayGetShape64(
self.handle, ctypes.byref(num_dim), ctypes.byref(pdata)))
else:
pdata = ctypes.POINTER(mx_int)()
check_call(_LIB.MXNDArrayGetShape(
self.handle, ctypes.byref(num_dim), ctypes.byref(pdata)))
if num_dim.value == -1:
return None
else:
return tuple(pdata[:num_dim.value]) # pylint: disable=invalid-slice-index
@property
def ndim(self):
"""Number of array dimensions."""
return len(self.shape)
@property
def size(self):
"""Number of elements in the array."""
return super(ndarray, self).size
@property
def dtype(self):
"""Data-type of the array's elements.
Returns
-------
numpy.dtype
This NDArray's data type.
Examples
--------
>>> x = np.zeros((2,3))
>>> x.dtype
dtype('float32')
>>> y = np.zeros((2,3), dtype='int32')
>>> y.dtype
dtype('int32')
"""
return _np.dtype(super(ndarray, self).dtype)
def tostype(self, stype):
raise AttributeError('mxnet.numpy.ndarray object has no attribute tostype')
@set_module('mxnet.numpy')
def empty(shape, dtype=float, order='C', ctx=None): # pylint: disable=redefined-outer-name
"""Return a new array of given shape and type, without initializing entries.
Parameters
----------
    shape : int or tuple of int
        Shape of the empty array, e.g., ``(2, 3)`` or ``2``.
dtype : data-type, optional
        Desired output data-type for the array, e.g., `numpy.int8`.
        Note that this behavior is different from NumPy's `empty` function, where `float64`
        is the default value. Here the default dtype is either 'float32' or 'float64',
        because `float32` is considered the default data type in deep learning.
When npx.is_np_default_dtype() returns False, default dtype is float32;
When npx.is_np_default_dtype() returns True, default dtype is float64.
order : {'C'}, optional, default: 'C'
How to store multi-dimensional data in memory, currently only row-major
(C-style) is supported.
ctx : device context, optional
Device context on which the memory is allocated. Default is
`mxnet.context.current_context()`.
Returns
-------
out : ndarray
Array of uninitialized (arbitrary) data of the given shape, dtype, and order.
Examples
--------
>>> np.empty([2, 2])
array([[ 0.000000e+00, -2.524355e-29],
[ nan, -8.592023e+09]]) # uninitialized
>>> np.empty([2, 2], dtype=int)
array([[8751743591039004782, 3196766424264760104],
[7583328881310196768, 562950123910254]], dtype=int64) # uninitialized
"""
if order != 'C':
raise NotImplementedError('`empty` only supports order equal to `C`, while received {}'
.format(str(order)))
if ctx is None:
ctx = current_context()
if dtype is None or dtype is float:
dtype = _np.float64 if is_np_default_dtype() else _np.float32
if isinstance(shape, int):
shape = (shape,)
return ndarray(handle=_new_alloc_handle(shape, ctx, False, dtype))
# pylint: disable=redefined-outer-name
@set_module('mxnet.numpy')
def array(object, dtype=None, ctx=None):
"""
Create an array.
Parameters
----------
object : array_like or `numpy.ndarray` or `mxnet.numpy.ndarray`
An array, any object exposing the array interface, an object whose
__array__ method returns an array, or any (nested) sequence.
dtype : data-type, optional
The desired data-type for the array.
The default dtype is ``object.dtype`` if `object` is an `ndarray`, `float32` otherwise.
        Default dtype can be set to be consistent with official numpy by `npx.set_np(dtype=True)`.
* When npx.is_np_default_dtype() returns False, default dtype is float32;
* When npx.is_np_default_dtype() returns True, default dtype is float64.
ctx : device context, optional
Device context on which the memory is allocated. Default is
`mxnet.context.current_context()`.
Returns
-------
out : ndarray
An array object satisfying the specified requirements.
Examples
--------
>>> np.array([1, 2, 3])
array([1., 2., 3.])
>>> np.array([[1, 2], [3, 4]])
array([[1., 2.],
[3., 4.]])
>>> np.array([[1, 0], [0, 1]], dtype=bool)
array([[ True, False],
[False, True]])
>>> np.array([1, 2, 3]).dtype
dtype('float32')
>>> npx.set_np(dtype=True)
>>> np.array([1, 2, 3]).dtype
dtype('float64')
"""
if ctx is None:
ctx = current_context()
if isinstance(object, _np.ndarray):
if is_np_default_dtype():
dtype = object.dtype if dtype is None else dtype
else:
dtype = _np.float32 if dtype is None or object.dtype is _np.float64 else dtype
if isinstance(object, ndarray):
dtype = object.dtype if dtype is None else dtype
elif isinstance(object, NDArray):
raise ValueError("If you're trying to create a mxnet.numpy.ndarray "
"from mx.nd.NDArray, please use the zero-copy as_np_ndarray function.")
else:
if dtype is None:
default_dtype = _np.float64 if is_np_default_dtype() else _np.float32
dtype = object.dtype if hasattr(object, "dtype") else default_dtype
try:
object = _np.array(object, dtype=dtype)
except Exception as e:
# printing out the error raised by official NumPy's array function
# for transparency on users' side
raise TypeError('{}'.format(str(e)))
ret = empty(object.shape, dtype=dtype, ctx=ctx)
if len(object.shape) == 0:
ret[()] = object
else:
ret[:] = object
return ret
# pylint: enable=redefined-outer-name
@set_module('mxnet.numpy')
def shape(a):
"""
Return the shape of an array.
Parameters
----------
a : array_like
Input array.
Returns
-------
shape : tuple of ints
The elements of the shape tuple give the lengths of the
corresponding array dimensions.
See Also
--------
ndarray.shape : Equivalent array method.
Examples
--------
>>> np.shape(np.eye(3))
(3, 3)
>>> np.shape([[1, 2]])
(1, 2)
>>> np.shape([0])
(1,)
>>> np.shape(0)
()
"""
return _mx_nd_np.shape(a)
@set_module('mxnet.numpy')
def zeros(shape, dtype=None, order='C', ctx=None): # pylint: disable=redefined-outer-name
"""Return a new array of given shape and type, filled with zeros.
This function currently only supports storing multi-dimensional data
in row-major (C-style).
Parameters
----------
shape : int or tuple of int
The shape of the empty array.
dtype : str or numpy.dtype, optional
An optional value type,
When npx.is_np_default_dtype() returns False, default dtype is float32,
When npx.is_np_default_dtype() returns True, default dtype is float64.
        Note that this behavior is different from NumPy's `zeros` function, where `float64`
        is the default value. Here the default dtype is either 'float32' or 'float64',
        because `float32` is considered the default data type in deep learning.
order : {'C'}, optional, default: 'C'
How to store multi-dimensional data in memory, currently only row-major
(C-style) is supported.
ctx : Context, optional
An optional device context (default is the current default context).
Returns
-------
out : ndarray
Array of zeros with the given shape, dtype, and ctx.
Examples
--------
>>> np.zeros(5)
array([0., 0., 0., 0., 0.])
>>> np.zeros((5,), dtype=int)
array([0, 0, 0, 0, 0], dtype=int64)
>>> np.zeros((2, 1))
array([[0.],
[0.]])
"""
return _mx_nd_np.zeros(shape, dtype, order, ctx)
@set_module('mxnet.numpy')
def ones(shape, dtype=None, order='C', ctx=None): # pylint: disable=redefined-outer-name
"""Return a new array of given shape and type, filled with ones.
This function currently only supports storing multi-dimensional data
in row-major (C-style).
Parameters
----------
shape : int or tuple of int
The shape of the empty array.
dtype : str or numpy.dtype, optional
        An optional value type. The default depends on your current default dtype.
When npx.is_np_default_dtype() returns False, default dtype is float32;
When npx.is_np_default_dtype() returns True, default dtype is float64.
Note that this behavior is different from NumPy's `ones` function where
`float64` is the default value.
order : {'C'}, optional, default: 'C'
How to store multi-dimensional data in memory, currently only row-major
(C-style) is supported.
ctx : Context, optional
An optional device context (default is the current default context).
Returns
-------
out : ndarray
Array of ones with the given shape, dtype, and ctx.
Examples
--------
>>> np.ones(5)
array([1., 1., 1., 1., 1.])
>>> np.ones((5,), dtype=int)
array([1, 1, 1, 1, 1], dtype=int64)
>>> np.ones((2, 1))
array([[1.],
[1.]])
>>> s = (2,2)
>>> np.ones(s)
array([[1., 1.],
[1., 1.]])
"""
return _mx_nd_np.ones(shape, dtype, order, ctx)
@set_module('mxnet.numpy')
def broadcast_to(array, shape): # pylint: disable=redefined-outer-name
"""
Broadcast an array to a new shape.
Parameters
----------
array : ndarray or scalar
The array to broadcast.
shape : tuple
The shape of the desired array.
Returns
-------
broadcast : array
A readonly view on the original array with the given shape. It is
typically not contiguous. Furthermore, more than one element of a
broadcasted array may refer to a single memory location.
Raises
------
MXNetError
If the array is not compatible with the new shape according to NumPy's
broadcasting rules.
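    Examples
    --------
    A minimal sketch:

    >>> x = np.array([1, 2, 3])
    >>> np.broadcast_to(x, (3, 3))
    array([[1., 2., 3.],
           [1., 2., 3.],
           [1., 2., 3.]])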
"""
return _mx_nd_np.broadcast_to(array, shape)
# pylint: disable=too-many-arguments, redefined-outer-name
@set_module('mxnet.numpy')
def full(shape, fill_value, dtype=None, order='C', ctx=None, out=None):
r"""Return a new array of given shape and type, filled with `fill_value`.
Parameters
----------
shape : int or sequence of ints
Shape of the new array, e.g., ``(2, 3)`` or ``2``.
fill_value : scalar or ndarray
Fill value.
dtype : data-type, optional
The desired data-type for the array. The default, `None`, means
`np.array(fill_value).dtype`.
order : {'C'}, optional
Whether to store multidimensional data in C- or Fortran-contiguous
(row- or column-wise) order in memory. Currently only supports C order.
ctx : mxnet.context.Context
The device, e.g. the i-th GPU.
out : ndarray or None, optional
A location into which the result is stored.
If provided, it must have the same shape and dtype as input ndarray.
If not provided or `None`, a freshly-allocated array is returned.
Returns
-------
out : ndarray
Array of `fill_value` with the given shape, dtype, and order.
If `fill_value` is an ndarray, out will have the same context as `fill_value`
regardless of the provided `ctx`.
.. note::
This function differs from the original numpy.full in the following way(s):
* Has an additional `ctx` argument to specify the device
* Has an additional `out` argument
* Currently does not support `order` selection
See Also
--------
empty : Return a new uninitialized array.
ones : Return a new array setting values to one.
zeros : Return a new array setting values to zero.
Examples
--------
>>> np.full((2, 2), 10)
array([[10., 10.],
[10., 10.]])
>>> np.full((2, 2), 2, dtype=np.int32, ctx=mx.cpu(0))
array([[2, 2],
[2, 2]], dtype=int32)
"""
return _mx_nd_np.full(shape, fill_value, order=order, ctx=ctx, dtype=dtype, out=out)
# pylint: enable=too-many-arguments, redefined-outer-name
# pylint: disable=redefined-outer-name
@set_module('mxnet.numpy')
def empty_like(prototype, dtype=None, order='C', subok=False, shape=None): # pylint: disable=W0621
"""
Return a new array with the same shape and type as a given array.
Parameters
----------
prototype : ndarray
The shape and data-type of `prototype` define these same attributes
of the returned array.
dtype : data-type, optional
Overrides the data type of the result.
order : {'C'}, optional
Whether to store multidimensional data in C- or Fortran-contiguous
(row- or column-wise) order in memory. Currently only supports C order.
subok : {False}, optional
If True, then the newly created array will use the sub-class
type of 'a', otherwise it will be a base-class array. Defaults
to False.
(Only support False at this moment)
shape : int or sequence of ints, optional.
Overrides the shape of the result. If order='K' and the number of
dimensions is unchanged, will try to keep order, otherwise,
order='C' is implied.
(Not supported at this moment)
Returns
-------
out : ndarray
Array of uninitialized (arbitrary) data with the same
shape and type as `prototype`.
See Also
--------
ones_like : Return an array of ones with shape and type of input.
zeros_like : Return an array of zeros with shape and type of input.
full_like : Return a new array with shape of input filled with value.
empty : Return a new uninitialized array.
Notes
-----
This function does *not* initialize the returned array; to do that use
`zeros_like` or `ones_like` instead. It may be marginally faster than
the functions that do set the array values.
Examples
--------
>>> a = np.array([[1,2,3], [4,5,6]])
>>> np.empty_like(a)
array([[-5764607523034234880, -2305834244544065442, 4563075075], # uninitialized
[ 4567052944, -5764607523034234880, 844424930131968]])
>>> a = np.array([[1., 2., 3.],[4.,5.,6.]])
>>> np.empty_like(a)
array([[4.9e-324, 9.9e-324, 1.5e-323], # uninitialized
[2.0e-323, 2.5e-323, 3.0e-323]])
"""
return _mx_nd_np.empty_like(prototype, dtype=dtype, order=order, subok=subok, shape=shape)
# pylint: enable=redefined-outer-name
# pylint: disable=redefined-outer-name
@set_module('mxnet.numpy')
def all(a, axis=None, out=None, keepdims=False):
"""
Test whether all array elements along a given axis evaluate to True.
Parameters
----------
a : ndarray
Input array or object that can be converted to an array.
axis : None or int or tuple of ints, optional
Axis or axes along which a logical AND reduction is performed.
The default (axis = None) is to perform a logical AND over
all the dimensions of the input array.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left in
the result as dimensions with size one. With this option,
the result will broadcast correctly against the input array.
out : ndarray, optional
Alternate output array in which to place the result. It must have
the same shape as the expected output and its type is preserved
Returns
--------
all : ndarray, bool
A new boolean or array is returned unless out is specified,
in which case a reference to out is returned.
    Examples
    --------
>>> np.all([[True,False],[True,True]])
False
>>> np.all([[True,False],[True,True]], axis=0)
array([ True, False])
>>> np.all([-1, 4, 5])
True
>>> np.all([1.0, np.nan])
True
>>> o=np.array(False)
>>> z=np.all([-1, 4, 5], out=o)
>>> id(z), id(o), z
(28293632, 28293632, array(True)) # may vary
"""
return _mx_nd_np.all(a, axis=axis, out=out, keepdims=keepdims)
@set_module('mxnet.numpy')
def any(a, axis=None, out=None, keepdims=False):
"""
Test whether any array element along a given axis evaluates to True.
    Returns a single boolean unless `axis` is not None.
Parameters
----------
a : ndarray
Input array or object that can be converted to an array.
axis : None or int or tuple of ints, optional
        Axis or axes along which a logical OR reduction is performed.
        The default (axis = None) is to perform a logical OR over
all the dimensions of the input array.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left in
the result as dimensions with size one. With this option,
the result will broadcast correctly against the input array.
out : ndarray, optional
Alternate output array in which to place the result. It must have
the same shape as the expected output and its type is preserved
Returns
--------
any : bool or ndarray
A new boolean or ndarray is returned unless out is specified,
in which case a reference to out is returned.
    Examples
    --------
>>> np.any([[True, False], [True, True]])
True
>>> np.any([[True, False], [False, False]], axis=0)
array([ True, False])
>>> np.any([-1, 0, 5])
True
>>> np.any(np.nan)
True
>>> o=np.array(False)
>>> z=np.any([-1, 4, 5], out=o)
>>> z, o
(array(True), array(True))
>>> # Check now that z is a reference to o
>>> z is o
True
>>> id(z), id(o) # identity of z and o # doctest: +SKIP
(191614240, 191614240)
"""
return _mx_nd_np.any(a, axis=axis, out=out, keepdims=keepdims)
@set_module('mxnet.numpy')
def identity(n, dtype=None, ctx=None):
"""
Return the identity array.
The identity array is a square array with ones on
the main diagonal.
Parameters
----------
n : int
Number of rows (and columns) in `n` x `n` output.
dtype : data-type, optional
Data-type of the output.
When npx.is_np_default_dtype() returns False, default dtype is float32;
When npx.is_np_default_dtype() returns True, default dtype is float64.
ctx : Context, optional
An optional device context (default is the current default context).
Returns
-------
out : ndarray
`n` x `n` array with its main diagonal set to one,
and all other elements 0.
Examples
--------
    >>> np.identity(3)
array([[1., 0., 0.],
[0., 1., 0.],
[0., 0., 1.]])
"""
return _mx_nd_np.identity(n, dtype, ctx)
# pylint: enable=redefined-outer-name
# pylint: disable=redefined-outer-name
@set_module('mxnet.numpy')
def take(a, indices, axis=None, mode='raise', out=None):
r"""
Take elements from an array along an axis.
When axis is not None, this function does the same thing as "fancy"
indexing (indexing arrays using arrays); however, it can be easier to use
if you need elements along a given axis. A call such as
``np.take(arr, indices, axis=3)`` is equivalent to
``arr[:,:,:,indices,...]``.
Explained without fancy indexing, this is equivalent to the following use
of `ndindex`, which sets each of ``ii``, ``jj``, and ``kk`` to a tuple of
indices::
Ni, Nk = a.shape[:axis], a.shape[axis+1:]
Nj = indices.shape
for ii in ndindex(Ni):
for jj in ndindex(Nj):
for kk in ndindex(Nk):
out[ii + jj + kk] = a[ii + (indices[jj],) + kk]
Parameters
----------
a : ndarray
The source array.
indices : ndarray
        The indices of the values to extract. Scalar indices are also allowed.
axis : int, optional
The axis over which to select values. By default, the flattened
input array is used.
out : ndarray, optional
If provided, the result will be placed in this array. It should
be of the appropriate shape and dtype.
mode : {'clip', 'wrap'}, optional
Specifies how out-of-bounds indices will behave.
* 'clip' -- clip to the range (default)
* 'wrap' -- wrap around
'clip' mode means that all indices that are too large are replaced
by the index that addresses the last element along that axis. Note
that this disables indexing with negative numbers.
Returns
-------
out : ndarray
The returned array has the same type as `a`.
.. note::
This function differs from the original `numpy.take
<https://docs.scipy.org/doc/numpy/reference/generated/numpy.take.html>`_ in
the following way(s):
* Only ndarray or scalar ndarray is accepted as valid input.
Examples
--------
>>> a = np.array([4, 3, 5, 7, 6, 8])
>>> indices = np.array([0, 1, 4])
>>> np.take(a, indices)
array([4., 3., 6.])
    In this example, since `a` is an ndarray, "fancy" indexing can be used.
>>> a[indices]
array([4., 3., 6.])
If `indices` is not one dimensional, the output also has these dimensions.
>>> np.take(a, np.array([[0, 1], [2, 3]]))
array([[4., 3.],
[5., 7.]])
"""
return _mx_nd_np.take(a, indices, axis, mode, out)
# pylint: enable=redefined-outer-name
@set_module('mxnet.numpy')
def unique(ar, return_index=False, return_inverse=False, return_counts=False, axis=None):
"""
Find the unique elements of an array.
Returns the sorted unique elements of an array. There are three optional
outputs in addition to the unique elements:
* the indices of the input array that give the unique values
* the indices of the unique array that reconstruct the input array
* the number of times each unique value comes up in the input array
Parameters
----------
ar : ndarray
Input array. Unless `axis` is specified, this will be flattened if it
is not already 1-D.
return_index : bool, optional
If True, also return the indices of `ar` (along the specified axis,
if provided, or in the flattened array) that result in the unique array.
return_inverse : bool, optional
If True, also return the indices of the unique array (for the specified
axis, if provided) that can be used to reconstruct `ar`.
return_counts : bool, optional
If True, also return the number of times each unique item appears
in `ar`.
axis : int or None, optional
The axis to operate on. If None, `ar` will be flattened. If an integer,
the subarrays indexed by the given axis will be flattened and treated
as the elements of a 1-D array with the dimension of the given axis,
see the notes for more details. The default is None.
Returns
-------
unique : ndarray
The sorted unique values.
unique_indices : ndarray, optional
The indices of the first occurrences of the unique values in the
original array. Only provided if `return_index` is True.
unique_inverse : ndarray, optional
The indices to reconstruct the original array from the
unique array. Only provided if `return_inverse` is True.
unique_counts : ndarray, optional
The number of times each of the unique values comes up in the
original array. Only provided if `return_counts` is True.
.. note::
When an axis is specified the subarrays indexed by the axis are sorted.
This is done by making the specified axis the first dimension of the array
and then flattening the subarrays in C order. The flattened subarrays are
then viewed as a structured type with each element given a label, with the
effect that we end up with a 1-D array of structured types that can be
treated in the same way as any other 1-D array. The result is that the
flattened subarrays are sorted in lexicographic order starting with the
first element.
This function differs from the original `numpy.unique
<https://docs.scipy.org/doc/numpy/reference/generated/numpy.unique.html>`_ in
the following aspects:
* Only support ndarray as input.
* Object arrays or structured arrays are not supported.
Examples
--------
>>> np.unique(np.array([1, 1, 2, 2, 3, 3]))
array([1., 2., 3.])
>>> a = np.array([[1, 1], [2, 3]])
>>> np.unique(a)
array([1., 2., 3.])
Return the unique rows of a 2D array
>>> a = np.array([[1, 0, 0], [1, 0, 0], [2, 3, 4]])
>>> np.unique(a, axis=0)
array([[1., 0., 0.],
[2., 3., 4.]])
Return the indices of the original array that give the unique values:
>>> a = np.array([1, 2, 6, 4, 2, 3, 2])
>>> u, indices = np.unique(a, return_index=True)
>>> u
array([1., 2., 3., 4., 6.])
>>> indices
array([0, 1, 5, 3, 2], dtype=int64)
>>> a[indices]
array([1., 2., 3., 4., 6.])
Reconstruct the input array from the unique values:
>>> a = np.array([1, 2, 6, 4, 2, 3, 2])
>>> u, indices = np.unique(a, return_inverse=True)
>>> u
array([1., 2., 3., 4., 6.])
>>> indices
array([0, 1, 4, 3, 1, 2, 1], dtype=int64)
>>> u[indices]
array([1., 2., 6., 4., 2., 3., 2.])
"""
return _mx_nd_np.unique(ar, return_index, return_inverse, return_counts, axis)
@set_module('mxnet.numpy')
@wrap_np_binary_func
def add(x1, x2, out=None, **kwargs):
"""
Add arguments element-wise.
Parameters
----------
x1, x2 : ndarrays or scalar values
The arrays to be added. If x1.shape != x2.shape, they must be broadcastable to
a common shape (which may be the shape of one or the other).
out : ndarray
A location into which the result is stored. If provided, it must have a shape
that the inputs broadcast to. If not provided or None, a freshly-allocated array
is returned.
Returns
-------
    add : ndarray or scalar
        The sum of x1 and x2, element-wise. This is a scalar if both x1 and x2 are scalars.
.. note::
This operator now supports automatic type promotion. The resulting type will be determined
according to the following rules:
* If both inputs are of floating number types, the output is the more precise type.
* If only one of the inputs is floating number type, the result is that type.
* If both inputs are of integer types (including boolean), not supported yet.
Examples
--------
>>> np.add(1.0, 4.0)
5.0
>>>
>>> x1 = np.arange(9.0).reshape((3, 3))
>>> x2 = np.arange(3.0)
>>> np.add(x1, x2)
array([[ 0., 2., 4.],
[ 3., 5., 7.],
[ 6., 8., 10.]])
"""
return _mx_nd_np.add(x1, x2, out)
@set_module('mxnet.numpy')
@wrap_np_binary_func
def subtract(x1, x2, out=None, **kwargs):
r"""Subtract arguments element-wise.
Parameters
----------
x1, x2 : ndarrays or scalar values
The arrays to be subtracted from each other. If x1.shape != x2.shape,
they must be broadcastable to a common shape (which may be the shape
of one or the other).
out : ndarray
A location into which the result is stored. If provided, it must have a shape
that the inputs broadcast to. If not provided or None, a freshly-allocated array
is returned.
Returns
-------
subtract : ndarray or scalar
The difference of x1 and x2, element-wise. This is a scalar if both x1 and x2 are scalars.
.. note::
This operator now supports automatic type promotion. The resulting type will be determined
according to the following rules:
* If both inputs are of floating number types, the output is the more precise type.
* If only one of the inputs is floating number type, the result is that type.
* If both inputs are of integer types (including boolean), not supported yet.
Examples
--------
>>> np.subtract(1.0, 4.0)
-3.0
>>> x1 = np.arange(9.0).reshape((3, 3))
>>> x2 = np.arange(3.0)
>>> np.subtract(x1, x2)
array([[0., 0., 0.],
[3., 3., 3.],
[6., 6., 6.]])
"""
return _mx_nd_np.subtract(x1, x2, out)
@set_module('mxnet.numpy')
@wrap_np_binary_func
def multiply(x1, x2, out=None, **kwargs):
"""
Multiply arguments element-wise.
Parameters
----------
x1, x2 : ndarrays or scalar values
The arrays to be multiplied. If x1.shape != x2.shape, they must be broadcastable to
a common shape (which may be the shape of one or the other).
out : ndarray
A location into which the result is stored. If provided, it must have a shape
that the inputs broadcast to. If not provided or None, a freshly-allocated array
is returned.
Returns
-------
out : ndarray or scalar
        The product of x1 and x2, element-wise. This is a scalar if both x1 and x2 are scalars.
.. note::
This operator now supports automatic type promotion. The resulting type will be determined
according to the following rules:
* If both inputs are of floating number types, the output is the more precise type.
* If only one of the inputs is floating number type, the result is that type.
* If both inputs are of integer types (including boolean), not supported yet.
Examples
--------
>>> np.multiply(2.0, 4.0)
8.0
>>> x1 = np.arange(9.0).reshape((3, 3))
>>> x2 = np.arange(3.0)
>>> np.multiply(x1, x2)
array([[ 0., 1., 4.],
[ 0., 4., 10.],
[ 0., 7., 16.]])
"""
return _mx_nd_np.multiply(x1, x2, out)
@set_module('mxnet.numpy')
@wrap_np_binary_func
def divide(x1, x2, out=None, **kwargs):
"""Returns a true division of the inputs, element-wise.
.. note::
This operator now supports automatic type promotion. The resulting type will be determined
according to the following rules:
* If both inputs are of floating number types, the output is the more precise type.
* If only one of the inputs is floating number type, the result is that type.
* If both inputs are of integer types including boolean, the output is of float32 or
float64 type, which depends on your current default dtype:
* When ``npx.is_np_default_dtype()`` returns False, default dtype is float32.
* When ``npx.is_np_default_dtype()`` returns True, default dtype is float64.
Parameters
----------
x1 : ndarray or scalar
Dividend array.
x2 : ndarray or scalar
Divisor array.
out : ndarray
A location into which the result is stored. If provided, it must have a shape
that the inputs broadcast to. If not provided or None, a freshly-allocated array
is returned.
Returns
-------
out : ndarray or scalar
This is a scalar if both x1 and x2 are scalars.
Examples
--------
    >>> x = np.arange(5)
    >>> np.divide(x, 4)
    array([0. , 0.25, 0.5 , 0.75, 1. ])
"""
return _mx_nd_np.divide(x1, x2, out=out)
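# Illustrative sketch (not part of the module API): integer inputs to
# ``divide`` produce a floating result whose precision follows the default
# dtype, as noted in the docstring above (float32 when
# ``npx.is_np_default_dtype()`` returns False, float64 otherwise). The helper
# name is hypothetical.
def _example_divide_integer_inputs():
    from mxnet import numpy as np
    x = np.array([2, 3, 4], dtype='int32')
    y = np.divide(x, 4)
    # Expected: array([0.5, 0.75, 1.]) with a floating dtype, not integer division.
    return y, y.dtype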
@set_module('mxnet.numpy')
def true_divide(x1, x2, out=None):
"""Returns a true division of the inputs, element-wise.
Instead of the Python traditional 'floor division', this returns a true
division. True division adjusts the output type to present the best
answer, regardless of input types.
Parameters
----------
x1 : ndarray or scalar
Dividend array.
x2 : ndarray or scalar
Divisor array.
out : ndarray
A location into which the result is stored. If provided, it must have a shape
that the inputs broadcast to. If not provided or None, a freshly-allocated array
is returned.
Returns
-------
out : ndarray or scalar
This is a scalar if both x1 and x2 are scalars.
.. note::
This operator now supports automatic type promotion. The resulting type will be determined
according to the following rules:
* If both inputs are of floating number types, the output is the more precise type.
* If only one of the inputs is floating number type, the result is that type.
* If both inputs are of integer types (including boolean), the output is of float32 or
float64 type, which depends on your current default dtype.
When npx.is_np_default_dtype() returns False, default dtype is float32;
When npx.is_np_default_dtype() returns True, default dtype is float64.
Examples
--------
>>> x = np.arange(5)
>>> np.true_divide(x, 4)
array([0. , 0.25, 0.5 , 0.75, 1. ])
"""
return _mx_nd_np.true_divide(x1, x2, out=out)
@set_module('mxnet.numpy')
@wrap_np_binary_func
def mod(x1, x2, out=None, **kwargs):
"""
Return element-wise remainder of division.
Parameters
----------
x1 : ndarray or scalar
Dividend array.
x2 : ndarray or scalar
Divisor array.
out : ndarray
A location into which the result is stored. If provided, it must have a shape
that the inputs broadcast to. If not provided or None, a freshly-allocated array
is returned.
Returns
-------
out : ndarray or scalar
This is a scalar if both x1 and x2 are scalars.
Examples
--------
>>> np.mod(np.arange(7), 5)
array([0., 1., 2., 3., 4., 0., 1.])
"""
return _mx_nd_np.mod(x1, x2, out=out)
@set_module('mxnet.numpy')
@wrap_np_binary_func
def fmod(x1, x2, out=None, **kwargs):
"""
Return element-wise remainder of division.
Parameters
----------
x1 : ndarray or scalar
Dividend array.
x2 : ndarray or scalar
Divisor array.
out : ndarray
A location into which the result is stored. If provided, it must have a shape
that the inputs broadcast to. If not provided or None, a freshly-allocated array
is returned.
Returns
-------
out : ndarray or scalar
This is a scalar if both x1 and x2 are scalars.
Examples
--------
>>> np.fmod(np.arange(7), 5)
array([0., 1., 2., 3., 4., 0., 1.])
"""
return _mx_nd_np.fmod(x1, x2, out=out)
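# Illustrative sketch (not part of the module API): ``mod`` and ``fmod`` only
# differ for negative operands. This assumes the NumPy convention carries over
# (``mod`` takes the sign of the divisor, ``fmod`` the sign of the dividend);
# the helper name is hypothetical.
def _example_mod_vs_fmod():
    from mxnet import numpy as np
    x = np.array([-5.0, 5.0])
    # Under the NumPy convention: mod(-5, 3) == 1 while fmod(-5, 3) == -2;
    # for the positive operand both return 2.
    return np.mod(x, 3), np.fmod(x, 3)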
@set_module('mxnet.numpy')
@wrap_np_binary_func
def matmul(a, b, out=None, **kwargs):
r"""Matrix product of two arrays.
Parameters
----------
a, b : ndarray
Input arrays, scalars not allowed.
out : ndarray, optional
A location into which the result is stored.
If provided, it must have a shape that matches the signature (n,k),(k,m)->(n,m).
If not provided or None, a freshly-allocated array is returned.
Returns
-------
y : ndarray
The matrix product of the inputs.
This is a scalar only when both x1, x2 are 1-d vectors.
Raises
------
MXNetError
If the last dimension of a is not the same size as the second-to-last dimension of b.
If a scalar value is passed in.
See Also
--------
tensordot : Sum products over arbitrary axes.
dot : alternative matrix product with different broadcasting rules.
einsum : Einstein summation convention.
.. note::
The behavior depends on the arguments in the following way.
* If both arguments are ``2-D`` they are multiplied like conventional matrices.
* If either argument is ``N-D``, ``N > 2``, it is treated as a stack of matrices
residing in the last two indexes and broadcast accordingly.
* If the first argument is ``1-D``, it is promoted to a matrix by prepending
a 1 to its dimensions. After matrix multiplication the prepended 1 is removed.
* If the second argument is ``1-D``, it is promoted to a matrix by appending a 1
to its dimensions. After matrix multiplication the appended 1 is removed.
matmul differs from dot in two important ways:
* Multiplication by scalars is not allowed, use multiply instead.
* Stacks of matrices are broadcast together as if the matrices were elements,
respecting the signature ``(n,k),(k,m)->(n,m)``:
>>> a = np.ones([9, 5, 7, 4])
>>> c = np.ones([9, 5, 4, 3])
>>> np.dot(a, c).shape
(9, 5, 7, 9, 5, 3)
>>> np.matmul(a, c).shape
(9, 5, 7, 3)
>>> # n is 7, k is 4, m is 3
Examples
--------
For 2-D arrays it is the matrix product:
>>> a = np.array([[1, 0],
... [0, 1]])
>>> b = np.array([[4, 1],
... [2, 2]])
>>> np.matmul(a, b)
array([[4., 1.],
[2., 2.]])
For 2-D mixed with 1-D, the result is the usual.
>>> a = np.array([[1, 0],
... [0, 1]])
>>> b = np.array([1, 2])
>>> np.matmul(a, b)
array([1., 2.])
>>> np.matmul(b, a)
array([1., 2.])
Broadcasting is conventional for stacks of arrays
>>> a = np.arange(2 * 2 * 4).reshape((2, 2, 4))
>>> b = np.arange(2 * 2 * 4).reshape((2, 4, 2))
>>> np.matmul(a, b).shape
(2, 2, 2)
>>> np.matmul(a, b)[0, 1, 1]
array(98.)
>>> sum(a[0, 1, :] * b[0, :, 1])
array(98.)
Scalar multiplication raises an error.
>>> np.matmul([1, 2], 3)
Traceback (most recent call last):
...
mxnet.base.MXNetError: ... : Multiplication by scalars is not allowed.
"""
return _mx_nd_np.matmul(a, b, out=out)
@set_module('mxnet.numpy')
@wrap_np_binary_func
def remainder(x1, x2, out=None, **kwargs):
"""
Return element-wise remainder of division.
Parameters
----------
x1 : ndarray or scalar
Dividend array.
x2 : ndarray or scalar
Divisor array.
out : ndarray
A location into which the result is stored. If provided, it must have a shape
that the inputs broadcast to. If not provided or None, a freshly-allocated array
is returned.
Returns
-------
out : ndarray or scalar
This is a scalar if both x1 and x2 are scalars.
Examples
--------
>>> np.remainder(np.arange(7), 5)
array([0., 1., 2., 3., 4., 0., 1.])
"""
return _mx_nd_np.remainder(x1, x2, out=out)
@set_module('mxnet.numpy')
@wrap_np_binary_func
def power(x1, x2, out=None, **kwargs):
"""
First array elements raised to powers from second array, element-wise.
Parameters
----------
x1 : ndarray or scalar
The bases.
x2 : ndarray or scalar
The exponent.
out : ndarray
A location into which the result is stored. If provided, it must have a shape
that the inputs broadcast to. If not provided or None, a freshly-allocated array
is returned.
Returns
-------
out : ndarray or scalar
The bases in x1 raised to the exponents in x2.
This is a scalar if both x1 and x2 are scalars.
Examples
--------
>>> x1 = np.arange(6)
>>> np.power(x1, 3)
array([ 0., 1., 8., 27., 64., 125.])
Raise the bases to different exponents.
>>> x2 = np.array([1.0, 2.0, 3.0, 3.0, 2.0, 1.0])
>>> np.power(x1, x2)
array([ 0., 1., 8., 27., 16., 5.])
The effect of broadcasting.
>>> x2 = np.array([[1, 2, 3, 3, 2, 1], [1, 2, 3, 3, 2, 1]])
>>> x2
array([[1., 2., 3., 3., 2., 1.],
[1., 2., 3., 3., 2., 1.]])
>>> np.power(x1, x2)
array([[ 0., 1., 8., 27., 16., 5.],
[ 0., 1., 8., 27., 16., 5.]])
"""
return _mx_nd_np.power(x1, x2, out=out)
@set_module('mxnet.numpy')
@wrap_np_binary_func
def lcm(x1, x2, out=None, **kwargs):
"""
Returns the lowest common multiple of ``|x1|`` and ``|x2|``
Parameters
----------
x1, x2 : ndarrays or scalar values
The arrays for computing lowest common multiple. If x1.shape != x2.shape,
they must be broadcastable to a common shape (which may be the shape of
one or the other).
out : ndarray or None, optional
A location into which the result is stored. If provided, it must have a shape
that the inputs broadcast to. If not provided or None, a freshly-allocated array
is returned.
Returns
-------
y : ndarray or scalar
The lowest common multiple of the absolute value of the inputs
This is a scalar if both `x1` and `x2` are scalars.
See Also
--------
gcd : The greatest common divisor
Examples
--------
>>> np.lcm(12, 20)
60
>>> np.lcm(np.arange(6, dtype=int), 20)
array([ 0, 20, 20, 60, 20, 20], dtype=int64)
"""
return _mx_nd_np.lcm(x1, x2, out=out)
@set_module('mxnet.numpy')
@wrap_np_unary_func
def sin(x, out=None, **kwargs):
r"""
Trigonometric sine, element-wise.
Parameters
----------
x : ndarray or scalar
Angle, in radians (:math:`2 \pi` rad equals 360 degrees).
out : ndarray or None
A location into which the result is stored. If provided, it
must have a shape that the inputs broadcast to. If not provided
or None, a freshly-allocated array is returned. The dtype of the
output is the same as that of the input if the input is an ndarray.
Returns
-------
y : ndarray or scalar
The sine of each element of x. This is a scalar if `x` is a scalar.
Notes
----
This function only supports input type of float.
Examples
--------
>>> np.sin(np.pi/2.)
1.0
>>> np.sin(np.array((0., 30., 45., 60., 90.)) * np.pi / 180.)
array([0. , 0.5 , 0.70710677, 0.86602545, 1. ])
"""
return _mx_nd_np.sin(x, out=out, **kwargs)
@set_module('mxnet.numpy')
@wrap_np_unary_func
def cos(x, out=None, **kwargs):
r"""
Cosine, element-wise.
Parameters
----------
x : ndarray or scalar
Angle, in radians (:math:`2 \pi` rad equals 360 degrees).
out : ndarray or None
A location into which the result is stored. If provided, it
must have a shape that the inputs broadcast to. If not provided
or None, a freshly-allocated array is returned. The dtype of the
output is the same as that of the input if the input is an ndarray.
Returns
-------
y : ndarray or scalar
The corresponding cosine values. This is a scalar if x is a scalar.
Notes
----
This function only supports input type of float.
Examples
--------
>>> np.cos(np.array([0, np.pi/2, np.pi]))
array([ 1.000000e+00, -4.371139e-08, -1.000000e+00])
>>> # Example of providing the optional output parameter
>>> out1 = np.array([0], dtype='f')
>>> out2 = np.cos(np.array([0.1]), out1)
>>> out2 is out1
True
"""
return _mx_nd_np.cos(x, out=out, **kwargs)
@set_module('mxnet.numpy')
@wrap_np_unary_func
def sinh(x, out=None, **kwargs):
"""
Hyperbolic sine, element-wise.
Equivalent to ``1/2 * (np.exp(x) - np.exp(-x))`` or ``-1j * np.sin(1j*x)``.
Parameters
----------
x : ndarray or scalar
Input array or scalar.
out : ndarray or None
A location into which the result is stored. If provided, it
must have a shape that the inputs broadcast to. If not provided
or None, a freshly-allocated array is returned. The dtype of the
output is the same as that of the input if the input is an ndarray.
Returns
-------
y : ndarray or scalar
The corresponding hyperbolic sine values. This is a scalar if `x` is a scalar.
Notes
----
This function only supports input type of float.
Examples
--------
>>> np.sinh(0)
0.0
>>> # Example of providing the optional output parameter
>>> out1 = np.array([0], dtype='f')
>>> out2 = np.sinh(np.array([0.1]), out1)
>>> out2 is out1
True
"""
return _mx_nd_np.sinh(x, out=out, **kwargs)
@set_module('mxnet.numpy')
@wrap_np_unary_func
def cosh(x, out=None, **kwargs):
"""
Hyperbolic cosine, element-wise.
Equivalent to ``1/2 * (np.exp(x) + np.exp(-x))`` and ``np.cos(1j*x)``.
Parameters
----------
x : ndarray or scalar
Input array or scalar.
out : ndarray or None
A location into which the result is stored. If provided, it
must have a shape that the inputs broadcast to. If not provided
or None, a freshly-allocated array is returned. The dtype of the
output is the same as that of the input if the input is an ndarray.
Returns
-------
y : ndarray or scalar
The corresponding hyperbolic cosine values. This is a scalar if `x` is a scalar.
Notes
----
This function only supports input type of float.
Examples
--------
>>> np.cosh(0)
1.0
"""
return _mx_nd_np.cosh(x, out=out, **kwargs)
@set_module('mxnet.numpy')
@wrap_np_unary_func
def tanh(x, out=None, **kwargs):
"""
Compute hyperbolic tangent element-wise.
Equivalent to ``np.sinh(x)/np.cosh(x)``.
Parameters
----------
x : ndarray or scalar.
Input array.
out : ndarray or None
A location into which the result is stored. If provided, it
        must have a shape that the inputs broadcast to. If not provided
or None, a freshly-allocated array is returned. The dtype of the
output and input must be the same.
Returns
----------
y : ndarray or scalar
The corresponding hyperbolic tangent values.
.. note::
If `out` is provided, the function writes the result into it,
and returns a reference to `out`. (See Examples)
* input x does not support complex computation (like imaginary number)
>>> np.tanh(np.pi*1j)
TypeError: type <type 'complex'> not supported
Examples
--------
    >>> np.tanh(np.array([0, np.pi]))
array([0. , 0.9962721])
>>> np.tanh(np.pi)
0.99627207622075
>>> # Example of providing the optional output parameter illustrating
>>> # that what is returned is a reference to said parameter
>>> out1 = np.array(1)
>>> out2 = np.tanh(np.array(0.1), out1)
>>> out2 is out1
True
"""
return _mx_nd_np.tanh(x, out=out, **kwargs)
@set_module('mxnet.numpy')
@wrap_np_unary_func
def log10(x, out=None, **kwargs):
"""
Return the base 10 logarithm of the input array, element-wise.
Parameters
----------
x : ndarray or scalar
Input array or scalar.
out : ndarray or None
A location into which the result is stored. If provided, it
must have a shape that the inputs broadcast to. If not provided
or None, a freshly-allocated array is returned. The dtype of the
output is the same as that of the input if the input is an ndarray.
Returns
-------
y : ndarray or scalar
The logarithm to the base 10 of `x`, element-wise. NaNs are
returned where x is negative. This is a scalar if `x` is a scalar.
Notes
----
This function only supports input type of float.
Examples
--------
>>> np.log10(np.array([1e-15, -3.]))
array([-15., nan])
"""
return _mx_nd_np.log10(x, out=out, **kwargs)
@set_module('mxnet.numpy')
@wrap_np_unary_func
def sqrt(x, out=None, **kwargs):
"""
Return the non-negative square-root of an array, element-wise.
Parameters
----------
x : ndarray or scalar
The values whose square-roots are required.
out : ndarray, or None, optional
A location into which the result is stored. If provided, it must have
a shape that the inputs broadcast to. If not provided or `None`,
a freshly-allocated array is returned.
Returns
-------
y : ndarray or scalar
An array of the same shape as `x`, containing the positive
square-root of each element in `x`. This is a scalar if `x` is a scalar.
Notes
----
This function only supports input type of float.
Examples
--------
>>> np.sqrt(np.array([1,4,9]))
array([1., 2., 3.])
    >>> np.sqrt(np.array([4, -1, np.inf]))
array([ 2., nan, inf])
"""
return _mx_nd_np.sqrt(x, out=out, **kwargs)
@set_module('mxnet.numpy')
@wrap_np_unary_func
def cbrt(x, out=None, **kwargs):
"""
Return the cube-root of an array, element-wise.
Parameters
----------
x : ndarray
The values whose cube-roots are required.
out : ndarray, optional
A location into which the result is stored. If provided, it must have a shape that the
inputs broadcast to. If not provided or None, a freshly-allocated array is returned.
A tuple (possible only as a keyword argument) must have length equal to the number of outputs.
Returns
----------
y : ndarray
        An array of the same shape as x, containing the cube-root of each element in x.
If out was provided, y is a reference to it. This is a scalar if x is a scalar.
Examples
----------
>>> np.cbrt([1,8,27])
array([ 1., 2., 3.])
"""
return _mx_nd_np.cbrt(x, out=out, **kwargs)
@set_module('mxnet.numpy')
@wrap_np_unary_func
def abs(x, out=None, **kwargs):
r"""
Calculate the absolute value element-wise.
Parameters
----------
x : ndarray or scalar
Input array.
out : ndarray or None, optional
A location into which the result is stored. If provided, it must have
a shape that the inputs broadcast to. If not provided or `None`,
a freshly-allocated array is returned.
Returns
-------
absolute : ndarray
An ndarray containing the absolute value of
each element in `x`. This is a scalar if `x` is a scalar.
Examples
--------
>>> x = np.array([-1.2, 1.2])
>>> np.abs(x)
array([1.2, 1.2])
"""
return _mx_nd_np.abs(x, out=out, **kwargs)
@set_module('mxnet.numpy')
@wrap_np_unary_func
def fabs(x, out=None, **kwargs):
r"""
Calculate the absolute value element-wise.
This function returns the absolute values (positive magnitude) of the
data in `x`. Complex values are not handled, use `absolute` to find the
absolute values of complex data.
Parameters
----------
x : ndarray or scalar
Input array.
out : ndarray or None, optional
A location into which the result is stored. If provided, it must have
a shape that the inputs broadcast to. If not provided or `None`,
a freshly-allocated array is returned.
Returns
-------
absolute : ndarray
An ndarray containing the absolute value of
each element in `x`. This is a scalar if `x` is a scalar.
Examples
--------
>>> np.fabs(-1)
1.0
    >>> np.fabs(np.array([-1.2, 1.2]))
array([ 1.2, 1.2])
"""
return _mx_nd_np.fabs(x, out=out, **kwargs)
@set_module('mxnet.numpy')
@wrap_np_unary_func
def absolute(x, out=None, **kwargs):
"""
Calculate the absolute value element-wise.
np.abs is a shorthand for this function.
Parameters
----------
x : ndarray
Input array.
out : ndarray, optional
A location into which the result is stored. If provided, it must have a shape
that the inputs broadcast to. If not provided or None, a freshly-allocated array is returned.
A tuple (possible only as a keyword argument) must have length equal to the number of outputs.
Returns
----------
absolute : ndarray
An ndarray containing the absolute value of each element in x.
Examples
----------
>>> x = np.array([-1.2, 1.2])
>>> np.absolute(x)
array([ 1.2, 1.2])
"""
return _mx_nd_np.absolute(x, out=out, **kwargs)
@set_module('mxnet.numpy')
@wrap_np_unary_func
def exp(x, out=None, **kwargs):
r"""
Calculate the exponential of all elements in the input array.
Parameters
----------
x : ndarray or scalar
Input values.
out : ndarray or None, optional
A location into which the result is stored. If provided, it must have
a shape that the inputs broadcast to. If not provided or `None`,
a freshly-allocated array is returned.
Returns
-------
out : ndarray or scalar
Output array, element-wise exponential of `x`.
This is a scalar if `x` is a scalar.
Examples
--------
>>> np.exp(1)
2.718281828459045
>>> x = np.array([-1, 1, -2, 2])
>>> np.exp(x)
array([0.36787945, 2.7182817 , 0.13533528, 7.389056 ])
"""
return _mx_nd_np.exp(x, out=out, **kwargs)
@set_module('mxnet.numpy')
@wrap_np_unary_func
def expm1(x, out=None, **kwargs):
r"""
Calculate `exp(x) - 1` for all elements in the array.
Parameters
----------
x : ndarray or scalar
Input values.
out : ndarray or None, optional
A location into which the result is stored. If provided, it must have
a shape that the inputs broadcast to. If not provided or `None`,
a freshly-allocated array is returned.
Returns
-------
out : ndarray or scalar
Output array, element-wise exponential minus one: `out = exp(x) - 1`.
This is a scalar if `x` is a scalar.
Examples
--------
>>> np.expm1(1)
1.718281828459045
>>> x = np.array([-1, 1, -2, 2])
    >>> np.expm1(x)
array([-0.63212056, 1.71828183, -0.86466472, 6.3890561])
"""
return _mx_nd_np.expm1(x, out=out, **kwargs)
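# Illustrative sketch (not part of the module API): for very small ``x``,
# ``expm1(x)`` keeps the precision that a naive ``exp(x) - 1`` loses to
# floating-point cancellation. The helper name is hypothetical.
def _example_expm1_small_values():
    from mxnet import numpy as np
    x = np.array([1e-10], dtype='float64')
    naive = np.exp(x) - 1   # suffers catastrophic cancellation near zero
    stable = np.expm1(x)    # stays close to the true value 1e-10
    return naive, stable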
@set_module('mxnet.numpy')
@wrap_np_unary_func
def arcsin(x, out=None, **kwargs):
r"""
Inverse sine, element-wise.
Parameters
----------
x : ndarray or scalar
`y`-coordinate on the unit circle.
out : ndarray or None, optional
A location into which the result is stored.
If provided, it must have the same shape as the input.
If not provided or None, a freshly-allocated array is returned.
Returns
-------
angle : ndarray or scalar
Output array is same shape and type as x. This is a scalar if x is a scalar.
The inverse sine of each element in `x`, in radians and in the
closed interval ``[-pi/2, pi/2]``.
Examples
--------
>>> np.arcsin(1) # pi/2
1.5707963267948966
>>> np.arcsin(-1) # -pi/2
-1.5707963267948966
>>> np.arcsin(0)
0.0
.. note::
`arcsin` is a multivalued function: for each `x` there are infinitely
many numbers `z` such that :math:`sin(z) = x`. The convention is to
return the angle `z` whose real part lies in [-pi/2, pi/2].
For real-valued input data types, *arcsin* always returns real output.
For each value that cannot be expressed as a real number or infinity,
it yields ``nan`` and sets the `invalid` floating point error flag.
The inverse sine is also known as `asin` or sin^{-1}.
The output `ndarray` has the same `ctx` as the input `ndarray`.
This function differs from the original `numpy.arcsin
<https://docs.scipy.org/doc/numpy/reference/generated/numpy.arcsin.html>`_ in
the following aspects:
* Only support ndarray or scalar now.
* `where` argument is not supported.
* Complex input is not supported.
References
----------
Abramowitz, M. and Stegun, I. A., *Handbook of Mathematical Functions*,
10th printing, New York: Dover, 1964, pp. 79ff.
http://www.math.sfu.ca/~cbm/aands/
"""
return _mx_nd_np.arcsin(x, out=out, **kwargs)
@set_module('mxnet.numpy')
@wrap_np_unary_func
def arccos(x, out=None, **kwargs):
"""
Trigonometric inverse cosine, element-wise.
The inverse of cos so that, if y = cos(x), then x = arccos(y).
Parameters
----------
x : ndarray
x-coordinate on the unit circle. For real arguments, the domain is [-1, 1].
out : ndarray, optional
A location into which the result is stored. If provided, it must have a shape that
the inputs broadcast to. If not provided or None, a freshly-allocated array is returned.
A tuple (possible only as a keyword argument) must have length equal to the number of outputs.
Returns
----------
angle : ndarray
The angle of the ray intersecting the unit circle at the given x-coordinate in radians [0, pi].
This is a scalar if x is a scalar.
Notes
----------
arccos is a multivalued function: for each x there are infinitely many numbers z such that
cos(z) = x. The convention is to return the angle z whose real part lies in [0, pi].
For real-valued input data types, arccos always returns real output.
For each value that cannot be expressed as a real number or infinity, it yields nan and sets
the invalid floating point error flag.
The inverse cos is also known as acos or cos^-1.
Examples
----------
>>> np.arccos([1, -1])
array([ 0. , 3.14159265])
"""
return _mx_nd_np.arccos(x, out=out, **kwargs)
@set_module('mxnet.numpy')
@wrap_np_unary_func
def arctan(x, out=None, **kwargs):
r"""
Trigonometric inverse tangent, element-wise.
The inverse of tan, so that if ``y = tan(x)`` then ``x = arctan(y)``.
Parameters
----------
x : ndarray or scalar
Input values.
out : ndarray or None, optional
A location into which the result is stored. If provided, it must have
a shape that the inputs broadcast to. If not provided or `None`,
a freshly-allocated array is returned.
Returns
-------
out : ndarray or scalar
        Out has the same shape as `x`. Its values lie in
``[-pi/2, pi/2]`` (``arctan(+/-inf)`` returns ``+/-pi/2``).
This is a scalar if `x` is a scalar.
Notes
-----
`arctan` is a multi-valued function: for each `x` there are infinitely
many numbers `z` such that tan(`z`) = `x`. The convention is to return
the angle `z` whose real part lies in [-pi/2, pi/2].
For real-valued input data types, `arctan` always returns real output.
For each value that cannot be expressed as a real number or infinity,
it yields ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, we do not have support for them yet.
The inverse tangent is also known as `atan` or tan^{-1}.
Examples
--------
>>> x = np.array([0, 1])
>>> np.arctan(x)
array([0. , 0.7853982])
>>> np.pi/4
0.7853981633974483
"""
return _mx_nd_np.arctan(x, out=out, **kwargs)
@set_module('mxnet.numpy')
@wrap_np_unary_func
def sign(x, out=None, **kwargs):
"""
Returns an element-wise indication of the sign of a number.
The `sign` function returns ``-1 if x < 0, 0 if x==0, 1 if x > 0``. Only supports real number.
Parameters
----------
x : ndarray or a scalar
Input values.
out : ndarray or None, optional
A location into which the result is stored.
If provided, it must have the same shape and dtype as input ndarray.
If not provided or `None`, a freshly-allocated array is returned.
Returns
-------
y : ndarray
The sign of `x`.
This is a scalar if `x` is a scalar.
.. note::
* Only supports real number as input elements.
* Input type does not support Python native iterables(list, tuple, ...).
* ``out`` param: cannot perform auto broadcasting. ``out`` ndarray's shape must be
the same as the expected output.
* ``out`` param: cannot perform auto type cast. ``out`` ndarray's dtype must be the
same as the expected output.
* ``out`` param does not support scalar input case.
Examples
--------
>>> a = np.array([-5., 4.5])
>>> np.sign(a)
array([-1., 1.])
Scalars as input:
>>> np.sign(4.0)
1.0
>>> np.sign(0)
0
Use ``out`` parameter:
>>> b = np.zeros((2, ))
>>> np.sign(a, out=b)
array([-1., 1.])
>>> b
array([-1., 1.])
"""
return _mx_nd_np.sign(x, out=out)
@set_module('mxnet.numpy')
@wrap_np_unary_func
def log(x, out=None, **kwargs):
"""
Natural logarithm, element-wise.
The natural logarithm `log` is the inverse of the exponential function,
so that `log(exp(x)) = x`. The natural logarithm is logarithm in base
`e`.
Parameters
----------
x : ndarray
Input value. Elements must be of real value.
out : ndarray or None, optional
A location into which the result is stored.
If provided, it must have the same shape and dtype as input ndarray.
If not provided or `None`, a freshly-allocated array is returned.
Returns
-------
y : ndarray
The natural logarithm of `x`, element-wise.
This is a scalar if `x` is a scalar.
.. note::
Currently only supports data of real values and ``inf`` as input. Returns data of
real value, ``inf``, ``-inf`` and ``nan`` according to the input.
This function differs from the original `numpy.log
<https://docs.scipy.org/doc/numpy/reference/generated/numpy.log.html>`_ in
the following aspects:
* Does not support complex number for now
* Input type does not support Python native iterables(list, tuple, ...).
* ``out`` param: cannot perform auto broadcasting. ``out`` ndarray's shape must be
the same as the expected output.
* ``out`` param: cannot perform auto type cast. ``out`` ndarray's dtype must be the
same as the expected output.
* ``out`` param does not support scalar input case.
Examples
--------
>>> a = np.array([1, np.exp(1), np.exp(2), 0], dtype=np.float64)
>>> np.log(a)
array([ 0., 1., 2., -inf], dtype=float64)
>>> # Using the default float32 dtype leads to slightly different behavior
>>> a = np.array([1, np.exp(1), np.exp(2), 0])
>>> np.log(a)
array([ 0., 0.99999994, 2., -inf])
>>> np.log(1)
0.0
"""
return _mx_nd_np.log(x, out=out, **kwargs)
@set_module('mxnet.numpy')
@wrap_np_unary_func
def rint(x, out=None, **kwargs):
"""
Round elements of the array to the nearest integer.
Parameters
----------
x : ndarray or scalar
Input array.
out : ndarray or None
A location into which the result is stored.
If provided, it must have the same shape and type as the input.
If not provided or None, a freshly-allocated array is returned.
Returns
-------
out : ndarray or scalar
Output array is same shape and type as x. This is a scalar if x is a scalar.
.. note::
This function differs from the original `numpy.rint
<https://docs.scipy.org/doc/numpy/reference/generated/numpy.rint.html>`_ in
the following way(s):
        * only ndarray or scalar is accepted as valid input, tuple of ndarray is not supported
* broadcasting to `out` of different shape is currently not supported
* when input is plain python numerics, the result will not be stored in the `out` param
Examples
--------
>>> a = np.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0])
>>> np.rint(a)
array([-2., -2., -0., 0., 1., 2., 2.])
"""
return _mx_nd_np.rint(x, out=out, **kwargs)
@set_module('mxnet.numpy')
@wrap_np_unary_func
def log2(x, out=None, **kwargs):
"""
Base-2 logarithm of x.
Parameters
----------
x : ndarray or scalar
Input values.
out : ndarray or None
A location into which the result is stored.
If provided, it must have the same shape and type as the input.
If not provided or None, a freshly-allocated array is returned.
Returns
-------
y : ndarray
The logarithm base two of `x`, element-wise.
This is a scalar if `x` is a scalar.
.. note::
This function differs from the original `numpy.log2
        <https://docs.scipy.org/doc/numpy/reference/generated/numpy.log2.html>`_ in
the following way(s):
        * only ndarray or scalar is accepted as valid input, tuple of ndarray is not supported
* broadcasting to `out` of different shape is currently not supported
* when input is plain python numerics, the result will not be stored in the `out` param
Examples
--------
>>> x = np.array([0, 1, 2, 2**4])
>>> np.log2(x)
array([-inf, 0., 1., 4.])
"""
return _mx_nd_np.log2(x, out=out, **kwargs)
@set_module('mxnet.numpy')
@wrap_np_unary_func
def log1p(x, out=None, **kwargs):
"""
Return the natural logarithm of one plus the input array, element-wise.
Calculates ``log(1 + x)``.
Parameters
----------
x : ndarray or scalar
Input array.
out : ndarray or None
A location into which the result is stored. If provided, it
        must have a shape that the inputs broadcast to. If not provided
or None, a freshly-allocated array is returned. The dtype of the
output and input must be the same.
Returns
-------
y : ndarray or scalar
Natural logarithm of 1 + x, element-wise. This is a scalar
if x is a scalar.
Notes
-----
For real-valued input, `log1p` is accurate also for `x` so small
that `1 + x == 1` in floating-point accuracy.
Logarithm is a multivalued function: for each `x` there is an infinite
number of `z` such that `exp(z) = 1 + x`. The convention is to return
the `z` whose imaginary part lies in `[-pi, pi]`.
For real-valued input data types, `log1p` always returns real output.
For each value that cannot be expressed as a real number or infinity,
it yields ``nan`` and sets the `invalid` floating point error flag.
cannot support complex-valued input.
Examples
--------
>>> np.log1p(1e-99)
1e-99
>>> a = np.array([3, 4, 5])
>>> np.log1p(a)
array([1.3862944, 1.609438 , 1.7917595])
"""
return _mx_nd_np.log1p(x, out=out, **kwargs)
@set_module('mxnet.numpy')
@wrap_np_unary_func
def degrees(x, out=None, **kwargs):
"""
Convert angles from radians to degrees.
Parameters
----------
x : ndarray
Input value. Elements must be of real value.
out : ndarray or None, optional
A location into which the result is stored.
If provided, it must have the same shape and dtype as input ndarray.
If not provided or `None`, a freshly-allocated array is returned.
Returns
-------
y : ndarray
The corresponding degree values; if `out` was supplied this is a
reference to it.
This is a scalar if `x` is a scalar.
.. note::
This function differs from the original `numpy.degrees
<https://docs.scipy.org/doc/numpy/reference/generated/numpy.degrees.html>`_ in
the following aspects:
* Input type does not support Python native iterables(list, tuple, ...).
Only ndarray is supported.
* ``out`` param: cannot perform auto broadcasting. ``out`` ndarray's shape must be
the same as the expected output.
* ``out`` param: cannot perform auto type cast. ``out`` ndarray's dtype must be the
same as the expected output.
* ``out`` param does not support scalar input case.
Examples
--------
>>> rad = np.arange(12.) * np.pi / 6
>>> np.degrees(rad)
array([ 0., 30., 60., 90., 120., 150., 180., 210., 240., 270., 300., 330.])
>>> # Use specified ``out`` ndarray:
>>> out = np.zeros((rad.shape))
>>> np.degrees(rad, out)
array([ 0., 30., 60., 90., 120., 150., 180., 210., 240., 270., 300., 330.])
>>> out
array([ 0., 30., 60., 90., 120., 150., 180., 210., 240., 270., 300., 330.])
"""
return _mx_nd_np.degrees(x, out=out, **kwargs)
@set_module('mxnet.numpy')
@wrap_np_unary_func
def rad2deg(x, out=None, **kwargs):
r"""Convert angles from radians to degrees.
Parameters
----------
x : ndarray or scalar
        Angles in radians.
out : ndarray or None, optional
A location into which the result is stored. If not provided or `None`,
a freshly-allocated array is returned.
Returns
-------
y : ndarray or scalar
        The corresponding angle in degrees.
This is a scalar if `x` is a scalar.
.. note::
"rad2deg(x)" is "x * 180 / pi".
        This function differs from the original numpy.rad2deg in the following aspects:
        * Only support float32 and float64.
        * `out` must be of the same size as the input.
Examples
--------
>>> np.rad2deg(np.pi/2)
90.0
"""
return _mx_nd_np.rad2deg(x, out=out)
@set_module('mxnet.numpy')
@wrap_np_unary_func
def radians(x, out=None, **kwargs):
"""
Convert angles from degrees to radians.
Parameters
----------
x : ndarray or scalar
Input array in degrees.
out : ndarray or None
A location into which the result is stored.
If provided, it must have the same shape and type as the input.
If not provided or None, a freshly-allocated array is returned.
Returns
-------
y : ndarray
The corresponding radian values. This is a scalar if x is a scalar.
.. note::
This function differs from the original `numpy.radians
<https://docs.scipy.org/doc/numpy/reference/generated/numpy.radians.html>`_ in
the following way(s):
        * only ndarray or scalar is accepted as valid input, tuple of ndarray is not supported
* broadcasting to `out` of different shape is currently not supported
* when input is plain python numerics, the result will not be stored in the `out` param
Examples
--------
>>> deg = np.arange(12.) * 30.
>>> np.radians(deg)
array([0. , 0.5235988, 1.0471976, 1.5707964, 2.0943952, 2.6179938,
3.1415927, 3.6651914, 4.1887903, 4.712389 , 5.2359877, 5.7595863],
dtype=float32)
"""
return _mx_nd_np.radians(x, out=out, **kwargs)
@set_module('mxnet.numpy')
@wrap_np_unary_func
def deg2rad(x, out=None, **kwargs):
r"""
Convert angles from degrees to radians.
Parameters
----------
x : ndarray or scalar
Angles in degrees.
out : ndarray or None, optional
A location into which the result is stored. If not provided or `None`,
a freshly-allocated array is returned.
Returns
-------
y : ndarray or scalar
The corresponding angle in radians.
This is a scalar if `x` is a scalar.
.. note::
"deg2rad(x)" is "x * pi / 180".
        This function differs from the original numpy.deg2rad in the following aspects:
        * Only support float32 and float64.
        * `out` must be of the same size as the input.
Examples
--------
>>> np.deg2rad(180)
3.1415927
"""
return _mx_nd_np.deg2rad(x, out=out)
@set_module('mxnet.numpy')
@wrap_np_unary_func
def reciprocal(x, out=None, **kwargs):
r"""Return the reciprocal of the argument, element-wise.
Calculates ``1/x``.
Parameters
----------
x : ndarray or scalar
The values whose reciprocals are required.
out : ndarray or None, optional
A location into which the result is stored.
If provided, it must have the same shape as the input.
If not provided or None, a freshly-allocated array is returned.
Returns
-------
y : ndarray or scalar
Output array is same shape and type as x. This is a scalar if x is a scalar.
Examples
--------
>>> np.reciprocal(2.)
0.5
>>> x = np.array([1, 2., 3.33])
>>> np.reciprocal(x)
array([1. , 0.5 , 0.3003003])
.. note::
This function is not designed to work with integers.
For integer arguments with absolute value larger than 1 the result is
always zero because of the way Python handles integer division. For
integer zero the result is an overflow.
The output `ndarray` has the same `ctx` as the input `ndarray`.
This function differs from the original `numpy.reciprocal
<https://docs.scipy.org/doc/numpy/reference/generated/numpy.reciprocal.html>`_ in
the following aspects:
* Only support ndarray and scalar now.
* `where` argument is not supported.
"""
return _mx_nd_np.reciprocal(x, out=out, **kwargs)
@set_module('mxnet.numpy')
@wrap_np_unary_func
def square(x, out=None, **kwargs):
r"""
Return the element-wise square of the input.
Parameters
----------
x : ndarray or scalar
The values whose squares are required.
out : ndarray or None, optional
A location into which the result is stored.
If provided, it must have the same shape as the input.
If not provided or None, a freshly-allocated array is returned.
Returns
-------
y : ndarray or scalar
Output array is same shape and type as x. This is a scalar if x is a scalar.
Examples
--------
>>> np.square(2.)
4.0
>>> x = np.array([1, 2., -1])
>>> np.square(x)
array([1., 4., 1.])
.. note::
The output `ndarray` has the same `ctx` as the input `ndarray`.
This function differs from the original `numpy.square
<https://docs.scipy.org/doc/numpy/reference/generated/numpy.square.html>`_ in
the following aspects:
* Only support ndarray and scalar now.
* `where` argument is not supported.
* Complex input is not supported.
"""
return _mx_nd_np.square(x, out=out, **kwargs)
@set_module('mxnet.numpy')
@wrap_np_unary_func
def negative(x, out=None, **kwargs):
r"""
Numerical negative, element-wise.
Parameters
----------
x : ndarray or scalar
Input array.
out : ndarray, None, or tuple of ndarray and None, optional
A location into which the result is stored.
If provided, it must have a shape that the inputs broadcast to.
If not provided or None, a freshly-allocated array is returned.
A tuple (possible only as a keyword argument) must have length
equal to the number of outputs.
Returns
-------
y : ndarray or scalar
Returned array or scalar: y = -x. This is a scalar if x is a scalar.
Examples
--------
>>> np.negative(1)
-1
"""
return _mx_nd_np.negative(x, out=out)
@set_module('mxnet.numpy')
@wrap_np_unary_func
def fix(x, out=None, **kwargs):
"""
Round an array of floats element-wise to nearest integer towards zero.
The rounded values are returned as floats.
Parameters
----------
x : ndarray
An array of floats to be rounded
out : ndarray, optional
Output array
Returns
-------
    y : ndarray or scalar
        An ndarray of floats with each element of `x` rounded towards zero. This is a scalar if `x` is a scalar.
Examples
---------
>>> np.fix(3.14)
    3.0
"""
return _mx_nd_np.fix(x, out=out)
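# Illustrative sketch (not part of the module API): ``fix`` rounds towards
# zero, so it matches ``floor`` for positive values and ``ceil`` for negative
# ones. The helper name is hypothetical.
def _example_fix_vs_floor_ceil():
    from mxnet import numpy as np
    a = np.array([-1.7, -0.2, 0.2, 1.7])
    # Expected: fix   -> [-1., -0.,  0.,  1.]
    #           floor -> [-2., -1.,  0.,  1.]
    #           ceil  -> [-1., -0.,  1.,  2.]
    return np.fix(a), np.floor(a), np.ceil(a)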
@set_module('mxnet.numpy')
@wrap_np_unary_func
def tan(x, out=None, **kwargs):
r"""
Compute tangent element-wise.
Equivalent to np.sin(x)/np.cos(x) element-wise.
Parameters
----------
x : ndarray
Input array.
out : ndarray or none, optional
A location into which the result is stored. If provided,
it must have a shape that the inputs broadcast to. If not provided or None,
a freshly-allocated array is returned. A tuple (possible only as a keyword argument)
must have length equal to the number of outputs.
Returns
-------
y : ndarray
The corresponding tangent values. This is a scalar if x is a scalar.
Examples
---------
>>> np.tan(np.array([-np.pi, np.pi/2, np.pi]))
array([-8.7422777e-08, -2.2877332e+07, 8.7422777e-08])
"""
return _mx_nd_np.tan(x, out=out, **kwargs)
@set_module('mxnet.numpy')
@wrap_np_unary_func
def ceil(x, out=None, **kwargs):
r"""
Return the ceiling of the input, element-wise.
The ceil of the ndarray `x` is the smallest integer `i`, such that
`i >= x`. It is often denoted as :math:`\lceil x \rceil`.
Parameters
----------
x : ndarray or scalar
Input array.
out : ndarray or None
A location into which the result is stored. If provided, it
        must have a shape that the inputs broadcast to. If not provided
or None, a freshly-allocated array is returned. The dtype of the
output and input must be the same.
Returns
-------
y : ndarray or scalar
The ceiling of each element in `x`, with `float` dtype.
This is a scalar if `x` is a scalar.
Examples
--------
>>> a = np.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0])
>>> np.ceil(a)
array([-1., -1., -0., 1., 2., 2., 2.])
>>> # if you use parameter out, x and out must be ndarray.
>>> a = np.array(1)
>>> np.ceil(np.array(3.5), a)
array(4.)
>>> a
array(4.)
"""
return _mx_nd_np.ceil(x, out=out, **kwargs)
@set_module('mxnet.numpy')
@wrap_np_unary_func
def floor(x, out=None, **kwargs):
r"""
Return the floor of the input, element-wise.
    The floor of the ndarray `x` is the largest integer `i`, such that
`i <= x`. It is often denoted as :math:`\lfloor x \rfloor`.
Parameters
----------
x : ndarray or scalar
Input array.
out : ndarray or None
A location into which the result is stored. If provided, it
        must have a shape that the inputs broadcast to. If not provided
or None, a freshly-allocated array is returned. The dtype of the
output and input must be the same.
Returns
-------
y : ndarray or scalar
The floor of each element in `x`, with `float` dtype.
This is a scalar if `x` is a scalar.
Examples
--------
>>> a = np.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0])
>>> np.floor(a)
array([-2., -2., -1., 0., 1., 1., 2.])
>>> # if you use parameter out, x and out must be ndarray.
>>> a = np.array(1)
>>> np.floor(np.array(3.5), a)
array(3.)
>>> a
array(3.)
"""
return _mx_nd_np.floor(x, out=out, **kwargs)
@set_module('mxnet.numpy')
@wrap_np_unary_func
def invert(x, out=None, **kwargs):
r"""
Compute bit-wise inversion, or bit-wise NOT, element-wise.
Computes the bit-wise NOT of the underlying binary representation of
the integers in the input arrays. This ufunc implements the C/Python
operator ``~``.
Parameters
----------
x : array_like
Only integer and boolean types are handled.
out : ndarray, None, or tuple of ndarray and None, optional
A location into which the result is stored. If provided, it must have
a shape that the inputs broadcast to. If not provided or `None`,
a freshly-allocated array is returned. A tuple (possible only as a
keyword argument) must have length equal to the number of outputs.
Returns
-------
out : ndarray or scalar
Result.
This is a scalar if `x` is a scalar.
See Also
--------
bitwise_and, bitwise_or, bitwise_xor
logical_not
binary_repr :
Return the binary representation of the input number as a string.
Examples
--------
We've seen that 13 is represented by ``00001101``.
The invert or bit-wise NOT of 13 is then:
>>> x = np.invert(np.array(13, dtype=np.uint8))
>>> x
242
>>> np.binary_repr(x, width=8)
'11110010'
Notes
-----
`bitwise_not` is an alias for `invert`:
>>> np.bitwise_not is np.invert
True
"""
return _mx_nd_np.bitwise_not(x, out=out, **kwargs)
@set_module('mxnet.numpy')
@wrap_np_unary_func
def bitwise_not(x, out=None, **kwargs):
r"""
Compute bit-wise inversion, or bit-wise NOT, element-wise.
Computes the bit-wise NOT of the underlying binary representation of
the integers in the input arrays. This ufunc implements the C/Python
operator ``~``.
Parameters
----------
x : array_like
Only integer and boolean types are handled.
out : ndarray, None, or tuple of ndarray and None, optional
A location into which the result is stored. If provided, it must have
a shape that the inputs broadcast to. If not provided or `None`,
a freshly-allocated array is returned. A tuple (possible only as a
keyword argument) must have length equal to the number of outputs.
Returns
-------
out : ndarray or scalar
Result.
This is a scalar if `x` is a scalar.
See Also
--------
bitwise_and, bitwise_or, bitwise_xor
logical_not
binary_repr :
Return the binary representation of the input number as a string.
Examples
--------
We've seen that 13 is represented by ``00001101``.
The invert or bit-wise NOT of 13 is then:
>>> x = np.invert(np.array(13, dtype=np.uint8))
>>> x
242
>>> np.binary_repr(x, width=8)
'11110010'
Notes
-----
`bitwise_not` is an alias for `invert`:
>>> np.bitwise_not is np.invert
True
"""
return _mx_nd_np.bitwise_not(x, out=out, **kwargs)
@set_module('mxnet.numpy')
@wrap_np_unary_func
def trunc(x, out=None, **kwargs):
r"""
Return the truncated value of the input, element-wise.
The truncated value of the scalar `x` is the nearest integer `i` which
is closer to zero than `x` is. In short, the fractional part of the
signed number `x` is discarded.
Parameters
----------
x : ndarray or scalar
Input data.
out : ndarray or None, optional
A location into which the result is stored.
Returns
-------
y : ndarray or scalar
The truncated value of each element in `x`.
This is a scalar if `x` is a scalar.
.. note::
This function differs from the original numpy.trunc in the following aspects:
* Do not support `where`, a parameter in numpy which indicates where to calculate.
* Cannot cast type automatically. Dtype of `out` must be same as the expected one.
* Cannot broadcast automatically. Shape of `out` must be same as the expected one.
* If `x` is plain python numeric, the result won't be stored in out.
Examples
--------
>>> a = np.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0])
>>> np.trunc(a)
array([-1., -1., -0., 0., 1., 1., 2.])
"""
return _mx_nd_np.trunc(x, out=out, **kwargs)
@set_module('mxnet.numpy')
@wrap_np_unary_func
def logical_not(x, out=None, **kwargs):
r"""
Compute the truth value of NOT x element-wise.
Parameters
----------
x : ndarray or scalar
Logical NOT is applied to the elements of `x`.
out : ndarray or None, optional
A location into which the result is stored.
Returns
-------
y : bool or ndarray of bool
Boolean result with the same shape as `x` of the NOT operation
on elements of `x`.
This is a scalar if `x` is a scalar.
.. note::
This function differs from the original numpy.logical_not in the following aspects:
* Do not support `where`, a parameter in numpy which indicates where to calculate.
* Cannot cast type automatically. Dtype of `out` must be same as the expected one.
* Cannot broadcast automatically. Shape of `out` must be same as the expected one.
* If `x` is plain python numeric, the result won't be stored in out.
Examples
--------
>>> x= np.array([True, False, 0, 1])
>>> np.logical_not(x)
array([False, True, True, False])
>>> x = np.arange(5)
>>> np.logical_not(x<3)
array([False, False, False, True, True])
"""
return _mx_nd_np.logical_not(x, out=out, **kwargs)
@set_module('mxnet.numpy')
@wrap_np_unary_func
def arcsinh(x, out=None, **kwargs):
r"""
    Inverse hyperbolic sine, element-wise.
Parameters
----------
x : ndarray or scalar
Input array.
out : ndarray or None, optional
A location into which the result is stored.
Returns
-------
arcsinh : ndarray
Array of the same shape as `x`.
This is a scalar if `x` is a scalar.
.. note::
`arcsinh` is a multivalued function: for each `x` there are infinitely
many numbers `z` such that `sinh(z) = x`.
For real-valued input data types, `arcsinh` always returns real output.
For each value that cannot be expressed as a real number or infinity, it
yields ``nan`` and sets the `invalid` floating point error flag.
This function differs from the original numpy.arcsinh in the following aspects:
* Do not support `where`, a parameter in numpy which indicates where to calculate.
* Do not support complex-valued input.
* Cannot cast type automatically. DType of `out` must be same as the expected one.
* Cannot broadcast automatically. Shape of `out` must be same as the expected one.
* If `x` is plain python numeric, the result won't be stored in out.
Examples
--------
>>> a = np.array([3.2, 5.0])
    >>> np.arcsinh(a)
    array([1.8798634, 2.3124383])
    >>> np.arcsinh(0)
    0.0
"""
return _mx_nd_np.arcsinh(x, out=out, **kwargs)
@set_module('mxnet.numpy')
@wrap_np_unary_func
def arccosh(x, out=None, **kwargs):
r"""
Inverse hyperbolic cosine, element-wise.
Parameters
----------
x : ndarray or scalar
Input array.
out : ndarray or None, optional
A location into which the result is stored.
Returns
-------
arccosh : ndarray
Array of the same shape as `x`.
This is a scalar if `x` is a scalar.
.. note::
`arccosh` is a multivalued function: for each `x` there are infinitely
many numbers `z` such that `cosh(z) = x`.
For real-valued input data types, `arccosh` always returns real output.
For each value that cannot be expressed as a real number or infinity, it
yields ``nan`` and sets the `invalid` floating point error flag.
This function differs from the original numpy.arccosh in the following aspects:
* Do not support `where`, a parameter in numpy which indicates where to calculate.
* Do not support complex-valued input.
* Cannot cast type automatically. Dtype of `out` must be same as the expected one.
* Cannot broadcast automatically. Shape of `out` must be same as the expected one.
* If `x` is plain python numeric, the result won't be stored in out.
Examples
--------
>>> a = np.array([3.2, 5.0])
>>> np.arccosh(a)
array([1.8309381, 2.2924316])
>>> np.arccosh(1)
0.0
"""
return _mx_nd_np.arccosh(x, out=out, **kwargs)
@set_module('mxnet.numpy')
@wrap_np_unary_func
def arctanh(x, out=None, **kwargs):
r"""
Inverse hyperbolic tangent, element-wise.
Parameters
----------
x : ndarray or scalar
Input array.
out : ndarray or None, optional
A location into which the result is stored.
Returns
-------
arctanh : ndarray
Array of the same shape as `x`.
This is a scalar if `x` is a scalar.
.. note::
`arctanh` is a multivalued function: for each `x` there are infinitely
many numbers `z` such that `tanh(z) = x`.
For real-valued input data types, `arctanh` always returns real output.
For each value that cannot be expressed as a real number or infinity, it
yields ``nan`` and sets the `invalid` floating point error flag.
This function differs from the original numpy.arctanh in the following aspects:
* Do not support `where`, a parameter in numpy which indicates where to calculate.
* Do not support complex-valued input.
* Cannot cast type automatically. Dtype of `out` must be same as the expected one.
* Cannot broadcast automatically. Shape of `out` must be same as the expected one.
* If `x` is plain python numeric, the result won't be stored in out.
Examples
--------
>>> a = np.array([0.0, -0.5])
>>> np.arctanh(a)
array([0., -0.54930615])
    >>> np.arctanh(0)
0.0
"""
return _mx_nd_np.arctanh(x, out=out, **kwargs)
@set_module('mxnet.numpy')
def argsort(a, axis=-1, kind=None, order=None):
"""
Returns the indices that would sort an array.
Perform an indirect sort along the given axis using the algorithm specified
by the `kind` keyword. It returns an array of indices of the same shape as
`a` that index data along the given axis in sorted order.
Parameters
----------
a : ndarray
Array to sort.
axis : int or None, optional
Axis along which to sort. The default is -1 (the last axis). If None,
the flattened array is used.
kind : string, optional
This argument can take any string, but it does not have any effect on the
final result.
order : str or list of str, optional
Not supported yet, will raise NotImplementedError if not None.
Returns
-------
index_array : ndarray, int
Array of indices that sort `a` along the specified `axis`.
If `a` is one-dimensional, ``a[index_array]`` yields a sorted `a`.
More generally, ``np.take_along_axis(a, index_array, axis=axis)``
always yields the sorted `a`, irrespective of dimensionality.
Notes
-----
This operator does not support different sorting algorithms.
Examples
--------
One dimensional array:
>>> x = np.array([3, 1, 2])
>>> np.argsort(x)
array([1, 2, 0])
Two-dimensional array:
>>> x = np.array([[0, 3], [2, 2]])
>>> x
array([[0, 3],
[2, 2]])
>>> ind = np.argsort(x, axis=0) # sorts along first axis (down)
>>> ind
array([[0, 1],
[1, 0]])
>>> np.take_along_axis(x, ind, axis=0) # same as np.sort(x, axis=0)
array([[0, 2],
[2, 3]])
>>> ind = np.argsort(x, axis=1) # sorts along last axis (across)
>>> ind
array([[0, 1],
[0, 1]])
>>> np.take_along_axis(x, ind, axis=1) # same as np.sort(x, axis=1)
array([[0, 3],
[2, 2]])
Indices of the sorted elements of a N-dimensional array:
>>> ind = np.unravel_index(np.argsort(x, axis=None), x.shape)
>>> ind
(array([0, 1, 1, 0]), array([0, 0, 1, 1]))
>>> x[ind] # same as np.sort(x, axis=None)
array([0, 2, 2, 3])
"""
return _mx_nd_np.argsort(a, axis=axis, kind=kind, order=order)
@set_module('mxnet.numpy')
def sort(a, axis=-1, kind=None, order=None):
"""
Return a sorted copy of an array.
Parameters
----------
a : ndarray
Array to be sorted.
axis : int or None, optional
Axis along which to sort. The default is -1 (the last axis). If None,
the flattened array is used.
kind : string, optional
This argument can take any string, but it does not have any effect on the
final result.
order : str or list of str, optional
Not supported yet, will raise NotImplementedError if not None.
Returns
-------
sorted_array : ndarray
Array of the same type and shape as `a`.
Notes
-----
This operator does not support different sorting algorithms.
Examples
--------
>>> a = np.array([[1,4],[3,1]])
>>> np.sort(a) # sort along the last axis
array([[1, 4],
[1, 3]])
>>> np.sort(a, axis=None) # sort the flattened array
array([1, 1, 3, 4])
>>> np.sort(a, axis=0) # sort along the first axis
array([[1, 1],
[3, 4]])
"""
return _mx_nd_np.sort(a, axis=axis, kind=kind, order=order)
@set_module('mxnet.numpy')
def tensordot(a, b, axes=2):
r"""Compute tensor dot product along specified axes for arrays >= 1-D.
Given two tensors (arrays of dimension greater than or equal to one),
``a`` and ``b``, and an ndarray object containing two ndarray
objects, ``(a_axes, b_axes)``, sum the products of ``a``'s and ``b``'s
elements (components) over the axes specified by ``a_axes`` and
``b_axes``. The third argument can be a single non-negative
integer_like scalar, ``N``; if it is such, then the last ``N``
dimensions of ``a`` and the first ``N`` dimensions of ``b`` are summed
over.
Parameters
----------
a, b : ndarray, len(shape) >= 1
Tensors to "dot".
axes : int or (2,) ndarray
* integer_like
If an int N, sum over the last N axes of `a` and the first N axes
of `b` in order. The sizes of the corresponding axes must match.
* (2,) ndarray
Or, a list of axes to be summed over, first sequence applying to `a`,
        second to `b`. Both sequences must be of the same length.
See Also
--------
dot, einsum
.. note::
Three common use cases are:
* ``axes = 0`` : tensor product :math:`a\otimes b`
* ``axes = 1`` : tensor dot product :math:`a\cdot b`
* ``axes = 2`` : (default) tensor double contraction :math:`a:b`
When `axes` is integer_like, the sequence for evaluation will be: first
the -Nth axis in `a` and 0th axis in `b`, and the -1th axis in `a` and
Nth axis in `b` last.
When there is more than one axis to sum over - and they are not the last
(first) axes of `a` (`b`) - the argument `axes` should consist of
two sequences of the same length, with the first axis to sum over given
first in both sequences, the second axis second, and so forth.
Examples
--------
>>> a = np.arange(60.).reshape(3,4,5)
>>> b = np.arange(24.).reshape(4,3,2)
>>> c = np.tensordot(a,b, axes=([1,0],[0,1]))
>>> c.shape
(5, 2)
>>> c
array([[ 4400., 4730.],
[ 4532., 4874.],
[ 4664., 5018.],
[ 4796., 5162.],
[ 4928., 5306.]])
"""
return _mx_nd_np.tensordot(a, b, axes)
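# Illustrative sketch (not part of the module API): the integer form of
# ``axes`` sums over the last N axes of ``a`` and the first N axes of ``b``,
# so ``axes=1`` reproduces an ordinary matrix product for 2-D inputs. The
# helper name is hypothetical.
def _example_tensordot_int_axes():
    from mxnet import numpy as np
    a = np.arange(6.).reshape(2, 3)
    b = np.arange(12.).reshape(3, 4)
    # Contracts the last axis of ``a`` with the first axis of ``b`` -> shape (2, 4).
    return np.tensordot(a, b, axes=1)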
@set_module('mxnet.numpy')
def histogram(a, bins=10, range=None, normed=None, weights=None, density=None): # pylint: disable=too-many-arguments
"""
Compute the histogram of a set of data.
Parameters
----------
a : ndarray
Input data. The histogram is computed over the flattened array.
bins : int or ndarray
If `bins` is an int, it defines the number of equal-width
bins in the given range (10, by default). If `bins` is a
sequence, it defines a monotonically increasing array of bin edges,
including the rightmost edge, allowing for non-uniform bin widths.
.. versionadded:: 1.11.0
If `bins` is a string, it defines the method used to calculate the
optimal bin width, as defined by `histogram_bin_edges`.
range : (float, float)
The lower and upper range of the bins. Required when `bins` is an integer.
Values outside the range are ignored. The first element of the range must
be less than or equal to the second.
normed : bool, optional
Not supported yet, coming soon.
weights : array_like, optional
Not supported yet, coming soon.
density : bool, optional
Not supported yet, coming soon.
Examples
--------
>>> np.histogram(np.arange(4), bins=np.arange(5))
[array([1, 1, 1, 1], dtype=int64), array([0., 1., 2., 3., 4.])]
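    When ``bins`` is an integer, ``range`` must be given explicitly; an
    illustrative sketch (output format follows the example above):
    >>> np.histogram(np.arange(4), bins=2, range=(0, 4))
    [array([2, 2], dtype=int64), array([0., 2., 4.])]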
"""
return _mx_nd_np.histogram(a, bins=bins, range=range, normed=normed, weights=weights, density=density)
# pylint: disable=redefined-outer-name
@set_module('mxnet.numpy')
def eye(N, M=None, k=0, dtype=float, **kwargs):
"""
Return a 2-D array with ones on the diagonal and zeros elsewhere.
Parameters
----------
N : int
Number of rows in the output.
M : int, optional
Number of columns in the output. If None, defaults to N.
k : int, optional
Index of the diagonal: 0 (the default) refers to the main diagonal,
a positive value refers to an upper diagonal,
and a negative value to a lower diagonal.
dtype : data-type, optional
Data-type of the returned array.
When npx.is_np_default_dtype() returns False, default dtype is float32;
When npx.is_np_default_dtype() returns True, default dtype is float64.
Returns
-------
I : ndarray of shape (N,M)
An array where all elements are equal to zero,
except for the k-th diagonal, whose values are equal to one.
Examples
--------
>>> np.eye(2, dtype=int)
array([[1, 0],
[0, 1]], dtype=int64)
>>> np.eye(3, k=1)
array([[0., 1., 0.],
[0., 0., 1.],
[0., 0., 0.]])
"""
return _mx_nd_np.eye(N, M, k, dtype, **kwargs)
# pylint: enable=redefined-outer-name
# pylint: disable=redefined-outer-name
@set_module('mxnet.numpy')
def linspace(start, stop, num=50, endpoint=True, retstep=False, dtype=None, axis=0, ctx=None): # pylint: disable=too-many-arguments
r"""
Return evenly spaced numbers over a specified interval.
Returns num evenly spaced samples, calculated over the interval [start, stop].
The endpoint of the interval can optionally be excluded.
Parameters
----------
start : real number
The starting value of the sequence.
stop : real number
The end value of the sequence, unless endpoint is set to False. In
that case, the sequence consists of all but the last of num + 1
evenly spaced samples, so that stop is excluded. Note that the step
size changes when endpoint is False.
num : int, optional
Number of samples to generate. Default is 50. Must be non-negative.
endpoint : bool, optional
If True, stop is the last sample. Otherwise, it is not included.
Default is True.
retstep : bool, optional
If True, return (samples, step), where step is the spacing between samples.
dtype : dtype, optional
The type of the output array. If dtype is not given, infer the data
type from the other input arguments.
axis : int, optional
The axis in the result to store the samples. Relevant only if start or
stop are array-like. By default (0), the samples will be along a new
axis inserted at the beginning. Use -1 to get an axis at the end.
Returns
-------
samples : ndarray
There are num equally spaced samples in the closed interval
`[start, stop]` or the half-open interval `[start, stop)`
(depending on whether endpoint is True or False).
step : float, optional
Only returned if retstep is True
Size of spacing between samples.
See Also
--------
arange : Similar to `linspace`, but uses a step size (instead of the
number of samples).
Examples
--------
>>> np.linspace(2.0, 3.0, num=5)
array([2. , 2.25, 2.5 , 2.75, 3. ])
>>> np.linspace(2.0, 3.0, num=5, endpoint=False)
array([2. , 2.2, 2.4, 2.6, 2.8])
>>> np.linspace(2.0, 3.0, num=5, retstep=True)
(array([2. , 2.25, 2.5 , 2.75, 3. ]), 0.25)
Graphical illustration:
>>> import matplotlib.pyplot as plt
>>> N = 8
>>> y = np.zeros(N)
>>> x1 = np.linspace(0, 10, N, endpoint=True)
>>> x2 = np.linspace(0, 10, N, endpoint=False)
>>> plt.plot(x1.asnumpy(), y.asnumpy(), 'o')
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.plot(x2.asnumpy(), (y + 0.5).asnumpy(), 'o')
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.ylim([-0.5, 1])
(-0.5, 1)
>>> plt.show()
.. note::
This function differs from the original `numpy.linspace
<https://docs.scipy.org/doc/numpy/reference/generated/numpy.linspace.html>`_ in
the following aspects:
* `start` and `stop` do not support list, numpy ndarray and mxnet ndarray
* axis could only be 0
* There could be an additional `ctx` argument to specify the device, e.g. the i-th
GPU.
"""
return _mx_nd_np.linspace(start, stop, num, endpoint, retstep, dtype, axis, ctx)
# pylint: enable=redefined-outer-name
# pylint: disable=too-many-arguments, redefined-outer-name
@set_module('mxnet.numpy')
def logspace(start, stop, num=50, endpoint=True, base=10.0, dtype=None, axis=0, ctx=None):
r"""Return numbers spaced evenly on a log scale.
In linear space, the sequence starts at ``base ** start``
(`base` to the power of `start`) and ends with ``base ** stop``
(see `endpoint` below).
Non-scalar `start` and `stop` are now supported.
Parameters
----------
start : int or float
``base ** start`` is the starting value of the sequence.
stop : int or float
``base ** stop`` is the final value of the sequence, unless `endpoint`
is False. In that case, ``num + 1`` values are spaced over the
interval in log-space, of which all but the last (a sequence of
length `num`) are returned.
num : integer, optional
Number of samples to generate. Default is 50.
endpoint : boolean, optional
If true, `stop` is the last sample. Otherwise, it is not included.
Default is True.
base : float, optional
The base of the log space. The step size between the elements in
``ln(samples) / ln(base)`` (or ``log_base(samples)``) is uniform.
Default is 10.0.
dtype : dtype
The type of the output array. If `dtype` is not given, infer the data
type from the other input arguments.
axis : int, optional
The axis in the result to store the samples. Relevant only if start
or stop are array-like. By default (0), the samples will be along a
new axis inserted at the beginning. Now, axis only support axis = 0.
ctx : Context, optional
An optional device context (default is the current default context).
Returns
-------
samples : ndarray
`num` samples, equally spaced on a log scale.
See Also
--------
arange : Similar to linspace, with the step size specified instead of the
number of samples. Note that, when used with a float endpoint, the
endpoint may or may not be included.
linspace : Similar to logspace, but with the samples uniformly distributed
in linear space, instead of log space.
Notes
-----
Logspace is equivalent to the code
>>> y = np.linspace(start, stop, num=num, endpoint=endpoint)
...
>>> power(base, y).astype(dtype)
...
Examples
--------
>>> np.logspace(2.0, 3.0, num=4)
array([ 100. , 215.44347, 464.15887, 1000. ])
>>> np.logspace(2.0, 3.0, num=4, endpoint=False)
array([100. , 177.82794, 316.22775, 562.3413 ])
>>> np.logspace(2.0, 3.0, num=4, base=2.0)
array([4. , 5.0396843, 6.349604 , 8. ])
>>> np.logspace(2.0, 3.0, num=4, base=2.0, dtype=np.int32)
array([4, 5, 6, 8], dtype=int32)
>>> np.logspace(2.0, 3.0, num=4, ctx=npx.gpu(0))
array([ 100. , 215.44347, 464.15887, 1000. ], ctx=gpu(0))
"""
return _mx_nd_np.logspace(start, stop, num, endpoint, base, dtype, axis, ctx=ctx)
# pylint: enable=too-many-arguments, redefined-outer-name
@set_module('mxnet.numpy')
def expand_dims(a, axis):
"""Expand the shape of an array.
Insert a new axis that will appear at the `axis` position in the expanded array shape.
Parameters
----------
a : ndarray
Input array.
axis : int
Position in the expanded axes where the new axis is placed.
Returns
-------
res : ndarray
Output array. The number of dimensions is one greater than that of
the input array.
See Also
--------
squeeze : The inverse operation, removing singleton dimensions
reshape : Insert, remove, and combine dimensions, and resize existing ones
Examples
--------
>>> x = np.array([1,2])
>>> x.shape
(2,)
>>> y = np.expand_dims(x, axis=0)
>>> y
array([[1., 2.]])
>>> y.shape
(1, 2)
>>> y = np.expand_dims(x, axis=1) # Equivalent to x[:,np.newaxis]
>>> y
array([[1.],
[2.]])
>>> y.shape
(2, 1)
Note that some examples may use None instead of np.newaxis. These are the same objects:
>>> np.newaxis is None
True
"""
return _npi.expand_dims(a, axis)
@set_module('mxnet.numpy')
def tile(A, reps):
r"""
Construct an array by repeating A the number of times given by reps.
If `reps` has length ``d``, the result will have dimension of
``max(d, A.ndim)``.
If ``A.ndim < d``, `A` is promoted to be d-dimensional by prepending new
axes. So a shape (3,) array is promoted to (1, 3) for 2-D replication,
or shape (1, 1, 3) for 3-D replication. If this is not the desired
behavior, promote `A` to d-dimensions manually before calling this
function.
If ``A.ndim > d``, `reps` is promoted to `A`.ndim by pre-pending 1's to it.
Thus for an `A` of shape (2, 3, 4, 5), a `reps` of (2, 2) is treated as
(1, 1, 2, 2).
Parameters
----------
A : ndarray or scalar
An input array or a scalar to repeat.
reps : a single integer or tuple of integers
The number of repetitions of `A` along each axis.
Returns
-------
c : ndarray
The tiled output array.
Examples
--------
>>> a = np.array([0, 1, 2])
>>> np.tile(a, 2)
array([0., 1., 2., 0., 1., 2.])
>>> np.tile(a, (2, 2))
array([[0., 1., 2., 0., 1., 2.],
[0., 1., 2., 0., 1., 2.]])
>>> np.tile(a, (2, 1, 2))
array([[[0., 1., 2., 0., 1., 2.]],
[[0., 1., 2., 0., 1., 2.]]])
>>> b = np.array([[1, 2], [3, 4]])
>>> np.tile(b, 2)
array([[1., 2., 1., 2.],
[3., 4., 3., 4.]])
>>> np.tile(b, (2, 1))
array([[1., 2.],
[3., 4.],
[1., 2.],
[3., 4.]])
>>> c = np.array([1,2,3,4])
>>> np.tile(c,(4,1))
array([[1., 2., 3., 4.],
[1., 2., 3., 4.],
[1., 2., 3., 4.],
[1., 2., 3., 4.]])
Scalar as input:
>>> np.tile(2, 3)
array([2, 2, 2]) # repeating integer `2`
"""
return _mx_nd_np.tile(A, reps)
@set_module('mxnet.numpy')
def trace(a, offset=0, axis1=0, axis2=1, out=None):
"""
Return the sum along diagonals of the array.
If `a` is 2-D, the sum along its diagonal with the given offset
is returned, i.e., the sum of elements ``a[i,i+offset]`` for all i.
If `a` has more than two dimensions, then the axes specified by axis1 and
axis2 are used to determine the 2-D sub-arrays whose traces are returned.
The shape of the resulting array is the same as that of `a` with `axis1`
and `axis2` removed.
Parameters
----------
a : ndarray
Input array, from which the diagonals are taken.
offset : int, optional
Offset of the diagonal from the main diagonal. Can be both positive
and negative. Defaults to 0.
axis1, axis2 : int, optional
Axes to be used as the first and second axis of the 2-D sub-arrays
from which the diagonals should be taken. Defaults are the first two
axes of `a`.
out : ndarray, optional
Array into which the output is placed. It must be of the right shape
and right type to hold the output.
Returns
-------
sum_along_diagonals : ndarray
If `a` is 2-D, the sum along the diagonal is returned. If `a` has
larger dimensions, then an array of sums along diagonals is returned.
Examples
--------
>>> a = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
>>> np.trace(a)
array(3.)
>>> a = np.arange(8).reshape((2, 2, 2))
>>> np.trace(a)
array([6., 8.])
>>> a = np.arange(24).reshape((2, 2, 2, 3))
>>> np.trace(a).shape
(2, 3)
"""
return _mx_nd_np.trace(a, offset, axis1, axis2, out)
@set_module('mxnet.numpy')
def transpose(a, axes=None):
"""
Permute the dimensions of an array.
Parameters
----------
a : ndarray
Input array.
axes : list of ints, optional
By default, reverse the dimensions,
otherwise permute the axes according to the values given.
Returns
-------
p : ndarray
a with its axes permuted.
.. note::
This function differs from the original `numpy.transpose
<https://docs.scipy.org/doc/numpy/reference/generated/numpy.transpose.html>`_ in
the following way(s):
* only ndarray is accepted as valid input, python iterables are not supported
* the operator always returns an `ndarray` that does not share the memory with the input
Examples
--------
>>> x = np.arange(4).reshape((2,2))
>>> x
array([[0., 1.],
[2., 3.]])
>>> np.transpose(x)
array([[0., 2.],
[1., 3.]])
>>> x = np.ones((1, 2, 3))
>>> np.transpose(x, (1, 0, 2)).shape
(2, 1, 3)
"""
return _mx_nd_np.transpose(a, axes)
@set_module('mxnet.numpy')
def repeat(a, repeats, axis=None):
"""
Repeat elements of an array.
Parameters
----------
a : array_like
Input array.
    repeats : int or array of ints
The number of repetitions for each element.
axis : int, optional
The axis along which to repeat values. By default, use the
flattened input array, and return a flat output array.
Returns
-------
repeated_array : ndarray
Output array which has the same shape as `a`, except along
the given axis.
See Also
--------
tile : Tile an array.
Examples
--------
>>> np.repeat(3, 4)
array([3, 3, 3, 3])
>>> x = np.array([[1,2],[3,4]])
>>> np.repeat(x, 2)
array([1, 1, 2, 2, 3, 3, 4, 4])
>>> np.repeat(x, 3, axis=1)
array([[1, 1, 1, 2, 2, 2],
[3, 3, 3, 4, 4, 4]])
>>> np.repeat(x, [1, 2], axis=0)
array([[1, 2],
[3, 4],
[3, 4]])
"""
return _mx_nd_np.repeat(a, repeats, axis)
@set_module('mxnet.numpy')
def tril(m, k=0):
r"""
Lower triangle of an array.
Return a copy of an array with elements above the `k`-th diagonal zeroed.
Parameters
----------
m : ndarray, shape (M, N)
Input array.
k : int, optional
Diagonal above which to zero elements. `k = 0` (the default) is the
main diagonal, `k < 0` is below it and `k > 0` is above.
Returns
-------
tril : ndarray, shape (M, N)
Lower triangle of `m`, of same shape and data-type as `m`.
See Also
--------
triu : same thing, only for the upper triangle
Examples
--------
>>> a = np.array([[1,2,3],[4,5,6],[7,8,9],[10,11,12]])
>>> np.tril(a, -1)
array([[ 0., 0., 0.],
[ 4., 0., 0.],
[ 7., 8., 0.],
[10., 11., 12.]])
"""
return _mx_nd_np.tril(m, k)
@set_module('mxnet.numpy')
def tri(N, M=None, k=0, dtype=None, ctx=None): # pylint: disable=redefined-outer-name
r"""
An array with ones at and below the given diagonal and zeros elsewhere.
Parameters
----------
N : int
Number of rows in the array.
M : int, optional
Number of columns in the array.
By default, `M` is taken equal to `N`.
k : int, optional
The sub-diagonal at and below which the array is filled.
`k` = 0 is the main diagonal, while `k` < 0 is below it,
and `k` > 0 is above. The default is 0.
dtype : dtype, optional
Data type of the returned array. The default is float.
Returns
-------
tri : ndarray of shape (N, M)
Array with its lower triangle filled with ones and zero elsewhere;
        in other words ``T[i,j] == 1`` for ``j <= i + k``, 0 otherwise.
Examples
--------
>>> np.tri(3, 5, 2, dtype=int)
array([[1, 1, 1, 0, 0],
[1, 1, 1, 1, 0],
[1, 1, 1, 1, 1]])
>>> np.tri(3, 5, -1)
array([[0., 0., 0., 0., 0.],
[1., 0., 0., 0., 0.],
[1., 1., 0., 0., 0.]])
"""
return _mx_nd_np.tri(N, M, k, dtype, ctx)
@set_module('mxnet.numpy')
def triu_indices(n, k=0, m=None, ctx=None): # pylint: disable=redefined-outer-name
r"""
Return the indices for the upper-triangle of an (n, m) array.
Parameters
----------
n : int
The size of the arrays for which the returned indices will
be valid.
k : int, optional
Diagonal offset (see `triu` for details).
m : int, optional
.. versionadded:: 1.9.0
The column dimension of the arrays for which the returned
arrays will be valid.
By default `m` is taken equal to `n`.
Returns
-------
inds : tuple, shape(2) of ndarrays, shape(`n`)
The indices for the triangle. The returned tuple contains two arrays,
each with the indices along one dimension of the array. Can be used
to slice a ndarray of shape(`n`, `n`).
See also
--------
tril_indices : similar function, for lower-triangular.
mask_indices : generic function accepting an arbitrary mask function.
triu, tril
Examples
--------
Compute two different sets of indices to access 4x4 arrays, one for the
upper triangular part starting at the main diagonal, and one starting two
diagonals further right:
>>> iu1 = np.triu_indices(4)
>>> iu2 = np.triu_indices(4, 2)
Here is how they can be used with a sample array:
>>> a = np.arange(16).reshape(4, 4)
>>> a
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11],
[12, 13, 14, 15]])
Both for indexing:
>>> a[iu1]
array([ 0, 1, 2, ..., 10, 11, 15])
And for assigning values:
>>> a[iu1] = -1
>>> a
array([[-1, -1, -1, -1],
[ 4, -1, -1, -1],
[ 8, 9, -1, -1],
[12, 13, 14, -1]])
These cover only a small part of the whole array (two diagonals right
of the main one):
>>> a[iu2] = -10
>>> a
array([[ -1, -1, -10, -10],
[ 4, -1, -1, -10],
[ 8, 9, -1, -1],
[ 12, 13, 14, -1]])
"""
return _mx_nd_np.triu_indices(n, k, m, ctx)
@set_module('mxnet.numpy')
def triu_indices_from(arr, k=0):
"""
Return the indices for the upper-triangle of arr.
See `triu_indices` for full details.
Parameters
----------
arr : ndarray, shape(N, N)
The indices will be valid for square arrays.
k : int, optional
Diagonal offset (see `triu` for details).
Returns
-------
triu_indices_from : tuple, shape(2) of ndarray, shape(N)
Indices for the upper-triangle of `arr`.
See Also
--------
triu_indices, triu
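    Examples
    --------
    A minimal sketch: the returned indices select the 10 upper-triangular
    elements of a 4x4 array.
    >>> a = np.arange(16).reshape(4, 4)
    >>> iu = np.triu_indices_from(a)
    >>> a[iu].shape
    (10,)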
"""
return _mx_nd_np.triu_indices_from(arr, k)
@set_module('mxnet.numpy')
def tril_indices(n, k=0, m=None):
"""
Return the indices for the lower-triangle of an (n, m) array.
Parameters
----------
n : int
The row dimension of the arrays for which the returned
indices will be valid.
k : int, optional
Diagonal offset (see `tril` for details).
m : int, optional
.. versionadded:: 1.9.0
The column dimension of the arrays for which the returned
arrays will be valid.
By default `m` is taken equal to `n`.
Returns
-------
inds : tuple of arrays
The indices for the triangle. The returned tuple contains two arrays,
each with the indices along one dimension of the array.
See also
--------
triu_indices : similar function, for upper-triangular.
mask_indices : generic function accepting an arbitrary mask function.
tril, triu
Examples
--------
Compute two different sets of indices to access 4x4 arrays, one for the
lower triangular part starting at the main diagonal, and one starting two
diagonals further right:
>>> il1 = np.tril_indices(4)
>>> il2 = np.tril_indices(4, 2)
Here is how they can be used with a sample array:
>>> a = np.arange(16).reshape(4, 4)
>>> a
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11],
[12, 13, 14, 15]])
Both for indexing:
>>> a[il1]
array([ 0, 4, 5, 8, 9, 10, 12, 13, 14, 15])
And for assigning values:
>>> a[il1] = -1
>>> a
array([[-1, 1, 2, 3],
[-1, -1, 6, 7],
[-1, -1, -1, 11],
[-1, -1, -1, -1]])
These cover almost the whole array (two diagonals right of the main one):
>>> a[il2] = -10
>>> a
array([[-10, -10, -10, 3],
[-10, -10, -10, -10],
[-10, -10, -10, -10],
[-10, -10, -10, -10]])
"""
if m is None:
m = n
return _mx_nd_np.tril_indices(n, k, m)
# pylint: disable=redefined-outer-name
@set_module('mxnet.numpy')
def triu(m, k=0):
r"""
Upper triangle of an array.
Return a copy of a matrix with the elements below the `k`-th diagonal
zeroed.
Please refer to the documentation for `tril` for further details.
See Also
--------
tril : lower triangle of an array
Examples
--------
>>> np.triu(np.array([[1,2,3],[4,5,6],[7,8,9],[10,11,12]]), -1)
array([[ 1, 2, 3],
[ 4, 5, 6],
[ 0, 8, 9],
[ 0, 0, 12]])
"""
return _mx_nd_np.triu(m, k)
@set_module('mxnet.numpy')
def arange(start, stop=None, step=1, dtype=None, ctx=None):
"""Return evenly spaced values within a given interval.
Values are generated within the half-open interval ``[start, stop)``
(in other words, the interval including `start` but excluding `stop`).
For integer arguments the function is equivalent to the Python built-in
`range` function, but returns an ndarray rather than a list.
Parameters
----------
start : number, optional
Start of interval. The interval includes this value. The default
start value is 0.
stop : number
End of interval. The interval does not include this value, except
in some cases where `step` is not an integer and floating point
round-off affects the length of `out`.
step : number, optional
Spacing between values. For any output `out`, this is the distance
between two adjacent values, ``out[i+1] - out[i]``. The default
        step size is 1. If `step` is specified as a positional argument,
`start` must also be given.
dtype : dtype
The type of the output array.
        Default dtype can be set to be consistent with official numpy by `npx.set_np(dtype=True)`.
* When npx.is_np_default_dtype() returns False, default dtype is float32;
* When npx.is_np_default_dtype() returns True, default dtype is int64.
Returns
-------
arange : ndarray
Array of evenly spaced values.
For floating point arguments, the length of the result is
``ceil((stop - start)/step)``. Because of floating point overflow,
this rule may result in the last element of `out` being greater
than `stop`.
Examples
--------
>>> np.arange(3)
array([0., 1., 2.])
>>> np.arange(3.0)
array([0., 1., 2.])
>>> np.arange(3,7)
array([3., 4., 5., 6.])
>>> np.arange(3,7,2)
array([3., 5.])
>>> np.arange(3).dtype
dtype('float32')
>>> npx.set_np(dtype=True)
>>> np.arange(3).dtype
dtype('int64')
"""
return _mx_nd_np.arange(start, stop, step, dtype, ctx)
# pylint: enable=redefined-outer-name
@set_module('mxnet.numpy')
def split(ary, indices_or_sections, axis=0):
"""Split an array into multiple sub-arrays.
Parameters
----------
ary : ndarray
Array to be divided into sub-arrays.
indices_or_sections : int or 1-D Python tuple, list or set.
If `indices_or_sections` is an integer, N, the array will be divided
into N equal arrays along `axis`. If such a split is not possible,
an error is raised.
If `indices_or_sections` is a 1-D array of sorted integers, the entries
indicate where along `axis` the array is split. For example,
``[2, 3]`` would, for ``axis=0``, result in
* ary[:2]
* ary[2:3]
* ary[3:]
If an index exceeds the dimension of the array along `axis`,
an empty sub-array is returned correspondingly.
axis : int, optional
The axis along which to split, default is 0.
Returns
-------
sub-arrays : list of ndarrays
A list of sub-arrays.
Raises
------
ValueError
If `indices_or_sections` is given as an integer, but
a split does not result in equal division.
See Also
--------
hsplit : Split array into multiple sub-arrays horizontally (column-wise).
vsplit : Split array into multiple sub-arrays vertically (row wise).
dsplit : Split array into multiple sub-arrays along the 3rd axis (depth).
concatenate : Join a sequence of arrays along an existing axis.
stack : Join a sequence of arrays along a new axis.
hstack : Stack arrays in sequence horizontally (column wise).
vstack : Stack arrays in sequence vertically (row wise).
dstack : Stack arrays in sequence depth wise (along third dimension).
Examples
--------
>>> x = np.arange(9.0)
>>> np.split(x, 3)
[array([0., 1., 2.]), array([3., 4., 5.]), array([6., 7., 8.])]
>>> np.split(x, [3, 5, 6, 8])
[array([0., 1., 2.]), array([3., 4.]), array([5.]), array([6., 7.]), array([])]
"""
return _mx_nd_np.split(ary, indices_or_sections, axis=axis)
@set_module('mxnet.numpy')
def array_split(ary, indices_or_sections, axis=0):
"""Split an array into multiple sub-arrays.
    If `indices_or_sections` is an integer, N, the array will be divided
    into N sub-arrays along `axis`. Unlike `split`, an equal division is not
    required: for an array of length l that should be split into n sections,
    it returns l % n sub-arrays of size l//n + 1 and the rest of size l//n.
If `indices_or_sections` is a 1-D array of sorted integers, the entries
indicate where along `axis` the array is split. For example, ``[2, 3]``
would, for ``axis=0``, result in
* ary[:2]
* ary[2:3]
* ary[3:]
If an index exceeds the dimension of the array along `axis`,
an empty sub-array is returned correspondingly.
Parameters
----------
ary : ndarray
Array to be divided into sub-arrays.
indices_or_sections : int or 1-D Python tuple, list or set.
Param used to determine the number and size of the subarray.
axis : int, optional
The axis along which to split, default is 0.
Returns
-------
sub-arrays : list of ndarrays
A list of sub-arrays.
Examples
--------
>>> x = np.arange(9.0)
>>> np.array_split(x, 3)
[array([0., 1., 2.]), array([3., 4., 5.]), array([6., 7., 8.])]
>>> np.array_split(x, [3, 5, 6, 8])
[array([0., 1., 2.]), array([3., 4.]), array([5.]), array([6., 7.]), array([])]
>>> x = np.arange(8.0)
>>> np.array_split(x, 3)
[array([0., 1., 2.]), array([3., 4., 5.]), array([6., 7.])]
>>> x = np.arange(7.0)
>>> np.array_split(x, 3)
[array([0., 1., 2.]), array([3., 4.]), array([5., 6.])]
"""
return _mx_nd_np.array_split(ary, indices_or_sections, axis=axis)
@set_module('mxnet.numpy')
def vsplit(ary, indices_or_sections):
r"""Split an array into multiple sub-arrays vertically (row-wise).
``vsplit`` is equivalent to ``split`` with `axis=0` (default): the array is always split
along the first axis regardless of the array dimension.
Parameters
----------
ary : ndarray
Array to be divided into sub-arrays.
indices_or_sections : int or 1 - D Python tuple, list or set.
If `indices_or_sections` is an integer, N, the array will be divided into N equal arrays
along axis 0. If such a split is not possible, an error is raised.
If `indices_or_sections` is a 1-D array of sorted integers, the entries indicate where
along axis 0 the array is split. For example, ``[2, 3]`` would result in
* ary[:2]
* ary[2:3]
* ary[3:]
If an index exceeds the dimension of the array along axis 0, an error will be thrown.
Returns
-------
sub-arrays : list of ndarrays
A list of sub-arrays.
See Also
--------
split : Split an array into multiple sub-arrays of equal size.
.. note::
This function differs from the original `numpy.vsplit
<https://docs.scipy.org/doc/numpy/reference/generated/numpy.vsplit.html>`_ in
the following aspects:
* Currently parameter ``indices_or_sections`` does not support ndarray, but supports scalar,
tuple and list.
* In ``indices_or_sections``, if an index exceeds the dimension of the array along axis 0,
an error will be thrown.
Examples
--------
>>> x = np.arange(16.0).reshape(4, 4)
>>> x
array([[ 0., 1., 2., 3.],
[ 4., 5., 6., 7.],
[ 8., 9., 10., 11.],
[ 12., 13., 14., 15.]])
>>> np.vsplit(x, 2)
[array([[0., 1., 2., 3.],
[4., 5., 6., 7.]]), array([[ 8., 9., 10., 11.],
[12., 13., 14., 15.]])]
>>> # With a higher dimensional array the split is still along the first axis.
>>> x = np.arange(8.0).reshape(2, 2, 2)
>>> x
array([[[ 0., 1.],
[ 2., 3.]],
[[ 4., 5.],
[ 6., 7.]]])
>>> np.vsplit(x, 2)
[array([[[0., 1.],
[2., 3.]]]), array([[[4., 5.],
[6., 7.]]])]
"""
return _mx_nd_np.vsplit(ary, indices_or_sections)
@set_module('mxnet.numpy')
def dsplit(ary, indices_or_sections):
r"""
Split array into multiple sub-arrays along the 3rd axis (depth).
Please refer to the `split` documentation. `dsplit` is equivalent
to `split` with ``axis=2``, the array is always split along the third
axis provided the array dimension is greater than or equal to 3.
Parameters
----------
ary : ndarray
Array to be divided into sub-arrays.
indices_or_sections : int or 1 - D Python tuple, list or set.
If `indices_or_sections` is an integer, N, the array will be divided into N equal arrays
along axis 2. If such a split is not possible, an error is raised.
If `indices_or_sections` is a 1-D array of sorted integers, the entries indicate where
along axis 2 the array is split. For example, ``[2, 3]`` would result in
* ary[:, :, :2]
* ary[:, :, 2:3]
* ary[:, :, 3:]
If an index exceeds the dimension of the array along axis 2, an error will be thrown.
Returns
-------
sub-arrays : list of ndarrays
A list of sub-arrays.
See Also
--------
split : Split an array into multiple sub-arrays of equal size.
.. note::
This function differs from the original `numpy.dsplit
<https://docs.scipy.org/doc/numpy/reference/generated/numpy.dsplit.html>`_ in
the following aspects:
* Currently parameter ``indices_or_sections`` does not support ndarray, but supports scalar,
tuple and list.
* In ``indices_or_sections``, if an index exceeds the dimension of the array along axis 2,
an error will be thrown.
Examples
--------
>>> x = np.arange(16.0).reshape(2, 2, 4)
>>> x
array([[[ 0., 1., 2., 3.],
[ 4., 5., 6., 7.]],
[[ 8., 9., 10., 11.],
[12., 13., 14., 15.]]])
>>> np.dsplit(x, 2)
[array([[[ 0., 1.],
[ 4., 5.]],
[[ 8., 9.],
[12., 13.]]]), array([[[ 2., 3.],
[ 6., 7.]],
[[10., 11.],
[14., 15.]]])]
>>> np.dsplit(x, np.array([3, 6]))
[array([[[ 0., 1., 2.],
[ 4., 5., 6.]],
[[ 8., 9., 10.],
[12., 13., 14.]]]),
array([[[ 3.],
[ 7.]],
[[11.],
[15.]]]),
array([], shape=(2, 2, 0), dtype=float64)]
"""
return _mx_nd_np.dsplit(ary, indices_or_sections)
@set_module('mxnet.numpy')
def concatenate(seq, axis=0, out=None):
"""Join a sequence of arrays along an existing axis.
Parameters
----------
    seq : sequence of ndarrays
The arrays must have the same shape, except in the dimension
corresponding to `axis` (the first, by default).
axis : int, optional
The axis along which the arrays will be joined. If axis is None,
arrays are flattened before use. Default is 0.
out : ndarray, optional
If provided, the destination to place the result. The shape must be
correct, matching that of what concatenate would have returned if no
out argument were specified.
Returns
-------
res : ndarray
The concatenated array.
See Also
--------
split : Split array into a list of multiple sub-arrays of equal size.
hsplit : Split array into multiple sub-arrays horizontally (column wise)
vsplit : Split array into multiple sub-arrays vertically (row wise)
dsplit : Split array into multiple sub-arrays along the 3rd axis (depth).
stack : Stack a sequence of arrays along a new axis.
hstack : Stack arrays in sequence horizontally (column wise)
vstack : Stack arrays in sequence vertically (row wise)
dstack : Stack arrays in sequence depth wise (along third dimension)
Examples
--------
>>> a = np.array([[1, 2], [3, 4]])
>>> b = np.array([[5, 6]])
>>> np.concatenate((a, b), axis=0)
array([[1., 2.],
[3., 4.],
[5., 6.]])
>>> np.concatenate((a, b.T), axis=1)
array([[1., 2., 5.],
[3., 4., 6.]])
>>> np.concatenate((a, b), axis=None)
array([1., 2., 3., 4., 5., 6.])
"""
return _mx_nd_np.concatenate(seq, axis=axis, out=out)
@set_module('mxnet.numpy')
def append(arr, values, axis=None): # pylint: disable=redefined-outer-name
"""
Append values to the end of an array.
Parameters
----------
arr : ndarray
Values are appended to a copy of this array.
values : ndarray
These values are appended to a copy of `arr`. It must be of the
correct shape (the same shape as `arr`, excluding `axis`). If
`axis` is not specified, `values` can be any shape and will be
flattened before use.
axis : int, optional
The axis along which `values` are appended. If `axis` is not
given, both `arr` and `values` are flattened before use.
Returns
-------
append : ndarray
A copy of `arr` with `values` appended to `axis`. Note that
`append` does not occur in-place: a new array is allocated and
filled. If `axis` is None, `out` is a flattened array.
Examples
--------
>>> np.append(np.array([1, 2, 3]), np.array([[4, 5, 6],[7, 8, 9]]))
array([1., 2., 3., 4., 5., 6., 7., 8., 9.])
When `axis` is specified, `values` must have the correct shape.
>>> np.append(np.array([[1, 2, 3], [4, 5, 6]]), np.array([[7, 8, 9]]), axis=0)
array([[1., 2., 3.],
[4., 5., 6.],
[7., 8., 9.]])
"""
return _mx_nd_np.append(arr, values, axis=axis)
@set_module('mxnet.numpy')
def stack(arrays, axis=0, out=None):
"""Join a sequence of arrays along a new axis.
The axis parameter specifies the index of the new axis in the dimensions of the result.
For example, if `axis=0` it will be the first dimension and if `axis=-1` it will be the last dimension.
Parameters
----------
arrays : sequence of array_like
Each array must have the same shape.
axis : int, optional
The axis in the result array along which the input arrays are stacked.
out : ndarray, optional
If provided, the destination to place the result. The shape must be correct,
matching that of what stack would have returned if no out argument were specified.
Returns
-------
stacked : ndarray
The stacked array has one more dimension than the input arrays.
See Also
--------
concatenate : Join a sequence of arrays along an existing axis.
split : Split array into a list of multiple sub-arrays of equal size.
Examples
--------
>>> arrays = [np.random.rand(3, 4) for _ in range(10)]
>>> np.stack(arrays, axis=0).shape
(10, 3, 4)
>>> np.stack(arrays, axis=1).shape
(3, 10, 4)
>>> np.stack(arrays, axis=2).shape
(3, 4, 10)
>>> a = np.array([1, 2, 3])
>>> b = np.array([2, 3, 4])
>>> np.stack((a, b))
array([[1., 2., 3.],
[2., 3., 4.]])
>>> np.stack((a, b), axis=-1)
array([[1., 2.],
[2., 3.],
[3., 4.]])
"""
return _mx_nd_np.stack(arrays, axis=axis, out=out)
@set_module('mxnet.numpy')
def vstack(arrays, out=None):
r"""Stack arrays in sequence vertically (row wise).
This is equivalent to concatenation along the first axis after 1-D arrays
of shape `(N,)` have been reshaped to `(1,N)`. Rebuilds arrays divided by
`vsplit`.
This function makes most sense for arrays with up to 3 dimensions. For
instance, for pixel-data with a height (first axis), width (second axis),
and r/g/b channels (third axis). The functions `concatenate` and `stack`
provide more general stacking and concatenation operations.
Parameters
----------
tup : sequence of ndarrays
The arrays must have the same shape along all but the first axis.
1-D arrays must have the same length.
Returns
-------
stacked : ndarray
The array formed by stacking the given arrays, will be at least 2-D.
Examples
--------
>>> a = np.array([1, 2, 3])
>>> b = np.array([2, 3, 4])
>>> np.vstack((a, b))
array([[1., 2., 3.],
[2., 3., 4.]])
>>> a = np.array([[1], [2], [3]])
>>> b = np.array([[2], [3], [4]])
>>> np.vstack((a, b))
array([[1.],
[2.],
[3.],
[2.],
[3.],
[4.]])
"""
return _mx_nd_np.vstack(arrays)
@set_module('mxnet.numpy')
def row_stack(arrays):
r"""Stack arrays in sequence vertically (row wise).
This is equivalent to concatenation along the first axis after 1-D arrays
of shape `(N,)` have been reshaped to `(1,N)`. Rebuilds arrays divided by
`vsplit`.
This function makes most sense for arrays with up to 3 dimensions. For
instance, for pixel-data with a height (first axis), width (second axis),
and r/g/b channels (third axis). The functions `concatenate` and `stack`
provide more general stacking and concatenation operations.
Parameters
----------
tup : sequence of ndarrays
The arrays must have the same shape along all but the first axis.
1-D arrays must have the same length.
Returns
-------
stacked : ndarray
The array formed by stacking the given arrays, will be at least 2-D.
Examples
--------
>>> a = np.array([1, 2, 3])
>>> b = np.array([2, 3, 4])
>>> np.vstack((a, b))
array([[1., 2., 3.],
[2., 3., 4.]])
>>> a = np.array([[1], [2], [3]])
>>> b = np.array([[2], [3], [4]])
>>> np.vstack((a, b))
array([[1.],
[2.],
[3.],
[2.],
[3.],
[4.]])
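    ``row_stack`` is intended as an alias of ``vstack``, so the calls above can
    equivalently be written as, for example (a minimal sketch):
    >>> np.row_stack((np.array([1, 2, 3]), np.array([2, 3, 4])))
    array([[1., 2., 3.],
           [2., 3., 4.]])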
"""
return _mx_nd_np.row_stack(arrays)
@set_module('mxnet.numpy')
def column_stack(tup):
"""
Stack 1-D arrays as columns into a 2-D array.
Take a sequence of 1-D arrays and stack them as columns
to make a single 2-D array. 2-D arrays are stacked as-is,
just like with `hstack`. 1-D arrays are turned into 2-D columns
first.
Parameters
----------
tup : sequence of 1-D or 2-D arrays.
Arrays to stack. All of them must have the same first dimension.
Returns
--------
stacked : 2-D array
The array formed by stacking the given arrays.
See Also
--------
stack, hstack, vstack, concatenate
Examples
--------
>>> a = np.array((1,2,3))
>>> b = np.array((2,3,4))
>>> np.column_stack((a,b))
array([[1., 2.],
[2., 3.],
[3., 4.]])
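    2-D inputs are stacked as-is while 1-D inputs become columns; a minimal
    sketch of mixing the two:
    >>> np.column_stack((np.array([[1, 2], [3, 4]]), np.array([5, 6])))
    array([[1., 2., 5.],
           [3., 4., 6.]])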
"""
return _mx_nd_np.column_stack(tup)
@set_module('mxnet.numpy')
def hstack(arrays):
"""
Stack arrays in sequence horizontally (column wise).
This is equivalent to concatenation along the second axis,
except for 1-D arrays where it concatenates along the first axis.
Rebuilds arrays divided by hsplit.
This function makes most sense for arrays with up to 3 dimensions.
For instance, for pixel-data with a height (first axis), width (second axis),
and r/g/b channels (third axis). The functions concatenate,
stack and block provide more general stacking and concatenation operations.
Parameters
----------
tup : sequence of ndarrays
The arrays must have the same shape along all but the second axis, except 1-D arrays which can be any length.
Returns
-------
stacked : ndarray
The array formed by stacking the given arrays.
Examples
--------
>>> from mxnet import np,npx
>>> a = np.array((1,2,3))
>>> b = np.array((2,3,4))
>>> np.hstack((a,b))
array([1., 2., 3., 2., 3., 4.])
>>> a = np.array([[1],[2],[3]])
>>> b = np.array([[2],[3],[4]])
>>> np.hstack((a,b))
array([[1., 2.],
[2., 3.],
[3., 4.]])
"""
return _mx_nd_np.hstack(arrays)
@set_module('mxnet.numpy')
def dstack(arrays):
"""
Stack arrays in sequence depth wise (along third axis).
This is equivalent to concatenation along the third axis after 2-D arrays
of shape `(M,N)` have been reshaped to `(M,N,1)` and 1-D arrays of shape
`(N,)` have been reshaped to `(1,N,1)`. Rebuilds arrays divided by
`dsplit`.
This function makes most sense for arrays with up to 3 dimensions. For
instance, for pixel-data with a height (first axis), width (second axis),
and r/g/b channels (third axis). The functions `concatenate`, `stack` and
`block` provide more general stacking and concatenation operations.
Parameters
----------
tup : sequence of arrays
The arrays must have the same shape along all but the third axis.
1-D or 2-D arrays must have the same shape.
Returns
-------
stacked : ndarray
The array formed by stacking the given arrays, will be at least 3-D.
Examples
--------
>>> a = np.array((1,2,3))
>>> b = np.array((2,3,4))
>>> np.dstack((a,b))
array([[[1, 2],
[2, 3],
[3, 4]]])
>>> a = np.array([[1],[2],[3]])
>>> b = np.array([[2],[3],[4]])
>>> np.dstack((a,b))
array([[[1, 2]],
[[2, 3]],
[[3, 4]]])
"""
return _npi.dstack(*arrays)
@set_module('mxnet.numpy')
@wrap_np_binary_func
def maximum(x1, x2, out=None, **kwargs):
"""
Returns element-wise maximum of the input arrays with broadcasting.
Parameters
----------
x1, x2 : scalar or mxnet.numpy.ndarray
The arrays holding the elements to be compared. They must have the same shape,
or shapes that can be broadcast to a single shape.
Returns
-------
out : mxnet.numpy.ndarray or scalar
The maximum of x1 and x2, element-wise. This is a scalar if both x1 and x2 are scalars.
Examples
--------
>>> np.maximum(np.array([2, 3, 4]), np.array([1, 5, 2]))
array([2., 5., 4.])
>>> np.maximum(np.eye(2), np.array([0.5, 2])) # broadcasting
array([[1. , 2. ],
[0.5, 2. ]])
"""
return _mx_nd_np.maximum(x1, x2, out=out)
@set_module('mxnet.numpy')
@wrap_np_binary_func
def fmax(x1, x2, out=None, **kwargs):
"""
Returns element-wise maximum of the input arrays with broadcasting. (Ignores NaNs)
Parameters
----------
x1, x2 : scalar or mxnet.numpy.ndarray
The arrays holding the elements to be compared. They must have the same shape,
or shapes that can be broadcast to a single shape.
Returns
-------
out : mxnet.numpy.ndarray or scalar
The maximum of x1 and x2, element-wise. This is a scalar if both x1 and x2 are scalars.
Examples
--------
>>> np.fmax(np.array([2, 3, 4]), np.array([1, 5, 2]))
array([2., 5., 4.])
>>> np.fmax(np.eye(2), np.array([0.5, 2])) # broadcasting
array([[1. , 2. ],
[0.5, 2. ]])
"""
return _mx_nd_np.fmax(x1, x2, out=out)
@set_module('mxnet.numpy')
@wrap_np_binary_func
def minimum(x1, x2, out=None, **kwargs):
"""
Returns element-wise minimum of the input arrays with broadcasting.
Parameters
----------
x1, x2 : scalar or mxnet.numpy.ndarray
The arrays holding the elements to be compared. They must have the same shape,
or shapes that can be broadcast to a single shape.
Returns
-------
out : mxnet.numpy.ndarray or scalar
The minimum of x1 and x2, element-wise. This is a scalar if both x1 and x2 are scalars.
Examples
--------
>>> np.minimum(np.array([2, 3, 4]), np.array([1, 5, 2]))
array([1., 3., 2.])
>>> np.minimum(np.eye(2), np.array([0.5, 2])) # broadcasting
array([[0.5, 0. ],
[0. , 1. ]])
"""
return _mx_nd_np.minimum(x1, x2, out=out)
@set_module('mxnet.numpy')
@wrap_np_binary_func
def fmin(x1, x2, out=None, **kwargs):
"""
Returns element-wise minimum of the input arrays with broadcasting. (Ignores NaNs)
Parameters
----------
x1, x2 : scalar or mxnet.numpy.ndarray
The arrays holding the elements to be compared. They must have the same shape,
or shapes that can be broadcast to a single shape.
Returns
-------
out : mxnet.numpy.ndarray or scalar
        The minimum of x1 and x2, element-wise, ignoring NaNs. This is a scalar if both x1 and x2 are scalars.
Examples
--------
>>> np.fmin(np.array([2, 3, 4]), np.array([1, 5, 2]))
array([1., 3., 2.])
>>> np.fmin(np.eye(2), np.array([0.5, 2])) # broadcasting
array([[0.5, 0. ],
[0. , 1. ]])
"""
return _mx_nd_np.fmin(x1, x2, out=out)
@set_module('mxnet.numpy')
def max(a, axis=None, out=None, keepdims=False):
"""
Return the maximum of an array or maximum along an axis.
Parameters
----------
a : ndarray
Input data.
axis : int, optional
Axis along which to operate. By default, flattened input is used.
out : ndarray, optional
Alternative output array in which to place the result. Must
be of the same shape and buffer length as the expected output.
See `doc.ufuncs` (Section "Output arguments") for more details.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the original `arr`.
Returns
-------
max : ndarray
Maximum of `a`. If `axis` is None, the result is an array of dimension 1.
If `axis` is given, the result is an array of dimension
``a.ndim - 1``.
See Also
--------
min :
The minimum value of an array along a given axis, ignoring any nan.
maximum :
Element-wise maximum of two arrays, ignoring any nan.
argmax :
Return the indices of the maximum values.
Notes
-----
    NaN in the original `numpy` is denoted as nan and will be ignored.
Don't use `max` for element-wise comparison of 2 arrays; when
``a.shape[0]`` is 2, ``maximum(a[0], a[1])`` is faster than
``max(a, axis=0)``.
Examples
--------
>>> a = np.arange(4).reshape((2,2))
>>> a
array([[0., 1.],
[2., 3.]])
>>> np.max(a) # Maximum of the flattened array
array(3.)
>>> np.max(a, axis=0) # Maxima along the first axis
array([2., 3.])
>>> np.max(a, axis=1) # Maxima along the second axis
array([1., 3.])
>>> b = np.arange(5, dtype=np.float32)
>>> b[2] = np.nan
>>> np.max(b)
array(4.)
"""
return _mx_nd_np.max(a, axis=axis, out=out, keepdims=keepdims)
@set_module('mxnet.numpy')
def min(a, axis=None, out=None, keepdims=False):
"""
Return the minimum of an array or minimum along an axis.
Parameters
----------
a : ndarray
Input data.
axis : int, optional
Axis along which to operate. By default, flattened input is used.
out : ndarray, optional
Alternative output array in which to place the result. Must
be of the same shape and buffer length as the expected output.
See `doc.ufuncs` (Section "Output arguments") for more details.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the original `arr`.
Returns
-------
min : ndarray
Minimum of `a`. If `axis` is None, the result is an array of dimension 1.
If `axis` is given, the result is an array of dimension
``a.ndim - 1``.
See Also
--------
max :
The maximum value of an array along a given axis, ignoring any nan.
minimum :
Element-wise minimum of two arrays, ignoring any nan.
Notes
-----
    NaN in the original `numpy` is denoted as nan and will be ignored.
Don't use `min` for element-wise comparison of 2 arrays; when
``a.shape[0]`` is 2, ``minimum(a[0], a[1])`` is faster than
``min(a, axis=0)``.
Examples
--------
>>> a = np.arange(4).reshape((2,2))
>>> a
array([[0., 1.],
[2., 3.]])
>>> np.min(a) # Minimum of the flattened array
array(0.)
>>> np.min(a, axis=0) # Minima along the first axis
array([0., 1.])
>>> np.min(a, axis=1) # Minima along the second axis
array([0., 2.])
>>> b = np.arange(5, dtype=np.float32)
>>> b[2] = np.nan
>>> np.min(b)
array(0.) # nan will be ignored
"""
return _mx_nd_np.min(a, axis=axis, out=out, keepdims=keepdims)
@set_module('mxnet.numpy')
def swapaxes(a, axis1, axis2):
"""Interchange two axes of an array.
Parameters
----------
a : ndarray
Input array.
axis1 : int
First axis.
axis2 : int
Second axis.
Returns
-------
a_swapped : ndarray
Swapped array. This is always a copy of the input array.
Examples
--------
>>> x = np.array([[1,2,3]])
>>> np.swapaxes(x,0,1)
array([[1.],
[2.],
[3.]])
>>> x = np.array([[[0,1],[2,3]],[[4,5],[6,7]]])
>>> x
array([[[0., 1.],
[2., 3.]],
[[4., 5.],
[6., 7.]]])
>>> np.swapaxes(x,0,2)
array([[[0., 4.],
[2., 6.]],
[[1., 5.],
[3., 7.]]])
"""
return _npi.swapaxes(a, dim1=axis1, dim2=axis2)
@set_module('mxnet.numpy')
def clip(a, a_min, a_max, out=None):
"""clip(a, a_min, a_max, out=None)
Clip (limit) the values in an array.
Given an interval, values outside the interval are clipped to
the interval edges. For example, if an interval of ``[0, 1]``
is specified, values smaller than 0 become 0, and values larger
than 1 become 1.
Parameters
----------
a : ndarray
Array containing elements to clip.
a_min : scalar or `None`
Minimum value. If `None`, clipping is not performed on lower
interval edge. Not more than one of `a_min` and `a_max` may be
`None`.
a_max : scalar or `None`
Maximum value. If `None`, clipping is not performed on upper
interval edge. Not more than one of `a_min` and `a_max` may be
`None`.
out : ndarray, optional
The results will be placed in this array. It may be the input
array for in-place clipping. `out` must be of the right shape
to hold the output. Its type is preserved.
Returns
-------
clipped_array : ndarray
An array with the elements of `a`, but where values
< `a_min` are replaced with `a_min`, and those > `a_max`
with `a_max`.
Notes
-----
array_like `a_min` and `a_max` are not supported.
Examples
--------
>>> a = np.arange(10)
>>> np.clip(a, 1, 8)
array([1., 1., 2., 3., 4., 5., 6., 7., 8., 8.])
>>> a
array([0., 1., 2., 3., 4., 5., 6., 7., 8., 9.])
>>> np.clip(a, 3, 6, out=a)
array([3., 3., 3., 3., 4., 5., 6., 6., 6., 6.])
"""
from numbers import Number
if isinstance(a, Number):
# In case input is a scalar, the computation would fall back to native numpy.
# The value returned would be a python scalar.
return _np.clip(a, a_min, a_max, out=None)
return _mx_nd_np.clip(a, a_min, a_max, out=out)
@set_module('mxnet.numpy')
def argmax(a, axis=None, out=None):
r"""
Returns the indices of the maximum values along an axis.
Parameters
----------
a : ndarray
Input array. Only support ndarrays of dtype `float16`, `float32`, and `float64`.
axis : int, optional
By default, the index is into the flattened array, otherwise
along the specified axis.
out : ndarray or None, optional
If provided, the result will be inserted into this array. It should
be of the appropriate shape and dtype.
Returns
-------
    index_array : ndarray of indices whose dtype is the same as that of the input ndarray.
Array of indices into the array. It has the same shape as `a.shape`
with the dimension along `axis` removed.
.. note::
In case of multiple occurrences of the maximum values, the indices
corresponding to the first occurrence are returned.
This function differs from the original `numpy.argmax
<https://docs.scipy.org/doc/numpy/reference/generated/numpy.argmax.html>`_ in
the following aspects:
* Input type does not support Python native iterables(list, tuple, ...).
* ``out`` param: cannot perform auto broadcasting. ``out`` ndarray's shape must be
the same as the expected output.
* ``out`` param: cannot perform auto type cast. ``out`` ndarray's dtype must be the
same as the expected output.
* ``out`` param does not support scalar input case.
Examples
--------
>>> a = np.arange(6).reshape(2,3) + 10
>>> a
array([[10., 11., 12.],
[13., 14., 15.]])
>>> np.argmax(a)
array(5.)
>>> np.argmax(a, axis=0)
array([1., 1., 1.])
>>> np.argmax(a, axis=1)
array([2., 2.])
>>> b = np.arange(6)
>>> b[1] = 5
>>> b
array([0., 5., 2., 3., 4., 5.])
>>> np.argmax(b) # Only the first occurrence is returned.
array(1.)
Specify ``out`` ndarray:
>>> a = np.arange(6).reshape(2,3) + 10
>>> b = np.zeros((2,))
>>> np.argmax(a, axis=1, out=b)
array([2., 2.])
>>> b
array([2., 2.])
"""
return _mx_nd_np.argmax(a, axis, out)
@set_module('mxnet.numpy')
def argmin(a, axis=None, out=None):
r"""
Returns the indices of the minimum values along an axis.
Parameters
----------
a : ndarray
Input array. Only support ndarrays of dtype `float16`, `float32`, and `float64`.
axis : int, optional
By default, the index is into the flattened array, otherwise
along the specified axis.
out : ndarray or None, optional
If provided, the result will be inserted into this array. It should
be of the appropriate shape and dtype.
Returns
-------
    index_array : ndarray of indices whose dtype is the same as that of the input ndarray.
Array of indices into the array. It has the same shape as `a.shape`
with the dimension along `axis` removed.
.. note::
In case of multiple occurrences of the minimum values, the indices
corresponding to the first occurrence are returned.
This function differs from the original `numpy.argmin
<https://docs.scipy.org/doc/numpy/reference/generated/numpy.argmin.html>`_ in
the following aspects:
* Input type does not support Python native iterables(list, tuple, ...).
* ``out`` param: cannot perform auto broadcasting. ``out`` ndarray's shape must be
the same as the expected output.
* ``out`` param: cannot perform auto type cast. ``out`` ndarray's dtype must be the
same as the expected output.
* ``out`` param does not support scalar input case.
Examples
--------
>>> a = np.arange(6).reshape(2,3) + 10
>>> a
array([[10., 11., 12.],
[13., 14., 15.]])
>>> np.argmin(a)
array(0.)
>>> np.argmin(a, axis=0)
array([0., 0., 0.])
>>> np.argmin(a, axis=1)
array([0., 0.])
>>> b = np.arange(6)
>>> b[2] = 0
>>> b
array([0., 1., 0., 3., 4., 5.])
    >>> np.argmin(b) # Only the first occurrence is returned.
array(0.)
Specify ``out`` ndarray:
>>> a = np.arange(6).reshape(2,3) + 10
>>> b = np.zeros((2,))
>>> np.argmin(a, axis=1, out=b)
array([0., 0.])
>>> b
array([0., 0.])
"""
return _mx_nd_np.argmin(a, axis, out)
@set_module('mxnet.numpy')
def amax(a, axis=None, out=None, keepdims=False):
"""
Return the maximum of an array or maximum along an axis.
Parameters
----------
a : ndarray
Input data.
axis : int, optional
Axis along which to operate. By default, flattened input is used.
out : ndarray, optional
Alternative output array in which to place the result. Must
be of the same shape and buffer length as the expected output.
See `doc.ufuncs` (Section "Output arguments") for more details.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the original `arr`.
Returns
-------
max : ndarray
Maximum of `a`. If `axis` is None, the result is an array of dimension 1.
If `axis` is given, the result is an array of dimension
``a.ndim - 1``.
See Also
--------
min :
The minimum value of an array along a given axis, ignoring any nan.
maximum :
Element-wise maximum of two arrays, ignoring any nan.
argmax :
Return the indices of the maximum values.
Notes
-----
    NaN in the original `numpy` is denoted as nan and will be ignored.
Don't use `max` for element-wise comparison of 2 arrays; when
``a.shape[0]`` is 2, ``maximum(a[0], a[1])`` is faster than
``max(a, axis=0)``.
Examples
--------
>>> a = np.arange(4).reshape((2,2))
>>> a
array([[0., 1.],
[2., 3.]])
>>> np.max(a) # Maximum of the flattened array
array(3.)
>>> np.max(a, axis=0) # Maxima along the first axis
array([2., 3.])
>>> np.max(a, axis=1) # Maxima along the second axis
array([1., 3.])
>>> b = np.arange(5, dtype=np.float32)
>>> b[2] = np.nan
>>> np.max(b)
array(4.)
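    ``amax`` performs the same reduction; for instance (a minimal sketch):
    >>> np.amax(np.arange(4).reshape((2, 2)), axis=0)
    array([2., 3.])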
"""
return _mx_nd_np.amax(a, axis=axis, out=out, keepdims=keepdims)
@set_module('mxnet.numpy')
def amin(a, axis=None, out=None, keepdims=False):
"""
Return the minimum of an array or minimum along an axis.
Parameters
----------
a : ndarray
Input data.
axis : int, optional
Axis along which to operate. By default, flattened input is used.
out : ndarray, optional
Alternative output array in which to place the result. Must
be of the same shape and buffer length as the expected output.
See `doc.ufuncs` (Section "Output arguments") for more details.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the original `arr`.
Returns
-------
min : ndarray
Minimum of `a`. If `axis` is None, the result is an array of dimension 1.
If `axis` is given, the result is an array of dimension
``a.ndim - 1``.
See Also
--------
max :
The maximum value of an array along a given axis, ignoring any nan.
minimum :
Element-wise minimum of two arrays, ignoring any nan.
Notes
-----
    NaN in the original `numpy` is denoted as nan and will be ignored.
Don't use `min` for element-wise comparison of 2 arrays; when
``a.shape[0]`` is 2, ``minimum(a[0], a[1])`` is faster than
``min(a, axis=0)``.
Examples
--------
>>> a = np.arange(4).reshape((2,2))
>>> a
array([[0., 1.],
[2., 3.]])
>>> np.min(a) # Minimum of the flattened array
array(0.)
>>> np.min(a, axis=0) # Minima along the first axis
array([0., 1.])
>>> np.min(a, axis=1) # Minima along the second axis
array([0., 2.])
>>> b = np.arange(5, dtype=np.float32)
>>> b[2] = np.nan
>>> np.min(b)
array(0.) # nan will be ignored
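    ``amin`` performs the same reduction; for instance (a minimal sketch):
    >>> np.amin(np.arange(4).reshape((2, 2)), axis=0)
    array([0., 1.])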
"""
return _mx_nd_np.amin(a, axis=axis, out=out, keepdims=keepdims)
@set_module('mxnet.numpy')
def average(a, axis=None, weights=None, returned=False, out=None):
"""
Compute the weighted average along the specified axis.
Parameters
--------
a : ndarray
Array containing data to be averaged.
axis : None or int or tuple of ints, optional
Axis or axes along which to average a.
The default, axis=None, will average over
all of the elements of the input array.
If axis is negative it counts from the last to the first axis.
New in version 1.7.0.
If axis is a tuple of ints, averaging is
performed on all of the axes specified in the tuple
instead of a single axis or all the axes as before.
weights : ndarray, optional
    An array of weights associated with the values in a; it must have the same dtype as a.
Each value in a contributes to the average according to its associated weight.
The weights array can either be 1-D (in which case its length must be
the size of a along the given axis) or of the same shape as a.
If weights=None, then all data in a are assumed to have a weight equal to one.
The 1-D calculation is: avg = sum(a * weights) / sum(weights)
The only constraint on weights is that sum(weights) must not be 0.
returned : bool, optional
Default is False.
If True, the tuple (average, sum_of_weights) is returned,
otherwise only the average is returned.
If weights=None, sum_of_weights is equivalent to
the number of elements over which the average is taken.
out : ndarray, optional
If provided, the calculation is done into this array.
Returns
--------
retval, [sum_of_weights] : ndarray
Return the average along the specified axis.
When returned is True, return a tuple with the average as the first element
and the sum of the weights as the second element. sum_of_weights is of the same type as retval.
    If a is integral, the result dtype will be the current default dtype
    (float32 when npx.is_np_default_dtype() returns False, float64 when it
    returns True); otherwise it will be the same as the dtype of a.
Raises
--------
MXNetError
* When all weights along axis sum to zero.
* When the length of 1D weights is not the same as the shape of a along axis.
* When given 1D weights, the axis is not specified or is not int.
* When the shape of weights and a differ, but weights are not 1D.
See also
--------
mean
.. note::
    This function differs from the original `numpy.average
<https://numpy.org/devdocs/reference/generated/numpy.average.html>`_ in
the following way(s):
* Does not guarantee the same behavior with numpy when given float16 dtype and overflow happens
* Does not support complex dtype
* The dtypes of a and weights must be the same
* Integral a results in float32 or float64 returned dtype:
* When npx.is_np_default_dtype() returns False, default dtype is float32,
* When npx.is_np_default_dtype() returns True, default dtype is float64;
Examples
--------
>>> data = np.arange(1, 5)
>>> data
array([1., 2., 3., 4.])
>>> np.average(data)
array(2.5)
>>> np.average(np.arange(1, 11), weights=np.arange(10, 0, -1))
array(4.)
>>> data = np.arange(6).reshape((3,2))
>>> data
array([[0., 1.],
[2., 3.],
[4., 5.]])
    >>> weights = np.array([0.25, 0.75])
    >>> weights
    array([0.25, 0.75])
>>> np.average(data, axis=1, weights=weights)
array([0.75, 2.75, 4.75])
"""
return _mx_nd_np.average(a, axis=axis, weights=weights, returned=returned, out=out)
# pylint: disable=redefined-outer-name
@set_module('mxnet.numpy')
def mean(a, axis=None, dtype=None, out=None, keepdims=False): # pylint: disable=arguments-differ
"""
Compute the arithmetic mean along the specified axis.
Returns the average of the array elements.
The average is taken over the flattened array by default, otherwise over the specified axis.
Parameters
----------
a : ndarray
ndarray containing numbers whose mean is desired.
axis : None or int or tuple of ints, optional
Axis or axes along which the means are computed. The default is to compute the mean of the flattened array.
If this is a tuple of ints, a mean is performed over multiple axes,
instead of a single axis or all the axes as before.
dtype : data-type, optional
Type to use in computing the mean.
For integer inputs, the default is of your current default dtype,
When npx.is_np_default_dtype() returns False, default dtype is float32,
When npx.is_np_default_dtype() returns True, default dtype is float64;
For floating point inputs, it is the same as the input dtype.
out : ndarray, optional
Alternate output array in which to place the result. The default is None; if provided,
it must have the same shape and type as the expected output.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left in the result
as dimensions with size one. With this option, the result will broadcast correctly
against the input array.
If the default value is passed, then keepdims will not be passed through to the mean
method of sub-classes of ndarray, however any non-default value will be. If the sub-class
method does not implement keepdims any exceptions will be raised.
Returns
-------
m : ndarray, see dtype parameter above
If out=None, returns a new array containing the mean values,
otherwise a reference to the output array is returned.
.. note::
This function differs from the original `numpy.mean
<https://docs.scipy.org/doc/numpy/reference/generated/numpy.mean.html>`_ in
the following way(s):
* only ndarray is accepted as valid input, python iterables or scalar is not supported
* default data type for integer input is float32 or float64, which depends on your current default dtype
Examples
--------
>>> a = np.array([[1, 2], [3, 4]])
>>> np.mean(a)
array(2.5)
>>> a = np.zeros((2, 512*512), dtype=np.float32)
>>> a[0,:] = 1.0
>>> a[1,:] = 0.1
>>> np.mean(a)
array(0.55)
>>> np.mean(a, dtype=np.float64)
array(0.55, dtype=float64)
"""
return _npi.mean(a, axis=axis, dtype=dtype, keepdims=keepdims, out=out)
# pylint: enable=redefined-outer-name
# pylint: disable=redefined-outer-name
@set_module('mxnet.numpy')
def std(a, axis=None, dtype=None, out=None, ddof=0, keepdims=False): # pylint: disable=too-many-arguments
"""
Compute the standard deviation along the specified axis.
Returns the standard deviation, a measure of the spread of a distribution,
of the array elements. The standard deviation is computed for the
flattened array by default, otherwise over the specified axis.
Parameters
----------
a : array_like
Calculate the standard deviation of these values.
axis : None or int or tuple of ints, optional
Axis or axes along which the standard deviation is computed. The
default is to compute the standard deviation of the flattened array.
.. versionadded:: 1.7.0
If this is a tuple of ints, a standard deviation is performed over
multiple axes, instead of a single axis or all the axes as before.
dtype : dtype, optional
Type to use in computing the standard deviation. For arrays of
integer type the default is float64, for arrays of float types it is
the same as the array type.
out : ndarray, optional
Alternative output array in which to place the result. It must have
the same shape as the expected output but the type (of the calculated
values) will be cast if necessary.
ddof : int, optional
Means Delta Degrees of Freedom. The divisor used in calculations
is ``N - ddof``, where ``N`` represents the number of elements.
By default `ddof` is zero.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the input array.
If the default value is passed, then `keepdims` will not be
passed through to the `std` method of sub-classes of
`ndarray`, however any non-default value will be. If the
sub-class' method does not implement `keepdims` any
exceptions will be raised.
Returns
-------
standard_deviation : ndarray, see dtype parameter above.
If `out` is None, return a new array containing the standard deviation,
otherwise return a reference to the output array.
Examples
--------
>>> a = np.array([[1, 2], [3, 4]])
>>> np.std(a)
1.1180339887498949 # may vary
>>> np.std(a, axis=0)
array([1., 1.])
>>> np.std(a, axis=1)
array([0.5, 0.5])
In single precision, std() can be inaccurate:
>>> a = np.zeros((2, 512*512), dtype=np.float32)
>>> a[0, :] = 1.0
>>> a[1, :] = 0.1
>>> np.std(a)
array(0.45)
>>> np.std(a, dtype=np.float64)
array(0.45, dtype=float64)
"""
return _mx_nd_np.std(a, axis=axis, dtype=dtype, ddof=ddof, keepdims=keepdims, out=out)
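# Illustrative sketch, not part of the library API: the ``N - ddof`` divisor
# described above, written out for a flattened ndarray. The helper name is
# hypothetical and assumes ``a.size`` gives N and ``a.mean()`` is available.
def _example_std_with_ddof(a, ddof=0):
    """Reproduce std(a, ddof=ddof) for a flattened input from its definition."""
    m = a.mean()
    return (((a - m) ** 2).sum() / (a.size - ddof)) ** 0.5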
# pylint: enable=redefined-outer-name
@set_module('mxnet.numpy')
def delete(arr, obj, axis=None):
"""
Return a new array with sub-arrays along an axis deleted. For a one
dimensional array, this returns those entries not returned by
`arr[obj]`.
Parameters
----------
arr : ndarray
Input array.
obj : slice, int or ndarray of ints
Indicate indices of sub-arrays to remove along the specified axis.
axis : int, optional
The axis along which to delete the subarray defined by `obj`.
If `axis` is None, `obj` is applied to the flattened array.
Returns
-------
out : ndarray
A copy of `arr` with the elements specified by `obj` removed. Note
that `delete` does not occur in-place. If `axis` is None, `out` is
a flattened array.
Examples
--------
>>> arr = np.array([[1,2,3,4], [5,6,7,8], [9,10,11,12]])
>>> arr
array([[ 1., 2., 3., 4.],
[ 5., 6., 7., 8.],
[ 9., 10., 11., 12.]])
>>> np.delete(arr, 1, 0)
array([[ 1., 2., 3., 4.],
[ 9., 10., 11., 12.]])
>>> np.delete(arr, slice(None, None, 2), 1)
array([[ 2., 4.],
[ 6., 8.],
[10., 12.]])
>>> np.delete(arr, np.array([1,3,5]), None)
array([ 1., 3., 5., 7., 8., 9., 10., 11., 12.])
>>> np.delete(arr, np.array([1,1,5]), None)
array([ 1., 3., 4., 5., 7., 8., 9., 10., 11., 12.])
"""
return _mx_nd_np.delete(arr, obj, axis=axis)
# pylint: disable=redefined-outer-name
@set_module('mxnet.numpy')
def var(a, axis=None, dtype=None, out=None, ddof=0, keepdims=False): # pylint: disable=too-many-arguments
"""
Compute the variance along the specified axis.
Returns the variance of the array elements, a measure of the spread of a
distribution. The variance is computed for the flattened array by
default, otherwise over the specified axis.
Parameters
----------
a : array_like
Array containing numbers whose variance is desired. If `a` is not an
array, a conversion is attempted.
axis : None or int or tuple of ints, optional
Axis or axes along which the variance is computed. The default is to
compute the variance of the flattened array.
.. versionadded:: 1.7.0
If this is a tuple of ints, a variance is performed over multiple axes,
instead of a single axis or all the axes as before.
dtype : data-type, optional
Type to use in computing the variance.
For arrays of integer type, the default is of your current default dtype,
When npx.is_np_default_dtype() returns False, default dtype is float32,
When npx.is_np_default_dtype() returns True, default dtype is float64.
For arrays of float types it is the same as the array type.
out : ndarray, optional
Alternate output array in which to place the result. It must have
the same shape as the expected output, but the type is cast if
necessary.
ddof : int, optional
"Delta Degrees of Freedom": the divisor used in the calculation is
``N - ddof``, where ``N`` represents the number of elements. By
default `ddof` is zero.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the input array.
If the default value is passed, then `keepdims` will not be
passed through to the `var` method of sub-classes of
`ndarray`, however any non-default value will be. If the
sub-class' method does not implement `keepdims` any
exceptions will be raised.
Returns
-------
variance : ndarray, see dtype parameter above
If ``out=None``, returns a new array containing the variance;
otherwise, a reference to the output array is returned.
Examples
--------
>>> a = np.array([[1, 2], [3, 4]])
>>> np.var(a)
array(1.25)
>>> np.var(a, axis=0)
array([1., 1.])
>>> np.var(a, axis=1)
array([0.25, 0.25])
>>> a = np.zeros((2, 512*512), dtype=np.float32)
>>> a[0, :] = 1.0
>>> a[1, :] = 0.1
>>> np.var(a)
array(0.2025)
>>> np.var(a, dtype=np.float64)
array(0.2025, dtype=float64)
>>> ((1-0.55)**2 + (0.1-0.55)**2)/2
0.2025
"""
return _mx_nd_np.var(a, axis=axis, dtype=dtype, ddof=ddof, keepdims=keepdims, out=out)
# pylint: disable=redefined-outer-name
@set_module('mxnet.numpy')
def indices(dimensions, dtype=None, ctx=None):
"""Return an array representing the indices of a grid.
Compute an array where the subarrays contain index values 0,1,...
varying only along the corresponding axis.
Parameters
----------
dimensions : sequence of ints
The shape of the grid.
dtype : data-type, optional
The desired data-type for the array. Default is `int64`.
ctx : device context, optional
Device context on which the memory is allocated. Default is
`mxnet.context.current_context()`.
Returns
-------
grid : ndarray
The array of grid indices,
``grid.shape = (len(dimensions),) + tuple(dimensions)``.
Notes
-----
The output shape is obtained by prepending the number of dimensions
in front of the tuple of dimensions, i.e. if `dimensions` is a tuple
``(r0, ..., rN-1)`` of length ``N``, the output shape is
``(N,r0,...,rN-1)``.
The subarrays ``grid[k]`` contains the N-D array of indices along the
``k-th`` axis. Explicitly::
grid[k,i0,i1,...,iN-1] = ik
Examples
--------
>>> grid = np.indices((2, 3))
>>> grid.shape
(2, 2, 3)
>>> grid[0] # row indices
array([[0, 0, 0],
[1, 1, 1]], dtype=int64)
>>> grid[1] # column indices
array([[0, 1, 2],
[0, 1, 2]], dtype=int64)
The indices can be used as an index into an array.
>>> x = np.arange(20).reshape(5, 4)
>>> row, col = np.indices((2, 3))
>>> x[row, col]
array([[0., 1., 2.],
[4., 5., 6.]])
Note that it would be more straightforward in the above example to
extract the required elements directly with ``x[:2, :3]``.
"""
return _mx_nd_np.indices(dimensions=dimensions, dtype=dtype, ctx=ctx)
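# Illustrative sketch, not part of the library API: the note above, i.e. that
# indexing with the grids from ``indices((2, 3))`` selects the same block as
# the plain slice ``x[:2, :3]``. The helper name is hypothetical.
def _example_indices_vs_slice(x):
    """Compare grid indexing against slicing; the result should be all True."""
    row, col = indices((2, 3))
    return x[row, col] == x[:2, :3]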
# pylint: enable=redefined-outer-name
@set_module('mxnet.numpy')
@wrap_np_binary_func
def copysign(x1, x2, out=None, **kwargs):
r"""
Change the sign of x1 to that of x2, element-wise.
If `x2` is a scalar, its sign will be copied to all elements of `x1`.
Parameters
----------
x1 : ndarray or scalar
Values to change the sign of.
x2 : ndarray or scalar
The sign of `x2` is copied to `x1`.
out : ndarray or None, optional
A location into which the result is stored. It must be of the
right shape and right type to hold the output. If not provided
or `None`, a freshly-allocated array is returned.
Returns
-------
out : ndarray or scalar
The values of `x1` with the sign of `x2`.
This is a scalar if both `x1` and `x2` are scalars.
.. note::
This function differs from the original `numpy.copysign
<https://docs.scipy.org/doc/numpy/reference/generated/numpy.copysign.html>`_ in
the following aspects:
* ``where`` param is not supported.
Examples
--------
>>> np.copysign(1.3, -1)
-1.3
>>> 1/np.copysign(0, 1)
inf
>>> 1/np.copysign(0, -1)
-inf
>>> a = np.array([-1, 0, 1])
>>> np.copysign(a, -1.1)
array([-1., -0., -1.])
>>> np.copysign(a, np.arange(3)-1)
array([-1., 0., 1.])
"""
return _mx_nd_np.copysign(x1, x2, out=out)
@set_module('mxnet.numpy')
def ravel(x, order='C'):
r"""
ravel(x)
Return a contiguous flattened array.
A 1-D array, containing the elements of the input, is returned. A copy is
made only if needed.
Parameters
----------
x : ndarray
Input array. The elements in `x` are read in row-major, C-style order and
packed as a 1-D array.
order : `C`, optional
Only support row-major, C-style order.
Returns
-------
y : ndarray
y is an array of the same subtype as `x`, with shape ``(x.size,)``.
Note that matrices are special cased for backward compatibility, if `x`
is a matrix, then y is a 1-D ndarray.
.. note::
This function differs from the original numpy.ravel in the following aspects:
* Only support row-major, C-style order.
Examples
--------
It is equivalent to ``reshape(x, -1)``.
>>> x = np.array([[1, 2, 3], [4, 5, 6]])
>>> print(np.ravel(x))
[1. 2. 3. 4. 5. 6.]
>>> print(x.reshape(-1))
[1. 2. 3. 4. 5. 6.]
>>> print(np.ravel(x.T))
[1. 4. 2. 5. 3. 6.]
"""
return _mx_nd_np.ravel(x, order)
@set_module('mxnet.numpy')
def unravel_index(indices, shape, order='C'): # pylint: disable=redefined-outer-name
"""
Converts a flat index or array of flat indices into a tuple of coordinate arrays.
Parameters
----------
indices : array_like
An integer array whose elements are indices into the flattened version of an array of dimensions shape.
Before version 1.6.0, this function accepted just one index value.
shape : tuple of ints
The shape of the array to use for unraveling indices.
order : {'C'}, optional
Only row-major, C-style order is currently supported.
Returns
-------
unraveled_coords : ndarray
Each row of the output has the same shape as the indices array and holds
the coordinates along one dimension; each column holds the unravelled
coordinates of one flat index.
Examples
--------
>>> np.unravel_index([22, 41, 37], (7,6))
[[3. 6. 6.]
[4. 5. 1.]]
>>> np.unravel_index(1621, (6,7,8,9))
[3, 1, 4, 1]
"""
return _mx_nd_np.unravel_index(indices, shape, order=order)
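# Illustrative sketch, not part of the library API: for a 2-D shape the
# row-major relationship between a flat index and its coordinates is simply
# ``flat == row * ncols + col`` (e.g. 22 == 3 * 6 + 4 for shape (7, 6)).
# The helper name is hypothetical and works on plain Python ints.
def _example_unravel_2d(flat, shape):
    """Unravel one flat index for a 2-D row-major shape using integer arithmetic."""
    ncols = shape[1]
    return flat // ncols, flat % ncols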
@set_module('mxnet.numpy')
def flatnonzero(a):
r"""
Return indices that are non-zero in the flattened version of a.
This is equivalent to np.nonzero(np.ravel(a))[0].
Parameters
----------
a : array_like
Input data.
Returns
-------
res : ndarray
Output array, containing the indices of the elements of `a.ravel()`
that are non-zero.
See Also
--------
nonzero : Return the indices of the non-zero elements of the input array.
ravel : Return a 1-D array containing the elements of the input array.
Examples
--------
>>> x = np.arange(-2, 3)
>>> x
array([-2, -1, 0, 1, 2])
>>> np.flatnonzero(x)
array([0, 1, 3, 4])
Use the indices of the non-zero elements as an index array to extract
these elements:
>>> x.ravel()[np.flatnonzero(x)]
array([-2, -1, 1, 2])
"""
return _mx_nd_np.flatnonzero(a)
@set_module('mxnet.numpy')
def diag_indices_from(arr):
"""
This returns a tuple of indices that can be used to access the main diagonal of an array
a with a.ndim >= 2 dimensions and shape (n, n, ..., n). For a.ndim = 2 this is
the usual diagonal, for a.ndim > 2 this is the set of indices to access
a[i, i, ..., i] for i = [0..n-1].
Parameters
----------
arr : ndarray
Input array for accessing the main diagonal. All dimensions
should have equal length.
Returns
-------
diag : tuple of ndarray
    indices of the main diagonal.
Examples
--------
>>> a = np.arange(16).reshape(4, 4)
>>> a
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11],
[12, 13, 14, 15]])
>>> idx = np.diag_indices_from(a)
>>> idx
(array([0, 1, 2, 3]), array([0, 1, 2, 3]))
>>> a[idx] = 100
>>> a
array([[100, 1, 2, 3],
[ 4, 100, 6, 7],
[ 8, 9, 100, 11],
[ 12, 13, 14, 100]])
"""
return _mx_nd_np.diag_indices_from(arr)
# pylint: disable=redefined-outer-name
@set_module('mxnet.numpy')
def hanning(M, dtype=None, ctx=None):
r"""Return the Hanning window.
The Hanning window is a taper formed by using a weighted cosine.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an
empty array is returned.
ctx : Context, optional
An optional device context (default is the current default context).
Returns
-------
out : ndarray, shape(M,)
The window, with the maximum value normalized to one (the value
one appears only if `M` is odd).
When npx.is_np_default_dtype() returns False, default dtype is float32;
When npx.is_np_default_dtype() returns True, default dtype is float64.
Note that you need to select numpy.float32 or float64 in this operator.
See Also
--------
blackman, hamming
Notes
-----
The Hanning window is defined as
.. math:: w(n) = 0.5 - 0.5cos\left(\frac{2\pi{n}}{M-1}\right)
\qquad 0 \leq n \leq M-1
The Hanning was named for Julius von Hann, an Austrian meteorologist.
It is also known as the Cosine Bell. Some authors prefer that it be
called a Hann window, to help avoid confusion with the very similar
Hamming window.
Most references to the Hanning window come from the signal processing
literature, where it is used as one of many windowing functions for
smoothing values. It is also known as an apodization (which means
"removing the foot", i.e. smoothing discontinuities at the beginning
and end of the sampled signal) or tapering function.
References
----------
.. [1] Blackman, R.B. and Tukey, J.W., (1958) The measurement of power
spectra, Dover Publications, New York.
.. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics",
The University of Alberta Press, 1975, pp. 106-108.
.. [3] Wikipedia, "Window function",
http://en.wikipedia.org/wiki/Window_function
.. [4] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling,
"Numerical Recipes", Cambridge University Press, 1986, page 425.
Examples
--------
>>> np.hanning(12)
array([0. , 0.07937324, 0.29229254, 0.5711574 , 0.8274304 ,
0.9797465 , 0.97974646, 0.82743025, 0.5711573 , 0.29229245,
0.07937312, 0. ])
Plot the window and its frequency response:
>>> import matplotlib.pyplot as plt
>>> window = np.hanning(51)
>>> plt.plot(window.asnumpy())
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Hann window")
Text(0.5, 1.0, 'Hann window')
>>> plt.ylabel("Amplitude")
Text(0, 0.5, 'Amplitude')
>>> plt.xlabel("Sample")
Text(0.5, 0, 'Sample')
>>> plt.show()
"""
return _mx_nd_np.hanning(M, dtype=dtype, ctx=ctx)
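# Illustrative sketch, not part of the library API: the cosine formula from the
# Notes section, w(n) = 0.5 - 0.5*cos(2*pi*n/(M-1)), evaluated with plain
# Python floats. It returns a list rather than an ndarray; the name is hypothetical.
def _example_hann_weights(M):
    """Evaluate the Hann taper for M points as a list of Python floats."""
    import math
    if M < 1:
        return []
    if M == 1:
        return [1.0]
    return [0.5 - 0.5 * math.cos(2.0 * math.pi * n / (M - 1)) for n in range(M)]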
# pylint: disable=redefined-outer-name
@set_module('mxnet.numpy')
def hamming(M, dtype=None, ctx=None):
r"""Return the hamming window.
The hamming window is a taper formed by using a weighted cosine.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an
empty array is returned.
ctx : Context, optional
An optional device context (default is the current default context).
Returns
-------
out : ndarray, shape(M,)
The window, with the maximum value normalized to one (the value
one appears only if `M` is odd).
When npx.is_np_default_dtype() returns False, default dtype is float32;
When npx.is_np_default_dtype() returns True, default dtype is float64.
Note that you need to select numpy.float32 or float64 in this operator.
See Also
--------
blackman, hanning
Notes
-----
The Hamming window is defined as
.. math:: w(n) = 0.54 - 0.46cos\left(\frac{2\pi{n}}{M-1}\right)
\qquad 0 \leq n \leq M-1
The Hamming was named for R. W. Hamming, an associate of J. W. Tukey
and is described in Blackman and Tukey. It was recommended for
smoothing the truncated autocovariance function in the time domain.
Most references to the Hamming window come from the signal processing
literature, where it is used as one of many windowing functions for
smoothing values. It is also known as an apodization (which means
"removing the foot", i.e. smoothing discontinuities at the beginning
and end of the sampled signal) or tapering function.
References
----------
.. [1] Blackman, R.B. and Tukey, J.W., (1958) The measurement of power
spectra, Dover Publications, New York.
.. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics", The
University of Alberta Press, 1975, pp. 109-110.
.. [3] Wikipedia, "Window function",
https://en.wikipedia.org/wiki/Window_function
.. [4] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling,
"Numerical Recipes", Cambridge University Press, 1986, page 425.
Examples
--------
>>> np.hamming(12)
array([0.08000001, 0.15302339, 0.34890914, 0.6054648 , 0.841236 ,
0.9813669 , 0.9813668 , 0.8412359 , 0.6054647 , 0.34890908,
0.15302327, 0.08000001])
Plot the window and its frequency response:
>>> import matplotlib.pyplot as plt
>>> window = np.hamming(51)
>>> plt.plot(window.asnumpy())
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("hamming window")
Text(0.5, 1.0, 'hamming window')
>>> plt.ylabel("Amplitude")
Text(0, 0.5, 'Amplitude')
>>> plt.xlabel("Sample")
Text(0.5, 0, 'Sample')
>>> plt.show()
"""
return _mx_nd_np.hamming(M, dtype=dtype, ctx=ctx)
# pylint: disable=redefined-outer-name
@set_module('mxnet.numpy')
def blackman(M, dtype=None, ctx=None):
r"""Return the Blackman window.
The Blackman window is a taper formed by using the first three
terms of a summation of cosines. It was designed to have close to the
minimal leakage possible. It is close to optimal, only slightly worse
than a Kaiser window.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an
empty array is returned.
ctx : Context, optional
An optional device context (default is the current default context).
Returns
-------
out : ndarray
The window, with the maximum value normalized to one (the value one
appears only if the number of samples is odd).
When npx.is_np_default_dtype() returns False, default dtype is float32;
When npx.is_np_default_dtype() returns True, default dtype is float64.
Note that you need to select numpy.float32 or float64 in this operator.
See Also
--------
hamming, hanning
Notes
-----
The Blackman window is defined as
.. math:: w(n) = 0.42 - 0.5 \cos(2\pi n/{M-1}) + 0.08 \cos(4\pi n/{M-1})
Most references to the Blackman window come from the signal processing
literature, where it is used as one of many windowing functions for
smoothing values. It is also known as an apodization (which means
"removing the foot", i.e. smoothing discontinuities at the beginning
and end of the sampled signal) or tapering function. It is known as a
"near optimal" tapering function, almost as good (by some measures)
as the kaiser window.
References
----------
Blackman, R.B. and Tukey, J.W., (1958) The measurement of power spectra,
Dover Publications, New York.
Oppenheim, A.V., and R.W. Schafer. Discrete-Time Signal Processing.
Upper Saddle River, NJ: Prentice-Hall, 1999, pp. 468-471.
Examples
--------
>>> np.blackman(12)
array([-1.4901161e-08, 3.2606423e-02, 1.5990365e-01, 4.1439798e-01,
7.3604530e-01, 9.6704686e-01, 9.6704674e-01, 7.3604506e-01,
4.1439781e-01, 1.5990359e-01, 3.2606363e-02, -1.4901161e-08])
Plot the window and its frequency response:
>>> import matplotlib.pyplot as plt
>>> window = np.blackman(51)
>>> plt.plot(window.asnumpy())
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("blackman window")
Text(0.5, 1.0, 'blackman window')
>>> plt.ylabel("Amplitude")
Text(0, 0.5, 'Amplitude')
>>> plt.xlabel("Sample")
Text(0.5, 0, 'Sample')
>>> plt.show()
"""
return _mx_nd_np.blackman(M, dtype=dtype, ctx=ctx)
@set_module('mxnet.numpy')
def flip(m, axis=None, out=None):
r"""
flip(m, axis=None, out=None)
Reverse the order of elements in an array along the given axis.
The shape of the array is preserved, but the elements are reordered.
Parameters
----------
m : ndarray or scalar
Input array.
axis : None or int or tuple of ints, optional
Axis or axes along which to flip over. The default,
axis=None, will flip over all of the axes of the input array.
If axis is negative it counts from the last to the first axis.
If axis is a tuple of ints, flipping is performed on all of the axes
specified in the tuple.
out : ndarray or scalar, optional
Alternative output array in which to place the result. It must have
the same shape and type as the expected output.
Returns
-------
out : ndarray or scalar
A view of `m` with the entries of axis reversed. Since a view is
returned, this operation is done in constant time.
Examples
--------
>>> A = np.arange(8).reshape((2,2,2))
>>> A
array([[[0, 1],
[2, 3]],
[[4, 5],
[6, 7]]])
>>> np.flip(A, 0)
array([[[4, 5],
[6, 7]],
[[0, 1],
[2, 3]]])
>>> np.flip(A, 1)
array([[[2, 3],
[0, 1]],
[[6, 7],
[4, 5]]])
>>> np.flip(A)
array([[[7, 6],
[5, 4]],
[[3, 2],
[1, 0]]])
>>> np.flip(A, (0, 2))
array([[[5, 4],
[7, 6]],
[[1, 0],
[3, 2]]])
"""
return _mx_nd_np.flip(m, axis, out=out)
@set_module('mxnet.numpy')
def flipud(m):
r"""
flipud(*args, **kwargs)
Flip array in the up/down direction.
Flip the entries in each column in the up/down direction.
Rows are preserved, but appear in a different order than before.
Parameters
----------
m : array_like
Input array.
Returns
-------
out : array_like
A view of `m` with the rows reversed. Since a view is
returned, this operation is :math:`\mathcal O(1)`.
See Also
--------
fliplr : Flip array in the left/right direction.
rot90 : Rotate array counterclockwise.
Notes
-----
Equivalent to ``m[::-1,...]``.
Does not require the array to be two-dimensional.
Examples
--------
>>> A = np.diag(np.array([1.0, 2, 3]))
>>> A
array([[1., 0., 0.],
[0., 2., 0.],
[0., 0., 3.]])
>>> np.flipud(A)
array([[0., 0., 3.],
[0., 2., 0.],
[1., 0., 0.]])
>>> A = np.random.randn(2,3,5)
>>> np.all(np.flipud(A) == A[::-1,...])
array(True)
>>> np.flipud(np.array([1,2]))
array([2., 1.])
"""
return flip(m, 0)
@set_module('mxnet.numpy')
def fliplr(m):
r"""
fliplr(*args, **kwargs)
Flip array in the left/right direction.
Flip the entries in each row in the left/right direction.
Columns are preserved, but appear in a different order than before.
Parameters
----------
m : array_like
Input array, must be at least 2-D.
Returns
-------
f : ndarray
A view of `m` with the columns reversed. Since a view
is returned, this operation is :math:`\mathcal O(1)`.
See Also
--------
flipud : Flip array in the up/down direction.
rot90 : Rotate array counterclockwise.
Notes
-----
Equivalent to m[:,::-1]. Requires the array to be at least 2-D.
Examples
--------
>>> A = np.diag([1.,2.,3.])
>>> A
array([[1., 0., 0.],
[0., 2., 0.],
[0., 0., 3.]])
>>> np.fliplr(A)
array([[0., 0., 1.],
[0., 2., 0.],
[3., 0., 0.]])
>>> A = np.random.randn(2,3,5)
>>> np.all(np.fliplr(A) == A[:,::-1,...])
array(True)
"""
return flip(m, 1)
@set_module('mxnet.numpy')
def around(x, decimals=0, out=None, **kwargs):
r"""
around(x, decimals=0, out=None)
Evenly round to the given number of decimals.
Parameters
----------
x : ndarray or scalar
Input data.
decimals : int, optional
Number of decimal places to round to (default: 0). If
decimals is negative, it specifies the number of positions to
the left of the decimal point.
out : ndarray, optional
Alternative output array in which to place the result. It must have
the same shape and type as the expected output.
Returns
-------
rounded_array : ndarray or scalar
An array of the same type as `x`, containing the rounded values.
A reference to the result is returned.
.. note::
For values exactly halfway between rounded decimal values, NumPy
rounds to the nearest even value. Thus 1.5 and 2.5 round to 2.0,
-0.5 and 0.5 round to 0.0, etc.
This function differs from the original numpy.around in the following aspects:
* Cannot cast type automatically. Dtype of `out` must be same as the expected one.
* Cannot support complex-valued number.
Examples
--------
>>> np.around([0.37, 1.64])
array([ 0., 2.])
>>> np.around([0.37, 1.64], decimals=1)
array([ 0.4, 1.6])
>>> np.around([.5, 1.5, 2.5, 3.5, 4.5]) # rounds to nearest even value
array([ 0., 2., 2., 4., 4.])
>>> np.around([1, 2, 3, 11], decimals=1) # ndarray of ints is returned
array([ 1, 2, 3, 11])
>>> np.around([1, 2, 3, 11], decimals=-1)
array([ 0, 0, 0, 10])
"""
return _mx_nd_np.around(x, decimals, out=out, **kwargs)
@set_module('mxnet.numpy')
def round(x, decimals=0, out=None, **kwargs):
r"""
round(a, decimals=0, out=None)
Round an array to the given number of decimals.
See Also
--------
around : equivalent function; see for details.
"""
return _mx_nd_np.round(x, decimals, out=out, **kwargs)
@set_module('mxnet.numpy')
def round_(x, decimals=0, out=None, **kwargs):
r"""
round_(a, decimals=0, out=None)
Round an array to the given number of decimals.
See Also
--------
around : equivalent function; see for details.
"""
return _mx_nd_np.round_(x, decimals, out=out, **kwargs)
@set_module('mxnet.numpy')
@wrap_np_binary_func
def arctan2(x1, x2, out=None, **kwargs):
r"""
Element-wise arc tangent of ``x1/x2`` choosing the quadrant correctly.
The quadrant (i.e., branch) is chosen so that ``arctan2(x1, x2)`` is
the signed angle in radians between the ray ending at the origin and
passing through the point (1,0), and the ray ending at the origin and
passing through the point (`x2`, `x1`). (Note the role reversal: the
"`y`-coordinate" is the first function parameter, the "`x`-coordinate"
is the second.) By IEEE convention, this function is defined for
`x2` = +/-0 and for either or both of `x1` and `x2` = +/-inf (see
Notes for specific values).
This function is not defined for complex-valued arguments; for the
so-called argument of complex values, use `angle`.
Parameters
----------
x1 : ndarray or scalar
`y`-coordinates.
x2 : ndarray or scalar
`x`-coordinates. `x2` must be broadcastable to match the shape of
`x1` or vice versa.
out : ndarray or None, optional
A location into which the result is stored. If provided, it must have
a shape that the inputs broadcast to. If not provided or `None`,
a freshly-allocated array is returned.
Returns
-------
out : ndarray or scalar
Array of angles in radians, in the range ``[-pi, pi]``. This is a scalar if
`x1` and `x2` are scalars.
.. note::
    *arctan2* is identical to the ``atan2`` function of the underlying
    C library. The following special values are defined in the C
    standard: [1]_
    +--------+--------+------------------+
    | `x1`   | `x2`   | `arctan2(x1,x2)` |
    +========+========+==================+
    | +/- 0  | +0     | +/- 0            |
    +--------+--------+------------------+
    | +/- 0  | -0     | +/- pi           |
    +--------+--------+------------------+
    | > 0    | +/-inf | +0 / +pi         |
    +--------+--------+------------------+
    | < 0    | +/-inf | -0 / -pi         |
    +--------+--------+------------------+
    | +/-inf | +inf   | +/- (pi/4)       |
    +--------+--------+------------------+
    | +/-inf | -inf   | +/- (3*pi/4)     |
    +--------+--------+------------------+
Note that +0 and -0 are distinct floating point numbers, as are +inf
and -inf.
This function differs from the original numpy.arctan2 in the following aspects:
* Only support float16, float32 and float64.
References
----------
.. [1] ISO/IEC standard 9899:1999, "Programming language C."
Examples
--------
Consider four points in different quadrants:
>>> x = np.array([-1, +1, +1, -1])
>>> y = np.array([-1, -1, +1, +1])
>>> np.arctan2(y, x) * 180 / np.pi
array([-135., -45., 45., 135.])
Note the order of the parameters. `arctan2` is defined also when `x2` = 0
and at several other special points, obtaining values in
the range ``[-pi, pi]``:
>>> x = np.array([1, -1])
>>> y = np.array([0, 0])
>>> np.arctan2(x, y)
array([ 1.5707964, -1.5707964])
"""
return _mx_nd_np.arctan2(x1, x2, out=out)
@set_module('mxnet.numpy')
@wrap_np_binary_func
def hypot(x1, x2, out=None, **kwargs):
r"""
Given the "legs" of a right triangle, return its hypotenuse.
Equivalent to ``sqrt(x1**2 + x2**2)``, element-wise. If `x1` or
`x2` is scalar_like (i.e., unambiguously cast-able to a scalar type),
it is broadcast for use with each element of the other argument.
Parameters
----------
x1, x2 : array_like
Leg of the triangle(s).
out : ndarray, None, or tuple of ndarray and None, optional
A location into which the result is stored. If provided, it must have
a shape that the inputs broadcast to. If not provided or `None`,
a freshly-allocated array is returned. A tuple (possible only as a
keyword argument) must have length equal to the number of outputs.
Returns
-------
z : ndarray
The hypotenuse of the triangle(s).
This is a scalar if both `x1` and `x2` are scalars.
.. note::
This function differs from the original numpy.hypot in the following aspects:
* Only support float16, float32 and float64.
Examples
--------
>>> np.hypot(3*np.ones((3, 3)), 4*np.ones((3, 3)))
array([[ 5., 5., 5.],
[ 5., 5., 5.],
[ 5., 5., 5.]])
Example showing broadcast of scalar_like argument:
>>> np.hypot(3*np.ones((3, 3)), [4])
array([[ 5., 5., 5.],
[ 5., 5., 5.],
[ 5., 5., 5.]])
"""
return _mx_nd_np.hypot(x1, x2, out=out)
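# Illustrative sketch, not part of the library API: the documented equivalence
# ``hypot(x1, x2) == sqrt(x1**2 + x2**2)``, written with ndarray arithmetic.
# The helper name is hypothetical.
def _example_hypot_by_hand(x1, x2):
    """Compute the element-wise hypotenuse directly from the definition."""
    return (x1 ** 2 + x2 ** 2) ** 0.5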
@set_module('mxnet.numpy')
@wrap_np_binary_func
def bitwise_and(x1, x2, out=None, **kwargs):
r"""
Compute the bit-wise AND of two arrays element-wise.
Parameters
----------
x1, x2 : ndarray or scalar
Only integer and boolean types are handled. If x1.shape != x2.shape,
they must be broadcastable to a common shape (which becomes the shape of the output).
out : ndarray, optional
A location into which the result is stored. If provided, it must have a shape that the
inputs broadcast to. If not provided or None, a freshly-allocated array is returned.
Returns
-------
out : ndarray
Result.
Examples
--------
>>> np.bitwise_and(13, 17)
1
>>> np.bitwise_and(14, 13)
12
>>> np.bitwise_and(np.array([14,3], dtype='int32'), 13)
array([12, 1], dtype=int32)
>>> np.bitwise_and(np.array([11,7], dtype='int32'), np.array([4,25], dtype='int32'))
array([0, 1], dtype=int32)
>>> np.bitwise_and(np.array([2,5,255], dtype='int32'), np.array([3,14,16], dtype='int32'))
array([ 2, 4, 16], dtype=int32)
>>> np.bitwise_and(np.array([True, True], dtype='bool'), np.array([False, True], dtype='bool'))
array([False, True])
"""
return _mx_nd_np.bitwise_and(x1, x2, out=out)
@set_module('mxnet.numpy')
@wrap_np_binary_func
def bitwise_xor(x1, x2, out=None, **kwargs):
r"""
Compute the bit-wise XOR of two arrays element-wise.
Parameters
----------
x1, x2 : ndarray or scalar
Only integer and boolean types are handled. If x1.shape != x2.shape,
they must be broadcastable to a common shape (which becomes the shape of the output).
out : ndarray, optional
A location into which the result is stored. If provided, it must have a shape that the
inputs broadcast to. If not provided or None, a freshly-allocated array is returned.
Returns
-------
out : ndarray
Result.
Examples
--------
>>> np.bitwise_xor(13, 17)
28
>>> np.bitwise_xor(31, 5)
26
>>> np.bitwise_xor(np.array([31,3], dtype=np.int32), 5)
array([26, 6], dtype=int32)
>>> np.bitwise_xor(np.array([31,3], dtype='int32'), np.array([5,6], dtype='int32'))
array([26, 5], dtype=int32)
>>> np.bitwise_xor(np.array([True, True], dtype='bool'), np.array([False, True], dtype='bool'))
array([ True, False])
"""
return _mx_nd_np.bitwise_xor(x1, x2, out=out)
@set_module('mxnet.numpy')
@wrap_np_binary_func
def bitwise_or(x1, x2, out=None, **kwargs):
r"""
Compute the bit-wise OR of two arrays element-wise.
Parameters
----------
x1, x2 : ndarray or scalar
Only integer and boolean types are handled. If x1.shape != x2.shape,
they must be broadcastable to a common shape (which becomes the shape of the output).
out : ndarray, optional
A location into which the result is stored. If provided, it must have a shape that the
inputs broadcast to. If not provided or None, a freshly-allocated array is returned.
Returns
-------
out : ndarray
Result.
Examples
--------
>>> np.bitwise_or(13, 17)
29
>>> np.bitwise_or(31, 5)
31
>>> np.bitwise_or(np.array([31,3], dtype=np.int32), 5)
array([31, 7])
>>> np.bitwise_or(np.array([31,3], dtype='int32'), np.array([5,6], dtype='int32'))
array([31, 7])
>>> np.bitwise_or(np.array([True, True], dtype='bool'), np.array([False, True], dtype='bool'))
array([ True, True])
"""
return _mx_nd_np.bitwise_or(x1, x2, out=out)
@set_module('mxnet.numpy')
@wrap_np_binary_func
def ldexp(x1, x2, out=None, **kwargs):
"""
Returns x1 * 2**x2, element-wise.
The mantissas `x1` and twos exponents `x2` are used to construct
floating point numbers ``x1 * 2**x2``.
Parameters
----------
x1 : ndarray or scalar
Array of multipliers.
x2 : ndarray or scalar, int
Array of twos exponents.
out : ndarray, optional
A location into which the result is stored. If provided, it must have
a shape that the inputs broadcast to. If not, a freshly-allocated array is returned.
Returns
-------
y : ndarray or scalar
The result of ``x1 * 2**x2``.
This is a scalar if both `x1` and `x2` are scalars.
Notes
-----
Complex dtypes are not supported, they will raise a TypeError.
Different from numpy, we allow x2 to be float besides int.
`ldexp` is useful as the inverse of `frexp`, if used by itself it is
more clear to simply use the expression ``x1 * 2**x2``.
Examples
--------
>>> np.ldexp(5, np.arange(4))
array([ 5., 10., 20., 40.])
"""
return _mx_nd_np.ldexp(x1, x2, out)
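# Illustrative sketch, not part of the library API: as the Notes say, ldexp is
# just the expression ``x1 * 2**x2``. The helper name is hypothetical; it is
# written for plain Python scalars, though the same expression also broadcasts
# for ndarray inputs under the stated dtype rules.
def _example_ldexp_by_hand(x1, x2):
    """Compute x1 * 2**x2 without calling ldexp."""
    return x1 * 2 ** x2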
@set_module('mxnet.numpy')
def vdot(a, b):
r"""
Return the dot product of two vectors.
Note that `vdot` handles multidimensional arrays differently than `dot`:
it does *not* perform a matrix product, but flattens input arguments
to 1-D vectors first. Consequently, it should only be used for vectors.
Parameters
----------
a : ndarray
First argument to the dot product.
b : ndarray
Second argument to the dot product.
Returns
-------
output : ndarray
Dot product of `a` and `b`.
See Also
--------
dot : Return the dot product without using the complex conjugate of the
first argument.
Examples
--------
Note that higher-dimensional arrays are flattened!
>>> a = np.array([[1, 4], [5, 6]])
>>> b = np.array([[4, 1], [2, 2]])
>>> np.vdot(a, b)
array(30.)
>>> np.vdot(b, a)
array(30.)
>>> 1*4 + 4*1 + 5*2 + 6*2
30
"""
return tensordot(a.flatten(), b.flatten(), 1)
@set_module('mxnet.numpy')
def inner(a, b):
r"""Inner product of two arrays.
Ordinary inner product of vectors for 1-D arrays (without complex
conjugation), in higher dimensions a sum product over the last axes.
Parameters
----------
a, b : ndarray
If `a` and `b` are nonscalar, their last dimensions must match.
Returns
-------
out : ndarray
`out.shape = a.shape[:-1] + b.shape[:-1]`
Raises
------
ValueError
If the last dimension of `a` and `b` has different size.
See Also
--------
tensordot : Sum products over arbitrary axes.
dot : Generalised matrix product, using second last dimension of `b`.
einsum : Einstein summation convention.
.. note::
For vectors (1-D arrays) it computes the ordinary inner-product::
np.inner(a, b) = sum(a[:]*b[:])
More generally, if `ndim(a) = r > 0` and `ndim(b) = s > 0`::
np.inner(a, b) = np.tensordot(a, b, axes=(-1,-1))
or explicitly::
np.inner(a, b)[i0,...,ir-1,j0,...,js-1]
= sum(a[i0,...,ir-1,:]*b[j0,...,js-1,:])
In addition `a` or `b` may be scalars, in which case::
np.inner(a,b) = a*b
Examples
--------
Ordinary inner product for vectors:
>>> a = np.array([1,2,3])
>>> b = np.array([0,1,0])
>>> np.inner(a, b)
array(2.)
A multidimensional example:
>>> a = np.arange(24).reshape((2,3,4))
>>> b = np.arange(4)
>>> np.inner(a, b)
array([[ 14., 38., 62.],
[ 86., 110., 134.]])
"""
return tensordot(a, b, [-1, -1])
@set_module('mxnet.numpy')
def outer(a, b):
r"""Compute the outer product of two vectors.
Given two vectors, ``a = [a0, a1, ..., aM]`` and
``b = [b0, b1, ..., bN]``,
the outer product [1]_ is::
[[a0*b0 a0*b1 ... a0*bN ]
[a1*b0 .
[ ... .
[aM*b0 aM*bN ]]
Parameters
----------
a : (M,) ndarray
First input vector. Input is flattened if
not already 1-dimensional.
b : (N,) ndarray
Second input vector. Input is flattened if
not already 1-dimensional.
Returns
-------
out : (M, N) ndarray
``out[i, j] = a[i] * b[j]``
See also
--------
inner
einsum : ``einsum('i,j->ij', a.ravel(), b.ravel())`` is the equivalent.
ufunc.outer : A generalization to N dimensions and other operations.
``np.multiply.outer(a.ravel(), b.ravel())`` is the equivalent.
References
----------
.. [1] : G. H. Golub and C. F. Van Loan, *Matrix Computations*, 3rd
ed., Baltimore, MD, Johns Hopkins University Press, 1996,
pg. 8.
Examples
--------
Make a (*very* coarse) grid for computing a Mandelbrot set:
>>> rl = np.outer(np.ones((5,)), np.linspace(-2, 2, 5))
>>> rl
array([[-2., -1., 0., 1., 2.],
[-2., -1., 0., 1., 2.],
[-2., -1., 0., 1., 2.],
[-2., -1., 0., 1., 2.],
[-2., -1., 0., 1., 2.]])
"""
return tensordot(a.flatten(), b.flatten(), 0)
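# Illustrative sketch, not part of the library API: ``out[i, j] = a[i] * b[j]``
# can also be obtained by broadcasting a column against a row. The helper name
# is hypothetical and assumes 1-D ndarray inputs.
def _example_outer_by_broadcast(a, b):
    """Compute the outer product of two 1-D ndarrays via broadcasting."""
    return a.reshape((-1, 1)) * b.reshape((1, -1))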
@set_module('mxnet.numpy')
def cross(a, b, axisa=-1, axisb=-1, axisc=-1, axis=None): # pylint: disable=too-many-arguments
"""
Return the cross product of two (arrays of) vectors.
The cross product of `a` and `b` in :math:`R^3` is a vector perpendicular
to both `a` and `b`. If `a` and `b` are arrays of vectors, the vectors
are defined by the last axis of `a` and `b` by default, and these axes
can have dimensions 2 or 3. Where the dimension of either `a` or `b` is
2, the third component of the input vector is assumed to be zero and the
cross product calculated accordingly. In cases where both input vectors
have dimension 2, the z-component of the cross product is returned.
Parameters
----------
a : ndarray
Components of the first vector(s).
b : ndarray
Components of the second vector(s).
axisa : int, optional
Axis of `a` that defines the vector(s). By default, the last axis.
axisb : int, optional
Axis of `b` that defines the vector(s). By default, the last axis.
axisc : int, optional
Axis of `c` containing the cross product vector(s). Ignored if
both input vectors have dimension 2, as the return is scalar.
By default, the last axis.
axis : int, optional
If defined, the axis of `a`, `b` and `c` that defines the vector(s)
and cross product(s). Overrides `axisa`, `axisb` and `axisc`.
Returns
-------
c : ndarray
Vector cross product(s).
Raises
------
ValueError
When the dimension of the vector(s) in `a` and/or `b` does not
equal 2 or 3.
Notes
-----
Supports full broadcasting of the inputs.
Examples
--------
Vector cross-product.
>>> x = np.array([1., 2., 3.])
>>> y = np.array([4., 5., 6.])
>>> np.cross(x, y)
array([-3., 6., -3.])
One vector with dimension 2.
>>> x = np.array([1., 2.])
>>> y = np.array([4., 5., 6.])
>>> np.cross(x, y)
array([12., -6., -3.])
Equivalently:
>>> x = np.array([1., 2., 0.])
>>> y = np.array([4., 5., 6.])
>>> np.cross(x, y)
array([12., -6., -3.])
Both vectors with dimension 2.
>>> x = np.array([1., 2.])
>>> y = np.array([4., 5.])
>>> np.cross(x, y)
array(-3.)
Multiple vector cross-products. Note that the direction of the cross
product vector is defined by the `right-hand rule`.
>>> x = np.array([[1., 2., 3.], [4., 5., 6.]])
>>> y = np.array([[4., 5., 6.], [1., 2., 3.]])
>>> np.cross(x, y)
array([[-3., 6., -3.],
[ 3., -6., 3.]])
The orientation of `c` can be changed using the `axisc` keyword.
>>> np.cross(x, y, axisc=0)
array([[-3., 3.],
[ 6., -6.],
[-3., 3.]])
Change the vector definition of `x` and `y` using `axisa` and `axisb`.
>>> x = np.array([[1., 2., 3.], [4., 5., 6.], [7., 8., 9.]])
>>> y = np.array([[7., 8., 9.], [4., 5., 6.], [1., 2., 3.]])
>>> np.cross(x, y)
array([[ -6., 12., -6.],
[ 0., 0., 0.],
[ 6., -12., 6.]])
>>> np.cross(x, y, axisa=0, axisb=0)
array([[-24., 48., -24.],
[-30., 60., -30.],
[-36., 72., -36.]])
"""
return _mx_nd_np.cross(a, b, axisa=axisa, axisb=axisb, axisc=axisc, axis=axis)
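# Illustrative sketch, not part of the library API: for two plain 3-vectors the
# cross product reduces to the familiar component formula. The helper name is
# hypothetical and works on any length-3 Python sequences of numbers.
def _example_cross_3d(a, b):
    """Cross product of two length-3 sequences, returned as a Python list."""
    return [a[1] * b[2] - a[2] * b[1],
            a[2] * b[0] - a[0] * b[2],
            a[0] * b[1] - a[1] * b[0]]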
@set_module('mxnet.numpy')
def kron(a, b):
r"""Kronecker product of two arrays.
Computes the Kronecker product, a composite array made of blocks of the
second array scaled by the first.
Parameters
----------
a, b : ndarray
Returns
-------
out : ndarray
See Also
--------
outer : The outer product
.. note::
The function assumes that the number of dimensions of `a` and `b`
are the same, if necessary prepending the smallest with ones.
If `a.shape = (r0,r1,..,rN)` and `b.shape = (s0,s1,...,sN)`,
the Kronecker product has shape `(r0*s0, r1*s1, ..., rN*sN)`.
The elements are products of elements from `a` and `b`, organized
explicitly by::
kron(a,b)[k0,k1,...,kN] = a[i0,i1,...,iN] * b[j0,j1,...,jN]
where::
kt = it * st + jt, t = 0,...,N
In the common 2-D case (N=1), the block structure can be visualized::
[[ a[0,0]*b, a[0,1]*b, ... , a[0,-1]*b ],
[ ... ... ],
[ a[-1,0]*b, a[-1,1]*b, ... , a[-1,-1]*b ]]
Examples
--------
>>> np.kron([1,10,100], [5,6,7])
array([ 5, 6, 7, 50, 60, 70, 500, 600, 700])
>>> np.kron([5,6,7], [1,10,100])
array([ 5, 50, 500, 6, 60, 600, 7, 70, 700])
"""
return _mx_nd_np.kron(a, b)
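# Illustrative sketch, not part of the library API: for 2-D inputs the block
# structure shown above can be produced with broadcasting and a reshape,
# following kron(a, b)[k0, k1] = a[i0, i1] * b[j0, j1] with kt = it * st + jt.
# The helper name is hypothetical and assumes both arguments are 2-D ndarrays.
def _example_kron_2d(a, b):
    """Kronecker product of two 2-D ndarrays via broadcasting and reshape."""
    r0, r1 = a.shape
    s0, s1 = b.shape
    prod = a.reshape((r0, 1, r1, 1)) * b.reshape((1, s0, 1, s1))
    return prod.reshape((r0 * s0, r1 * s1))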
@set_module('mxnet.numpy')
def equal(x1, x2, out=None):
"""
Return (x1 == x2) element-wise.
Parameters
----------
x1, x2 : ndarrays or scalars
Input arrays. If ``x1.shape != x2.shape``, they must be broadcastable to
a common shape (which becomes the shape of the output).
out : ndarray, None, or tuple of ndarray and None, optional
A location into which the result is stored. If provided, it must have
a shape that the inputs broadcast to. If not provided or `None`,
a freshly-allocated array is returned.
Returns
-------
out : ndarray or scalar
Output array of type bool, element-wise comparison of `x1` and `x2`.
This is a scalar if both `x1` and `x2` are scalars.
See Also
--------
not_equal, greater_equal, less_equal, greater, less
Examples
--------
>>> np.equal(np.ones((2, 1)), np.zeros((1, 3)))
array([[False, False, False],
[False, False, False]])
>>> np.equal(1, np.ones(1))
array([ True])
"""
return _mx_nd_np.equal(x1, x2, out)
@set_module('mxnet.numpy')
def not_equal(x1, x2, out=None):
"""
Return (x1 != x2) element-wise.
Parameters
----------
x1, x2 : ndarrays or scalars
Input arrays. If ``x1.shape != x2.shape``, they must be broadcastable to
a common shape (which becomes the shape of the output).
out : ndarray, None, or tuple of ndarray and None, optional
A location into which the result is stored. If provided, it must have
a shape that the inputs broadcast to. If not provided or `None`,
a freshly-allocated array is returned.
Returns
-------
out : ndarray or scalar
Output array of type bool, element-wise comparison of `x1` and `x2`.
This is a scalar if both `x1` and `x2` are scalars.
See Also
--------
equal, greater, greater_equal, less, less_equal
Examples
--------
>>> np.not_equal(np.ones((2, 1)), np.zeros((1, 3)))
array([[ True, True, True],
[ True, True, True]])
>>> np.not_equal(1, np.ones(1))
array([False])
"""
return _mx_nd_np.not_equal(x1, x2, out)
@set_module('mxnet.numpy')
def greater(x1, x2, out=None):
"""
Return the truth value of (x1 > x2) element-wise.
Parameters
----------
x1, x2 : ndarrays or scalars
Input arrays. If ``x1.shape != x2.shape``, they must be broadcastable to
a common shape (which becomes the shape of the output).
out : ndarray, None, or tuple of ndarray and None, optional
A location into which the result is stored. If provided, it must have
a shape that the inputs broadcast to. If not provided or `None`,
a freshly-allocated array is returned.
Returns
-------
out : ndarray or scalar
Output array of type bool, element-wise comparison of `x1` and `x2`.
This is a scalar if both `x1` and `x2` are scalars.
See Also
--------
equal, greater, greater_equal, less, less_equal
Examples
--------
>>> np.greater(np.ones((2, 1)), np.zeros((1, 3)))
array([[ True, True, True],
[ True, True, True]])
>>> np.greater(1, np.ones(1))
array([False])
"""
return _mx_nd_np.greater(x1, x2, out)
@set_module('mxnet.numpy')
def less(x1, x2, out=None):
"""
Return the truth value of (x1 < x2) element-wise.
Parameters
----------
x1, x2 : ndarrays or scalars
Input arrays. If ``x1.shape != x2.shape``, they must be broadcastable to
a common shape (which becomes the shape of the output).
out : ndarray, None, or tuple of ndarray and None, optional
A location into which the result is stored. If provided, it must have
a shape that the inputs broadcast to. If not provided or `None`,
a freshly-allocated array is returned.
Returns
-------
out : ndarray or scalar
Output array of type bool, element-wise comparison of `x1` and `x2`.
This is a scalar if both `x1` and `x2` are scalars.
See Also
--------
equal, greater, greater_equal, less, less_equal
Examples
--------
>>> np.less(np.ones((2, 1)), np.zeros((1, 3)))
array([[False, False, False],
[False, False, False]])
>>> np.less(1, np.ones(1))
array([False])
"""
return _mx_nd_np.less(x1, x2, out)
@set_module('mxnet.numpy')
@wrap_np_binary_func
def logical_and(x1, x2, out=None):
r"""
Compute the truth value of x1 AND x2 element-wise.
Parameters
----------
x1, x2 : array_like
Logical AND is applied to the elements of `x1` and `x2`.
If ``x1.shape != x2.shape``, they must be broadcastable to a common
shape (which becomes the shape of the output).
out : ndarray, None, or tuple of ndarray and None, optional
A location into which the result is stored. If provided, it must have
a shape that the inputs broadcast to. If not provided or `None`,
a freshly-allocated array is returned. A tuple (possible only as a
keyword argument) must have length equal to the number of outputs.
Returns
-------
y : ndarray or bool
Boolean result of the logical AND operation applied to the elements
of `x1` and `x2`; the shape is determined by broadcasting.
This is a scalar if both `x1` and `x2` are scalars.
See Also
--------
logical_or, logical_not, logical_xor, bitwise_or
Examples
--------
>>> np.logical_and(True, False)
False
>>> np.logical_and(np.array([True, True], dtype='bool'), np.array([False, True], dtype='bool'))
array([False, True])
"""
return _mx_nd_np.logical_and(x1, x2, out)
@set_module('mxnet.numpy')
@wrap_np_binary_func
def logical_or(x1, x2, out=None):
r"""
Compute the truth value of x1 OR x2 element-wise.
Parameters
----------
x1, x2 : array_like
Logical OR is applied to the elements of `x1` and `x2`.
If ``x1.shape != x2.shape``, they must be broadcastable to a common
shape (which becomes the shape of the output).
out : ndarray, None, or tuple of ndarray and None, optional
A location into which the result is stored. If provided, it must have
a shape that the inputs broadcast to. If not provided or `None`,
a freshly-allocated array is returned. A tuple (possible only as a
keyword argument) must have length equal to the number of outputs.
Returns
-------
y : ndarray or bool
Boolean result of the logical OR operation applied to the elements
of `x1` and `x2`; the shape is determined by broadcasting.
This is a scalar if both `x1` and `x2` are scalars.
See Also
--------
logical_and, logical_not, logical_xor, bitwise_or
Examples
--------
>>> np.logical_or(True, False)
True
>>> np.logical_or(np.array([True, True], dtype='bool'), np.array([False, True], dtype='bool'))
array([True, True])
"""
return _mx_nd_np.logical_or(x1, x2, out)
@set_module('mxnet.numpy')
@wrap_np_binary_func
def logical_xor(x1, x2, out=None):
r"""
Compute the truth value of x1 XOR x2 element-wise.
Parameters
----------
x1, x2 : array_like
Logical XOR is applied to the elements of `x1` and `x2`.
If ``x1.shape != x2.shape``, they must be broadcastable to a common
shape (which becomes the shape of the output).
out : ndarray, None, or tuple of ndarray and None, optional
A location into which the result is stored. If provided, it must have
a shape that the inputs broadcast to. If not provided or `None`,
a freshly-allocated array is returned. A tuple (possible only as a
keyword argument) must have length equal to the number of outputs.
Returns
-------
y : ndarray or bool
Boolean result of the logical XOR operation applied to the elements
of `x1` and `x2`; the shape is determined by broadcasting.
This is a scalar if both `x1` and `x2` are scalars.
See Also
--------
logical_and, logical_not, logical_or, bitwise_or
Examples
--------
>>> np.logical_xor(True, False)
True
>>> np.logical_xor(np.array([True, True], dtype='bool'), np.array([False, True], dtype='bool'))
array([ True, False])
"""
return _mx_nd_np.logical_xor(x1, x2, out)
@set_module('mxnet.numpy')
def greater_equal(x1, x2, out=None):
"""
Return the truth value of (x1 >= x2) element-wise.
Parameters
----------
x1, x2 : ndarrays or scalars
Input arrays. If ``x1.shape != x2.shape``, they must be broadcastable to
a common shape (which becomes the shape of the output).
out : ndarray, None, or tuple of ndarray and None, optional
A location into which the result is stored. If provided, it must have
a shape that the inputs broadcast to. If not provided or `None`,
a freshly-allocated array is returned.
Returns
-------
out : ndarray or scalar
Output array of type bool, element-wise comparison of `x1` and `x2`.
This is a scalar if both `x1` and `x2` are scalars.
See Also
--------
equal, greater, greater_equal, less, less_equal
Examples
--------
>>> np.greater_equal(np.ones((2, 1)), np.zeros((1, 3)))
array([[ True, True, True],
[ True, True, True]])
>>> np.greater_equal(1, np.ones(1))
array([True])
"""
return _mx_nd_np.greater_equal(x1, x2, out)
@set_module('mxnet.numpy')
def less_equal(x1, x2, out=None):
"""
Return the truth value of (x1 <= x2) element-wise.
Parameters
----------
x1, x2 : ndarrays or scalars
Input arrays. If ``x1.shape != x2.shape``, they must be broadcastable to
a common shape (which becomes the shape of the output).
out : ndarray, None, or tuple of ndarray and None, optional
A location into which the result is stored. If provided, it must have
a shape that the inputs broadcast to. If not provided or `None`,
a freshly-allocated array is returned.
Returns
-------
out : ndarray or scalar
Output array of type bool, element-wise comparison of `x1` and `x2`.
This is a scalar if both `x1` and `x2` are scalars.
See Also
--------
equal, greater, greater_equal, less, less_equal
Examples
--------
>>> np.less_equal(np.ones((2, 1)), np.zeros((1, 3)))
array([[False, False, False],
[False, False, False]])
>>> np.less_equal(1, np.ones(1))
array([True])
"""
return _mx_nd_np.less_equal(x1, x2, out)
@set_module('mxnet.numpy')
def roll(a, shift, axis=None):
"""
Roll array elements along a given axis.
Elements that roll beyond the last position are re-introduced at
the first.
Parameters
----------
a : ndarray
Input array.
shift : int or tuple of ints
The number of places by which elements are shifted. If a tuple,
then `axis` must be a tuple of the same size, and each of the
given axes is shifted by the corresponding number. If an int
while `axis` is a tuple of ints, then the same value is used for
all given axes.
axis : int or tuple of ints, optional
Axis or axes along which elements are shifted. By default, the
array is flattened before shifting, after which the original
shape is restored.
Returns
-------
res : ndarray
Output array, with the same shape as `a`.
Notes
-----
Supports rolling over multiple dimensions simultaneously.
Examples
--------
>>> x = np.arange(10)
>>> np.roll(x, 2)
array([8., 9., 0., 1., 2., 3., 4., 5., 6., 7.])
>>> np.roll(x, -2)
array([2., 3., 4., 5., 6., 7., 8., 9., 0., 1.])
>>> x2 = np.reshape(x, (2,5))
>>> x2
array([[0., 1., 2., 3., 4.],
[5., 6., 7., 8., 9.]])
>>> np.roll(x2, 1)
array([[9., 0., 1., 2., 3.],
[4., 5., 6., 7., 8.]])
>>> np.roll(x2, -1)
array([[1., 2., 3., 4., 5.],
[6., 7., 8., 9., 0.]])
>>> np.roll(x2, 1, axis=0)
array([[5., 6., 7., 8., 9.],
[0., 1., 2., 3., 4.]])
>>> np.roll(x2, -1, axis=0)
array([[5., 6., 7., 8., 9.],
[0., 1., 2., 3., 4.]])
>>> np.roll(x2, 1, axis=1)
array([[4., 0., 1., 2., 3.],
[9., 5., 6., 7., 8.]])
>>> np.roll(x2, -1, axis=1)
array([[1., 2., 3., 4., 0.],
[6., 7., 8., 9., 5.]])
"""
return _mx_nd_np.roll(a, shift, axis=axis)
@set_module('mxnet.numpy')
def rot90(m, k=1, axes=(0, 1)):
"""
Rotate an array by 90 degrees in the plane specified by axes.
Rotation direction is from the first towards the second axis.
Parameters
----------
m : ndarray
Array of two or more dimensions.
k : integer
Number of times the array is rotated by 90 degrees.
axes: (2,) array_like
The array is rotated in the plane defined by the axes.
Axes must be different.
Returns
-------
y : ndarray
A rotated view of `m`.
Notes
-----
rot90(m, k=1, axes=(1,0)) is the reverse of rot90(m, k=1, axes=(0,1))
rot90(m, k=1, axes=(1,0)) is equivalent to rot90(m, k=-1, axes=(0,1))
Examples
--------
>>> m = np.array([[1,2],[3,4]], 'int')
>>> m
array([[1, 2],
[3, 4]], dtype=int64)
>>> np.rot90(m)
array([[2, 4],
[1, 3]], dtype=int64)
>>> np.rot90(m, 2)
array([[4, 3],
[2, 1]], dtype=int64)
>>> m = np.arange(8).reshape((2,2,2))
>>> np.rot90(m, 1, (1,2))
array([[[1., 3.],
[0., 2.]],
[[5., 7.],
[4., 6.]]])
"""
return _mx_nd_np.rot90(m, k=k, axes=axes)
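# Illustrative sketch, not part of the library API: for a 2-D array a single
# 90-degree rotation is the transpose followed by a flip of the first axis.
# The helper name is hypothetical; ``flip`` is the module-level function above,
# and ``.T`` is assumed to be the ndarray transpose property.
def _example_rot90_2d_once(m):
    """Rotate a 2-D ndarray by 90 degrees using transpose + flip."""
    return flip(m.T, 0)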
@set_module('mxnet.numpy')
def hsplit(ary, indices_or_sections):
"""Split an array into multiple sub-arrays horizontally (column-wise).
This is equivalent to ``split`` with ``axis=0`` if ``ary`` has one
dimension, and to ``split`` with ``axis=1`` otherwise.
Parameters
----------
ary : ndarray
Array to be divided into sub-arrays.
indices_or_sections : int, list of ints or tuple of ints.
If `indices_or_sections` is an integer, N, the array will be divided
into N equal arrays along `axis`. If such a split is not possible,
an error is raised.
If `indices_or_sections` is a list of sorted integers, the entries
indicate where along `axis` the array is split.
If an index exceeds the dimension of the array along `axis`,
an error is raised, so each index must be less than or equal to
the dimension of the array along `axis`.
Returns
-------
sub-arrays : list of ndarrays
A list of sub-arrays.
.. note::
* If `indices_or_sections` is given as an integer, but a split
  does not result in equal division, a ValueError is raised.
* If `indices_or_sections` is an integer and the number is 1, an error
  is raised, because a single output from split is not supported yet.
See Also
--------
split : Split an array into multiple sub-arrays of equal size.
Examples
--------
>>> x = np.arange(16.0).reshape(4, 4)
>>> x
array([[ 0., 1., 2., 3.],
[ 4., 5., 6., 7.],
[ 8., 9., 10., 11.],
[12., 13., 14., 15.]])
>>> np.hsplit(x, 2)
[array([[ 0., 1.],
[ 4., 5.],
[ 8., 9.],
[12., 13.]]),
array([[ 2., 3.],
[ 6., 7.],
[10., 11.],
[14., 15.]])]
>>> np.hsplit(x, [3, 6])
[array([[ 0., 1., 2.],
[ 4., 5., 6.],
[ 8., 9., 10.],
[12., 13., 14.]]),
array([[ 3.],
[ 7.],
[11.],
[15.]]),
array([], shape=(4, 0), dtype=float32)]
With a higher dimensional array the split is still along the second axis.
>>> x = np.arange(8.0).reshape(2, 2, 2)
>>> x
array([[[ 0., 1.],
[ 2., 3.]],
[[ 4., 5.],
[ 6., 7.]]])
>>> np.hsplit(x, 2)
[array([[[ 0., 1.]],
[[ 4., 5.]]]),
array([[[ 2., 3.]],
[[ 6., 7.]]])]
    If ``ary`` has only one dimension, the split is performed along ``axis=0``.
    >>> x = np.arange(4)
    >>> x
    array([0., 1., 2., 3.])
>>> np.hsplit(x, 2)
[array([0., 1.]), array([2., 3.])]
    Splitting at a repeated index produces an empty sub-array:
>>> np.hsplit(x, [2, 2])
[array([0., 1.]), array([], dtype=float32), array([2., 3.])]
"""
return _mx_nd_np.hsplit(ary, indices_or_sections)
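# A minimal usage sketch, assuming this module is imported as ``np`` (e.g.
# ``from mxnet import np``): for a 2-D array, ``hsplit`` matches ``split``
# along ``axis=1``, so the two calls below return the same sub-arrays.
# >>> x = np.arange(16.0).reshape(4, 4)
# >>> left, right = np.hsplit(x, 2)
# >>> also_left, also_right = np.split(x, 2, axis=1)
# >>> bool(np.all(left == also_left)) and bool(np.all(right == also_right))
# True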
@set_module('mxnet.numpy')
def einsum(*operands, **kwargs):
r"""
einsum(subscripts, *operands, out=None, optimize=False)
Evaluates the Einstein summation convention on the operands.
Using the Einstein summation convention, many common multi-dimensional,
linear algebraic array operations can be represented in a simple fashion.
In *implicit* mode `einsum` computes these values.
In *explicit* mode, `einsum` provides further flexibility to compute
other array operations that might not be considered classical Einstein
summation operations, by disabling, or forcing summation over specified
subscript labels.
See the notes and examples for clarification.
Parameters
----------
subscripts : str
Specifies the subscripts for summation as comma separated list of
subscript labels. An implicit (classical Einstein summation)
calculation is performed unless the explicit indicator '->' is
included as well as subscript labels of the precise output form.
operands : list of ndarray
These are the arrays for the operation.
out : ndarray, optional
If provided, the calculation is done into this array.
optimize : {False, True}, optional
Controls if intermediate optimization should occur. No optimization
will occur if False. Defaults to False.
Returns
-------
output : ndarray
The calculation based on the Einstein summation convention.
Notes
-----
The Einstein summation convention can be used to compute
many multi-dimensional, linear algebraic array operations. `einsum`
provides a succinct way of representing these.
A non-exhaustive list of these operations,
which can be computed by `einsum`, is shown below along with examples:
* Trace of an array, :py:func:`np.trace`.
* Return a diagonal, :py:func:`np.diag`.
* Array axis summations, :py:func:`np.sum`.
* Transpositions and permutations, :py:func:`np.transpose`.
* Matrix multiplication and dot product, :py:func:`np.matmul` :py:func:`np.dot`.
* Vector inner and outer products, :py:func:`np.inner` :py:func:`np.outer`.
* Broadcasting, element-wise and scalar multiplication, :py:func:`np.multiply`.
* Tensor contractions, :py:func:`np.tensordot`.
The subscripts string is a comma-separated list of subscript labels,
where each label refers to a dimension of the corresponding operand.
Whenever a label is repeated it is summed, so ``np.einsum('i,i', a, b)``
is equivalent to :py:func:`np.inner(a,b) <np.inner>`. If a label
appears only once, it is not summed, so ``np.einsum('i', a)`` produces a
view of ``a`` with no changes. A further example ``np.einsum('ij,jk', a, b)``
describes traditional matrix multiplication and is equivalent to
:py:func:`np.matmul(a,b) <np.matmul>`. Repeated subscript labels in one
operand take the diagonal. For example, ``np.einsum('ii', a)`` is equivalent
to :py:func:`np.trace(a) <np.trace>`.
In *implicit mode*, the chosen subscripts are important
since the axes of the output are reordered alphabetically. This
means that ``np.einsum('ij', a)`` doesn't affect a 2D array, while
``np.einsum('ji', a)`` takes its transpose. Additionally,
``np.einsum('ij,jk', a, b)`` returns a matrix multiplication, while,
``np.einsum('ij,jh', a, b)`` returns the transpose of the
multiplication since subscript 'h' precedes subscript 'i'.
In *explicit mode* the output can be directly controlled by
specifying output subscript labels. This requires the
identifier '->' as well as the list of output subscript labels.
This feature increases the flexibility of the function since
summing can be disabled or forced when required. The call
``np.einsum('i->', a)`` is like :py:func:`np.sum(a, axis=-1) <np.sum>`,
and ``np.einsum('ii->i', a)`` is like :py:func:`np.diag(a) <np.diag>`.
The difference is that `einsum` does not allow broadcasting by default.
Additionally ``np.einsum('ij,jh->ih', a, b)`` directly specifies the
order of the output subscript labels and therefore returns matrix
multiplication, unlike the example above in implicit mode.
To enable and control broadcasting, use an ellipsis. Default
NumPy-style broadcasting is done by adding an ellipsis
to the left of each term, like ``np.einsum('...ii->...i', a)``.
To take the trace along the first and last axes,
you can do ``np.einsum('i...i', a)``, or to do a matrix-matrix
product with the left-most indices instead of rightmost, one can do
``np.einsum('ij...,jk...->ik...', a, b)``.
When there is only one operand, no axes are summed, and no output
parameter is provided, a view into the operand is returned instead
of a new array. Thus, taking the diagonal as ``np.einsum('ii->i', a)``
produces a view.
The ``optimize`` argument which will optimize the contraction order
of an einsum expression. For a contraction with three or more operands this
can greatly increase the computational efficiency at the cost of a larger
memory footprint during computation.
Typically a 'greedy' algorithm is applied which empirical tests have shown
returns the optimal path in the majority of cases. 'optimal' is not supported
for now.
.. note::
This function differs from the original `numpy.einsum
<https://docs.scipy.org/doc/numpy/reference/generated/numpy.einsum.html>`_ in
the following way(s):
* Does not support 'optimal' strategy
* Does not support the alternative subscript like
`einsum(op0, sublist0, op1, sublist1, ..., [sublistout])`
* Does not produce view in any cases
Examples
--------
>>> a = np.arange(25).reshape(5,5)
>>> b = np.arange(5)
>>> c = np.arange(6).reshape(2,3)
Trace of a matrix:
>>> np.einsum('ii', a)
array(60.)
Extract the diagonal (requires explicit form):
>>> np.einsum('ii->i', a)
array([ 0., 6., 12., 18., 24.])
Sum over an axis (requires explicit form):
>>> np.einsum('ij->i', a)
array([ 10., 35., 60., 85., 110.])
>>> np.sum(a, axis=1)
array([ 10., 35., 60., 85., 110.])
For higher dimensional arrays summing a single axis can be done with ellipsis:
>>> np.einsum('...j->...', a)
array([ 10., 35., 60., 85., 110.])
Compute a matrix transpose, or reorder any number of axes:
>>> np.einsum('ji', c)
array([[0., 3.],
[1., 4.],
[2., 5.]])
>>> np.einsum('ij->ji', c)
array([[0., 3.],
[1., 4.],
[2., 5.]])
>>> np.transpose(c)
array([[0., 3.],
[1., 4.],
[2., 5.]])
Vector inner products:
>>> np.einsum('i,i', b, b)
array(30.)
Matrix vector multiplication:
>>> np.einsum('ij,j', a, b)
array([ 30., 80., 130., 180., 230.])
>>> np.dot(a, b)
array([ 30., 80., 130., 180., 230.])
>>> np.einsum('...j,j', a, b)
array([ 30., 80., 130., 180., 230.])
Broadcasting and scalar multiplication:
>>> np.einsum('..., ...', np.array(3), c)
array([[ 0., 3., 6.],
[ 9., 12., 15.]])
>>> np.einsum(',ij', np.array(3), c)
array([[ 0., 3., 6.],
[ 9., 12., 15.]])
>>> np.multiply(3, c)
array([[ 0., 3., 6.],
[ 9., 12., 15.]])
Vector outer product:
>>> np.einsum('i,j', np.arange(2)+1, b)
array([[0., 1., 2., 3., 4.],
[0., 2., 4., 6., 8.]])
Tensor contraction:
>>> a = np.arange(60.).reshape(3,4,5)
>>> b = np.arange(24.).reshape(4,3,2)
>>> np.einsum('ijk,jil->kl', a, b)
array([[4400., 4730.],
[4532., 4874.],
[4664., 5018.],
[4796., 5162.],
[4928., 5306.]])
Example of ellipsis use:
>>> a = np.arange(6).reshape((3,2))
>>> b = np.arange(12).reshape((4,3))
>>> np.einsum('ki,jk->ij', a, b)
array([[10., 28., 46., 64.],
[13., 40., 67., 94.]])
>>> np.einsum('ki,...k->i...', a, b)
array([[10., 28., 46., 64.],
[13., 40., 67., 94.]])
>>> np.einsum('k...,jk', a, b)
array([[10., 28., 46., 64.],
[13., 40., 67., 94.]])
Chained array operations. For more complicated contractions, speed ups
might be achieved by repeatedly computing a 'greedy' path. Performance
improvements can be particularly significant with larger arrays:
>>> a = np.ones(64).reshape(2,4,8)
# Basic `einsum`: ~42.22ms (benchmarked on 3.4GHz Intel Xeon.)
>>> for iteration in range(500):
... np.einsum('ijk,ilm,njm,nlk,abc->',a,a,a,a,a)
# Greedy `einsum` (faster optimal path approximation): ~0.117ms
>>> for iteration in range(500):
... np.einsum('ijk,ilm,njm,nlk,abc->',a,a,a,a,a, optimize=True)
"""
return _mx_nd_np.einsum(*operands, **kwargs)
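# A minimal sketch, assuming this module is imported as ``np``: implicit mode
# infers the output subscripts alphabetically, so for a plain matrix product
# it agrees with the explicit '->' form.
# >>> a = np.arange(6.0).reshape(2, 3)
# >>> b = np.arange(12.0).reshape(3, 4)
# >>> implicit = np.einsum('ij,jk', a, b)      # output order inferred
# >>> explicit = np.einsum('ij,jk->ik', a, b)  # output order stated
# >>> bool(np.all(implicit == explicit))
# True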
@set_module('mxnet.numpy')
def insert(arr, obj, values, axis=None):
r"""Insert values along the given axis before the given indices.
Parameters
----------
arr : ndarray
Input array.
obj : int, slice or ndarray of int64
Object that defines the index or indices before which `values` is
inserted.
        Multiple insertions are supported when `obj` is a single scalar or a
        sequence with one element (only int32 and int64 elements are supported).
values : ndarray
Values to insert into `arr`.
If the type of values is different from that of arr, values is converted
to the type of arr.
axis : int, optional
Axis along which to insert `values`. If `axis` is None then `arr`
is flattened first.
Returns
-------
out : ndarray
A copy of `arr` with `values` inserted. Note that `insert`
does not occur in-place: a new array is returned. If
`axis` is None, `out` is a flattened array.
.. note::
* Note that for higher dimensional inserts `obj=0` behaves very different
from `obj=[0]` just like `arr[:,0,:] = values` is different from
`arr[:,[0],:] = values`.
        * If `obj` is an ndarray, its dtype must be int64.
Examples
--------
>>> a = np.array([[1, 1], [2, 2], [3, 3]])
>>> a
array([[1., 1.],
[2., 2.],
[3., 3.]])
>>> np.insert(a, 1, np.array(5))
array([1., 5., 1., 2., 2., 3., 3.])
>>> np.insert(a, 1, np.array(5), axis=1)
array([[1., 5., 1.],
[2., 5., 2.],
[3., 5., 3.]])
Difference between sequence and scalars:
>>> np.insert(a, np.array([1], dtype=np.int64), np.array([[1],[2],[3]]), axis=1)
array([[1., 1., 1.],
[2., 2., 2.],
[3., 3., 3.]])
>>> np.insert(a, 1, np.array([1, 2, 3]), axis=1)
array([[1., 1., 1.],
[2., 2., 2.],
[3., 3., 3.]])
>>> b = a.flatten()
>>> b
array([1., 1., 2., 2., 3., 3.])
>>> np.insert(b, np.array([2, 2], dtype=np.int64), np.array([5, 6]))
array([1., 1., 5., 6., 2., 2., 3., 3.])
>>> np.insert(b, slice(2, 4), np.array([5, 6]))
array([1., 1., 5., 2., 6., 2., 3., 3.])
# type casting
>>> np.insert(b.astype(np.int32), np.array([2, 2],dtype='int64'), np.array([7.13, False]))
array([1, 1, 7, 0, 2, 2, 3, 3], dtype=int32)
>>> x = np.arange(8).reshape(2, 4)
>>> idx = np.array([1, 3], dtype=np.int64)
>>> np.insert(x, idx, np.array([999]), axis=1)
array([[ 0., 999., 1., 2., 999., 3.],
[ 4., 999., 5., 6., 999., 7.]])
"""
return _mx_nd_np.insert(arr, obj, values, axis=axis)
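# A minimal sketch, assuming this module is imported as ``np``: ``insert``
# returns a copy with the new column added, leaving the input array unchanged.
# >>> a = np.array([[1, 1], [2, 2], [3, 3]])
# >>> b = np.insert(a, 1, np.array(5), axis=1)
# >>> a.shape, b.shape
# ((3, 2), (3, 3))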
@set_module('mxnet.numpy')
def nonzero(a):
"""
Return the indices of the elements that are non-zero.
Returns a tuple of arrays, one for each dimension of `a`,
containing the indices of the non-zero elements in that
dimension. The values in `a` are always returned in
row-major, C-style order.
To group the indices by element, rather than dimension, use `argwhere`,
which returns a row for each non-zero element.
Parameters
----------
a : ndarray
Input array.
Returns
-------
tuple_of_arrays : tuple
Indices of elements that are non-zero.
See Also
--------
ndarray.nonzero :
Equivalent ndarray method.
Notes
-----
While the nonzero values can be obtained with ``a[nonzero(a)]``, it is
recommended to use ``x[x.astype(bool)]`` or ``x[x != 0]`` instead, which
will correctly handle 0-d arrays.
Examples
--------
>>> x = np.array([[3, 0, 0], [0, 4, 0], [5, 6, 0]])
>>> x
array([[3, 0, 0],
[0, 4, 0],
[5, 6, 0]], dtype=int32)
>>> np.nonzero(x)
(array([0, 1, 2, 2], dtype=int64), array([0, 1, 0, 1], dtype=int64))
>>> x[np.nonzero(x)]
array([3, 4, 5, 6])
>>> np.transpose(np.stack(np.nonzero(x)))
array([[0, 0],
[1, 1],
[2, 0],
[2, 1]], dtype=int64)
A common use for ``nonzero`` is to find the indices of an array, where
a condition is True. Given an array `a`, the condition `a` > 3 is a
boolean array and since False is interpreted as 0, np.nonzero(a > 3)
yields the indices of the `a` where the condition is true.
>>> a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.int32)
>>> a > 3
array([[False, False, False],
[ True, True, True],
[ True, True, True]])
>>> np.nonzero(a > 3)
(array([1, 1, 1, 2, 2, 2], dtype=int64), array([0, 1, 2, 0, 1, 2], dtype=int64))
Using this result to index `a` is equivalent to using the mask directly:
>>> a[np.nonzero(a > 3)]
array([4, 5, 6, 7, 8, 9], dtype=int32)
>>> a[a > 3]
array([4, 5, 6, 7, 8, 9], dtype=int32)
``nonzero`` can also be called as a method of the array.
>>> (a > 3).nonzero()
(array([1, 1, 1, 2, 2, 2], dtype=int64), array([0, 1, 2, 0, 1, 2], dtype=int64))
"""
return _mx_nd_np.nonzero(a)
@set_module('mxnet.numpy')
def percentile(a, q, axis=None, out=None, overwrite_input=None, interpolation='linear', keepdims=False): # pylint: disable=too-many-arguments
"""
Compute the q-th percentile of the data along the specified axis.
Returns the q-th percentile(s) of the array elements.
Parameters
----------
a : array_like
Input array
q : array_like
Percentile or sequence of percentiles to compute.
axis : {int, tuple of int, None}, optional
Axis or axes along which the percentiles are computed. The default is to
compute the percentile(s) along a flattened version of the array.
out : ndarray, optional
Alternative output array in which to place the result. It must have the same
shape and buffer length as the expected output, but the type (of the output)
will be cast if necessary.
overwrite_input : bool, optional (Not supported yet)
If True, then allow the input array a to be modified by intermediate calculations,
to save memory. In this case, the contents of the input a after this function
completes is undefined.
interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
This optional parameter specifies the interpolation method to use when the
desired percentile lies between two data points i < j:
'linear': i + (j - i) * fraction, where fraction is the fractional part of the
index surrounded by i and j.
'lower': i.
'higher': j.
'nearest': i or j, whichever is nearest.
'midpoint': (i + j) / 2.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left in the result as
dimensions with size one. With this option, the result will broadcast
correctly against the original array a.
Returns
-------
percentile : scalar or ndarray
Output array.
Examples
--------
>>> a = np.array([[10, 7, 4], [3, 2, 1]])
>>> a
array([[10, 7, 4],
[ 3, 2, 1]])
>>> np.percentile(a, np.array(50))
array(3.5)
>>> np.percentile(a, np.array(50), axis=0)
array([6.5, 4.5, 2.5])
>>> np.percentile(a, np.array(50), axis=1)
array([7., 2.])
>>> np.percentile(a, np.array(50), axis=1, keepdims=True)
array([[7.],
[2.]])
>>> m = np.percentile(a, np.array(50), axis=0)
>>> out = np.zeros_like(m)
>>> np.percentile(a, np.array(50), axis=0, out=out)
array([6.5, 4.5, 2.5])
>>> m
array([6.5, 4.5, 2.5])
"""
return _mx_nd_np.percentile(a, q, axis=axis, out=out, overwrite_input=overwrite_input,
interpolation=interpolation, keepdims=keepdims)
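# A minimal sketch, assuming this module is imported as ``np``, of how the
# ``interpolation`` argument changes the result when the requested percentile
# falls between two data points (here 25% of the way lands between 10 and 20).
# >>> a = np.array([10., 20., 30., 40.])
# >>> np.percentile(a, np.array(25), interpolation='linear')
# array(17.5)
# >>> np.percentile(a, np.array(25), interpolation='lower')
# array(10.)
# >>> np.percentile(a, np.array(25), interpolation='higher')
# array(20.)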
@set_module('mxnet.numpy')
def median(a, axis=None, out=None, overwrite_input=None, keepdims=False):
r"""Compute the median along the specified axis.
Returns the median of the array elements.
Parameters
----------
a : array_like
Input array or object that can be converted to an array.
axis : {int, sequence of int, None}, optional
Axis or axes along which the medians are computed. The default
is to compute the median along a flattened version of the array.
A sequence of axes is supported since version 1.9.0.
out : ndarray, optional
Alternative output array in which to place the result. It must
have the same shape and buffer length as the expected output,
but the type (of the output) will be cast if necessary.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the original `arr`.
Returns
-------
median : ndarray
A new array holding the result. If the input contains integers
or floats smaller than ``float32``, then the output data-type is
``np.float32``. Otherwise, the data-type of the output is the
same as that of the input. If `out` is specified, that array is
returned instead.
See Also
--------
mean, percentile
Examples
--------
>>> a = np.array([[10, 7, 4], [3, 2, 1]])
>>> a
array([[10, 7, 4],
[ 3, 2, 1]])
>>> np.median(a)
3.5
>>> np.median(a, axis=0)
array([6.5, 4.5, 2.5])
>>> np.median(a, axis=1)
array([7., 2.])
"""
return _mx_nd_np.median(a, axis=axis, overwrite_input=overwrite_input,
keepdims=keepdims, out=out)
@set_module('mxnet.numpy')
def quantile(a, q, axis=None, out=None, overwrite_input=None, interpolation='linear', keepdims=False): # pylint: disable=too-many-arguments
"""Compute the q-th quantile of the data along the specified axis.
New in version 1.15.0.
Parameters
----------
a : ndarray
Input array or object that can be converted to an array.
q : ndarray
Quantile or sequence of quantiles to compute, which must be between 0 and 1 inclusive.
axis : {int, tuple of int, None}, optional
Axis or axes along which the quantiles are computed.
The default is to compute the quantile(s) along a flattened version of the array.
out : ndarray, optional
Alternative output array in which to place the result.
It must have the same shape and buffer length as the expected output,
but the type (of the output) will be cast if necessary.
interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
This optional parameter specifies the interpolation method to use
when the desired quantile lies between two data points i < j:
* linear: i + (j - i) * fraction, where fraction is the fractional part of the index surrounded by i and j.
* lower: i.
* higher: j.
* nearest: i or j, whichever is nearest.
* midpoint: (i + j) / 2.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left in the result as dimensions with size one.
With this option, the result will broadcast correctly against the original array a.
Returns
-------
quantile : ndarray
If q is a single quantile and axis=None, then the result is a scalar.
If multiple quantiles are given, first axis of the result corresponds to the quantiles.
The other axes are the axes that remain after the reduction of a.
If out is specified, that array is returned instead.
See also
--------
mean
.. note::
Given a vector V of length N, the q-th quantile of V is the value q of the way from the minimum
to the maximum in a sorted copy of V. The values and distances of the two nearest neighbors
as well as the interpolation parameter will determine the quantile if the normalized ranking
does not match the location of q exactly. This function is the same as the median if q=0.5,
the same as the minimum if q=0.0 and the same as the maximum if q=1.0.
This function differs from the original `numpy.quantile
<https://numpy.org/devdocs/reference/generated/numpy.quantile.html>`_ in
the following aspects:
        * q must be an ndarray even if it is a scalar
        * overwrite_input is not supported
Examples
--------
>>> a = np.array([[10, 7, 4], [3, 2, 1]])
>>> a
array([[10., 7., 4.],
[3., 2., 1.]])
>>> q = np.array(0.5)
>>> q
array(0.5)
>>> np.quantile(a, q)
array(3.5)
>>> np.quantile(a, q, axis=0)
array([6.5, 4.5, 2.5])
>>> np.quantile(a, q, axis=1)
array([7., 2.])
>>> np.quantile(a, q, axis=1, keepdims=True)
array([[7.],
[2.]])
>>> m = np.quantile(a, q, axis=0)
>>> out = np.zeros_like(m)
>>> np.quantile(a, q, axis=0, out=out)
array([6.5, 4.5, 2.5])
>>> out
array([6.5, 4.5, 2.5])
"""
return _mx_nd_np.quantile(a, q, axis=axis, out=out, overwrite_input=overwrite_input,
interpolation=interpolation, keepdims=keepdims)
@set_module('mxnet.numpy')
def shares_memory(a, b, max_work=None):
"""
Determine if two arrays share memory
Parameters
----------
a, b : ndarray
Input arrays
Returns
-------
out : bool
See Also
--------
may_share_memory
Examples
--------
    >>> np.shares_memory(np.array([1,2]), np.array([5,8,9]))
False
.. note::
This function differs from the original `numpy.shares_memory
<https://docs.scipy.org/doc/numpy/reference/generated/numpy.shares_memory.html>`_ in
the following way(s):
        * Does not support `max_work`; it is a dummy argument
        * It is actually the same as `may_share_memory` in MXNet np
"""
return _mx_nd_np.shares_memory(a, b, max_work)
@set_module('mxnet.numpy')
def may_share_memory(a, b, max_work=None):
"""
Determine if two arrays might share memory
A return of True does not necessarily mean that the two arrays
share any element. It just means that they *might*.
Only the memory bounds of a and b are checked by default.
Parameters
----------
a, b : ndarray
Input arrays
Returns
-------
out : bool
See Also
--------
shares_memory
Examples
--------
>>> np.may_share_memory(np.array([1,2]), np.array([5,8,9]))
False
>>> x = np.zeros([3, 4])
>>> np.may_share_memory(x[:,0], x[:,1])
True
.. note::
This function differs from the original `numpy.may_share_memory
<https://docs.scipy.org/doc/numpy/reference/generated/numpy.may_share_memory.html>`_ in
the following way(s):
        * Does not support `max_work`; it is a dummy argument
        * It is actually the same as `shares_memory` in MXNet np
"""
return _mx_nd_np.may_share_memory(a, b, max_work)
@set_module('mxnet.numpy')
def diff(a, n=1, axis=-1, prepend=None, append=None): # pylint: disable=redefined-outer-name
r"""
Calculate the n-th discrete difference along the given axis.
Parameters
----------
a : ndarray
Input array
n : int, optional
The number of times values are differenced. If zero, the input is returned as-is.
axis : int, optional
The axis along which the difference is taken, default is the last axis.
prepend, append : ndarray, optional
Not supported yet
Returns
-------
diff : ndarray
The n-th differences.
The shape of the output is the same as a except along axis where the dimension is smaller by n.
The type of the output is the same as the type of the difference between any two elements of a.
This is the same as the type of a in most cases.
Examples
--------
>>> x = np.array([1, 2, 4, 7, 0])
>>> np.diff(x)
array([ 1, 2, 3, -7])
>>> np.diff(x, n=2)
array([ 1, 1, -10])
>>> x = np.array([[1, 3, 6, 10], [0, 5, 6, 8]])
>>> np.diff(x)
array([[2, 3, 4],
[5, 1, 2]])
>>> np.diff(x, axis=0)
array([[-1, 2, 0, -2]])
Notes
-----
Optional inputs `prepend` and `append` are not supported yet
"""
    if prepend is not None or append is not None:
raise NotImplementedError('prepend and append options are not supported yet')
return _mx_nd_np.diff(a, n=n, axis=axis)
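# A minimal sketch, assuming this module is imported as ``np``: a second-order
# difference is the same as applying the first-order difference twice.
# >>> x = np.array([1, 2, 4, 7, 0])
# >>> bool(np.all(np.diff(x, n=2) == np.diff(np.diff(x))))
# True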
@set_module('mxnet.numpy')
def ediff1d(ary, to_end=None, to_begin=None):
"""
The differences between consecutive elements of an array.
Parameters
----------
ary : ndarray
If necessary, will be flattened before the differences are taken.
to_end : ndarray or scalar, optional
Number(s) to append at the end of the returned differences.
to_begin : ndarray or scalar, optional
Number(s) to prepend at the beginning of the returned differences.
Returns
-------
ediff1d : ndarray
The differences. Loosely, this is ``ary.flat[1:] - ary.flat[:-1]``.
Examples
--------
>>> x = np.array([1, 2, 4, 7, 0])
>>> np.ediff1d(x)
array([ 1., 2., 3., -7.])
>>> np.ediff1d(x, to_begin=-99, to_end=np.array([88, 99]))
    array([-99., 1., 2., 3., -7., 88., 99.])
The returned array is always 1D.
>>> y = np.array([[1, 2, 4], [1, 6, 24]])
>>> np.ediff1d(y)
array([ 1., 2., -3., 5., 18.])
>>> np.ediff1d(x, to_begin=y)
array([ 1., 2., 4., 1., 6., 24., 1., 2., 3., -7.])
"""
return _mx_nd_np.ediff1d(ary, to_end=to_end, to_begin=to_begin)
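# A minimal sketch, assuming this module is imported as ``np``: without
# ``to_begin``/``to_end``, ``ediff1d`` on a flattened array agrees with
# ``diff`` of the flattened array.
# >>> y = np.array([[1, 2, 4], [1, 6, 24]])
# >>> bool(np.all(np.ediff1d(y) == np.diff(y.flatten())))
# True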
@set_module('mxnet.numpy')
def resize(a, new_shape):
"""
Return a new array with the specified shape.
If the new array is larger than the original array, then the new
array is filled with repeated copies of `a`. Note that this behavior
is different from a.resize(new_shape) which fills with zeros instead
of repeated copies of `a`.
Parameters
----------
a : ndarray
Array to be resized.
new_shape : int or tuple of int
Shape of resized array.
Returns
-------
reshaped_array : ndarray
The new array is formed from the data in the old array, repeated
if necessary to fill out the required number of elements. The
data are repeated in the order that they are stored in memory.
See Also
--------
ndarray.resize : resize an array in-place.
Notes
-----
Warning: This functionality does **not** consider axes separately,
i.e. it does not apply interpolation/extrapolation.
It fills the return array with the required number of elements, taken
from `a` as they are laid out in memory, disregarding strides and axes.
(This is in case the new shape is smaller. For larger, see above.)
This functionality is therefore not suitable to resize images,
or data where each axis represents a separate and distinct entity.
Examples
--------
>>> a = np.array([[0, 1], [2, 3]])
>>> np.resize(a, (2, 3))
array([[0., 1., 2.],
[3., 0., 1.]])
>>> np.resize(a, (1, 4))
array([[0., 1., 2., 3.]])
>>> np.resize(a,(2, 4))
array([[0., 1., 2., 3.],
[0., 1., 2., 3.]])
"""
return _mx_nd_np.resize(a, new_shape)
@set_module('mxnet.numpy')
def interp(x, xp, fp, left=None, right=None, period=None): # pylint: disable=too-many-arguments
r"""One-dimensional linear interpolation.
Returns the one-dimensional piecewise linear interpolant to a function
with given values at discrete data-points.
Parameters
----------
x : ndarray
The x-coordinates of the interpolated values.
xp : 1-D array of floats
The x-coordinates of the data points, must be increasing if argument
`period` is not specified. Otherwise, `xp` is internally sorted after
normalizing the periodic boundaries with ``xp = xp % period``.
fp : 1-D array of floats
The y-coordinates of the data points, same length as `xp`.
left : optional float corresponding to fp
Value to return for `x < xp[0]`, default is `fp[0]`.
right : optional float corresponding to fp
Value to return for `x > xp[-1]`, default is `fp[-1]`.
period : None or float, optional
A period for the x-coordinates. This parameter allows the proper
interpolation of angular x-coordinates. Parameters `left` and `right`
are ignored if `period` is specified.
Returns
-------
y : float (corresponding to fp) or ndarray
The interpolated values, same shape as `x`.
Raises
------
ValueError
If `xp` and `fp` have different length
If `xp` or `fp` are not 1-D sequences
If `period == 0`
.. note::
Does not check that the x-coordinate sequence `xp` is increasing.
If `xp` is not increasing, the results are nonsense.
A simple check for increasing is::
np.all(np.diff(xp) > 0)
Examples
--------
>>> xp = [1, 2, 3]
>>> fp = [3, 2, 0]
>>> np.interp(2.5, xp, fp)
1.0
>>> np.interp([0, 1, 1.5, 2.72, 3.14], xp, fp)
array([ 3. , 3. , 2.5 , 0.56, 0. ])
>>> UNDEF = -99.0
>>> np.interp(3.14, xp, fp, right=UNDEF)
-99.0
Plot an interpolant to the sine function:
>>> x = np.linspace(0, 2*np.pi, 10)
>>> y = np.sin(x)
>>> xvals = np.linspace(0, 2*np.pi, 50)
>>> yinterp = np.interp(xvals, x, y)
>>> import matplotlib.pyplot as plt
>>> plt.plot(x, y, 'o')
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.plot(xvals, yinterp, '-x')
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.show()
Interpolation with periodic x-coordinates:
>>> x = [-180, -170, -185, 185, -10, -5, 0, 365]
>>> xp = [190, -190, 350, -350]
>>> fp = [5, 10, 3, 4]
>>> np.interp(x, xp, fp, period=360)
array([7.5, 5., 8.75, 6.25, 3., 3.25, 3.5, 3.75])
"""
return _mx_nd_np.interp(x, xp, fp, left=left, right=right, period=period)
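# A minimal sketch, assuming this module is imported as ``np``, of the
# monotonicity check recommended above, since ``interp`` does not verify
# that ``xp`` is increasing.
# >>> xp = [1, 2, 3]
# >>> fp = [3, 2, 0]
# >>> bool(np.all(np.diff(np.array(xp)) > 0))   # xp must be increasing
# True
# >>> np.interp(2.5, xp, fp)
# 1.0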
# pylint: disable=redefined-outer-name
@set_module('mxnet.numpy')
def full_like(a, fill_value, dtype=None, order='C', ctx=None, out=None): # pylint: disable=too-many-arguments
"""
Return a full array with the same shape and type as a given array.
Parameters
----------
a : ndarray
The shape and data-type of `a` define these same attributes of
the returned array.
fill_value : scalar
Fill value.
dtype : data-type, optional
Overrides the data type of the result.
Temporarily do not support boolean type.
order : {'C'}, optional
Whether to store multidimensional data in C- or Fortran-contiguous
(row- or column-wise) order in memory. Currently only supports C order.
    ctx : Context, optional
        Device context of the output array, e.g. a specific GPU.
out : ndarray or None, optional
A location into which the result is stored.
If provided, it must have the same shape and dtype as input ndarray.
If not provided or `None`, a freshly-allocated array is returned.
Returns
-------
out : ndarray
Array of `fill_value` with the same shape and type as `a`.
See Also
--------
empty_like : Return an empty array with shape and type of input.
ones_like : Return an array of ones with shape and type of input.
zeros_like : Return an array of zeros with shape and type of input.
full : Return a new array of given shape filled with value.
Examples
--------
>>> x = np.arange(6, dtype=int)
>>> np.full_like(x, 1)
array([1, 1, 1, 1, 1, 1], dtype=int64)
>>> np.full_like(x, 0.1)
array([0, 0, 0, 0, 0, 0], dtype=int64)
>>> np.full_like(x, 0.1, dtype=np.float64)
array([0.1, 0.1, 0.1, 0.1, 0.1, 0.1], dtype=float64)
>>> np.full_like(x, np.nan, dtype=np.float64)
array([nan, nan, nan, nan, nan, nan], dtype=float64)
>>> y = np.arange(6, dtype=np.float32)
>>> np.full_like(y, 0.1)
array([0.1, 0.1, 0.1, 0.1, 0.1, 0.1])
"""
return _mx_nd_np.full_like(a, fill_value=fill_value, dtype=dtype, order=order, ctx=ctx, out=out)
# pylint: enable=redefined-outer-name
# pylint: disable=redefined-outer-name
@set_module('mxnet.numpy')
def zeros_like(a, dtype=None, order='C', ctx=None, out=None):
"""
Return an array of zeros with the same shape and type as a given array.
Parameters
----------
a : ndarray
The shape and data-type of `a` define these same attributes of
the returned array.
dtype : data-type, optional
Overrides the data type of the result.
Temporarily do not support boolean type.
order : {'C'}, optional
Whether to store multidimensional data in C- or Fortran-contiguous
(row- or column-wise) order in memory. Currently only supports C order.
    ctx : Context, optional
        Device context of the output array, e.g. a specific GPU.
out : ndarray or None, optional
A location into which the result is stored.
If provided, it must have the same shape and dtype as input ndarray.
If not provided or `None`, a freshly-allocated array is returned.
Returns
-------
out : ndarray
Array of zeros with the same shape and type as a.
See Also
--------
empty_like : Return an empty array with shape and type of input.
ones_like : Return an array of ones with shape and type of input.
    full_like : Return a new array with shape of input filled with value.
    zeros : Return a new array setting values to zero.
Examples
--------
>>> x = np.arange(6)
>>> x = x.reshape((2, 3))
>>> x
array([[0., 1., 2.],
[3., 4., 5.]])
>>> np.zeros_like(x)
array([[0., 0., 0.],
[0., 0., 0.]])
>>> np.zeros_like(x, int)
array([[0, 0, 0],
[0, 0, 0]], dtype=int64)
>>> y = np.arange(3, dtype=float)
>>> y
array([0., 1., 2.], dtype=float64)
>>> np.zeros_like(y)
array([0., 0., 0.], dtype=float64)
"""
    return _mx_nd_np.full_like(a, fill_value=0, dtype=dtype, order=order, ctx=ctx, out=out)
# pylint: enable=redefined-outer-name
# pylint: disable=redefined-outer-name
@set_module('mxnet.numpy')
def ones_like(a, dtype=None, order='C', ctx=None, out=None):
"""
Return an array of ones with the same shape and type as a given array.
Parameters
----------
a : ndarray
The shape and data-type of `a` define these same attributes of
the returned array.
dtype : data-type, optional
Overrides the data type of the result.
Temporarily do not support boolean type.
order : {'C'}, optional
Whether to store multidimensional data in C- or Fortran-contiguous
(row- or column-wise) order in memory. Currently only supports C order.
    ctx : Context, optional
        Device context of the output array, e.g. a specific GPU.
out : ndarray or None, optional
A location into which the result is stored.
If provided, it must have the same shape and dtype as input ndarray.
If not provided or `None`, a freshly-allocated array is returned.
Returns
-------
out : ndarray
Array of ones with the same shape and type as a.
See Also
--------
empty_like : Return an empty array with shape and type of input.
zeros_like : Return an array of zeros with shape and type of input.
full_like : Return a new array with shape of input filled with value.
ones : Return a new array setting values to one.
Examples
--------
>>> x = np.arange(6)
>>> x = x.reshape((2, 3))
>>> x
array([[0., 1., 2.],
[3., 4., 5.]])
>>> np.ones_like(x)
array([[1., 1., 1.],
[1., 1., 1.]])
>>> np.ones_like(x, int)
array([[1, 1, 1],
[1, 1, 1]], dtype=int64)
>>> y = np.arange(3, dtype=float)
>>> y
array([0., 1., 2.], dtype=float64)
>>> np.ones_like(y)
array([1., 1., 1.], dtype=float64)
"""
return _mx_nd_np.full_like(a, fill_value=1, dtype=dtype, order=order, ctx=ctx, out=out)
# pylint: enable=redefined-outer-name
@set_module('mxnet.numpy')
def fill_diagonal(a, val, wrap=False):
"""
Fill the main diagonal of the given array of any dimensionality.
For an array `a` with ``a.ndim >= 2``, the diagonal is the list of
locations with indices ``a[i, ..., i]`` all identical. This function
modifies the input array in-place, it does not return a value.
Parameters
----------
a : array, at least 2-D.
Array whose diagonal is to be filled, it gets modified in-place.
val : scalar
Value to be written on the diagonal, its type must be compatible with
that of the array a.
wrap : bool
For tall matrices in NumPy version up to 1.6.2, the
diagonal "wrapped" after N columns. You can have this behavior
with this option. This affects only tall matrices.
Examples
--------
>>> a = np.zeros((3, 3), int)
>>> np.fill_diagonal(a, 5)
>>> a
array([[5, 0, 0],
[0, 5, 0],
[0, 0, 5]])
The same function can operate on a 4-D array:
>>> a = np.zeros((3, 3, 3, 3), int)
>>> np.fill_diagonal(a, 4)
We only show a few blocks for clarity:
>>> a[0, 0]
array([[4, 0, 0],
[0, 0, 0],
[0, 0, 0]])
>>> a[1, 1]
array([[0, 0, 0],
[0, 4, 0],
[0, 0, 0]])
>>> a[2, 2]
array([[0, 0, 0],
[0, 0, 0],
[0, 0, 4]])
The wrap option affects only tall matrices:
>>> # tall matrices no wrap
>>> a = np.zeros((5, 3), int)
>>> np.fill_diagonal(a, 4)
>>> a
array([[4, 0, 0],
[0, 4, 0],
[0, 0, 4],
[0, 0, 0],
[0, 0, 0]])
>>> # tall matrices wrap
>>> a = np.zeros((5, 3), int)
>>> np.fill_diagonal(a, 4, wrap=True)
>>> a
array([[4, 0, 0],
[0, 4, 0],
[0, 0, 4],
[0, 0, 0],
[4, 0, 0]])
>>> # wide matrices
>>> a = np.zeros((3, 5), int)
>>> np.fill_diagonal(a, 4, wrap=True)
>>> a
array([[4, 0, 0, 0, 0],
[0, 4, 0, 0, 0],
[0, 0, 4, 0, 0]])
The anti-diagonal can be filled by reversing the order of elements
using either `numpy.flipud` or `numpy.fliplr`.
>>> a = np.zeros((3, 3), int);
>>> np.fill_diagonal(np.fliplr(a), [1,2,3]) # Horizontal flip
>>> a
array([[0, 0, 1],
[0, 2, 0],
[3, 0, 0]])
>>> np.fill_diagonal(np.flipud(a), [1,2,3]) # Vertical flip
>>> a
array([[0, 0, 3],
[0, 2, 0],
[1, 0, 0]])
Note that the order in which the diagonal is filled varies depending
on the flip function.
"""
_mx_nd_np.fill_diagonal(a, val=val, wrap=wrap)
# pylint: disable=redefined-outer-name
@set_module('mxnet.numpy')
def nan_to_num(x, copy=True, nan=0.0, posinf=None, neginf=None, **kwargs):
"""
Replace NaN with zero and infinity with large finite numbers (default
behaviour) or with the numbers defined by the user using the `nan`,
`posinf` and/or `neginf` keywords.
If `x` is inexact, NaN is replaced by zero or by the user defined value in
`nan` keyword, infinity is replaced by the largest finite floating point
values representable by ``x.dtype`` or by the user defined value in
`posinf` keyword and -infinity is replaced by the most negative finite
floating point values representable by ``x.dtype`` or by the user defined
value in `neginf` keyword.
For complex dtypes, the above is applied to each of the real and
imaginary components of `x` separately.
If `x` is not inexact, then no replacements are made.
Parameters
----------
    x : scalar or ndarray
        Input data.
copy : bool, optional
Whether to create a copy of `x` (True) or to replace values
in-place (False). The in-place operation only occurs if
casting to an array does not require a copy.
Default is True.
Gluon does not support copy = False.
nan : int, float, optional
Value to be used to fill NaN values. If no value is passed
then NaN values will be replaced with 0.0.
posinf : int, float, optional
Value to be used to fill positive infinity values. If no value is
passed then positive infinity values will be replaced with a very
large number.
neginf : int, float, optional
Value to be used to fill negative infinity values. If no value is
passed then negative infinity values will be replaced with a very
small (or negative) number.
.. versionadded:: 1.13
Returns
-------
out : ndarray
`x`, with the non-finite values replaced. If `copy` is False, this may
be `x` itself.
Notes
-----
NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic
(IEEE 754). This means that Not a Number is not equivalent to infinity.
Examples
--------
>>> np.nan_to_num(np.inf)
1.7976931348623157e+308
>>> np.nan_to_num(-np.inf)
-1.7976931348623157e+308
>>> np.nan_to_num(np.nan)
0.0
>>> x = np.array([np.inf, -np.inf, np.nan, -128, 128])
>>> np.nan_to_num(x)
array([ 3.4028235e+38, -3.4028235e+38, 0.0000000e+00, -1.2800000e+02,
1.2800000e+02])
>>> np.nan_to_num(x, nan=-9999, posinf=33333333, neginf=33333333)
array([ 3.3333332e+07, 3.3333332e+07, -9.9990000e+03, -1.2800000e+02,
1.2800000e+02])
    >>> y = np.array([[-1, 0, 1],[9999,234,-14222]],dtype="float64")/0
    >>> y
array([[-inf, nan, inf],
[ inf, inf, -inf]], dtype=float64)
>>> np.nan_to_num(y)
array([[-1.79769313e+308, 0.00000000e+000, 1.79769313e+308],
[ 1.79769313e+308, 1.79769313e+308, -1.79769313e+308]], dtype=float64)
>>> np.nan_to_num(y, nan=111111, posinf=222222)
array([[-1.79769313e+308, 1.11111000e+005, 2.22222000e+005],
[ 2.22222000e+005, 2.22222000e+005, -1.79769313e+308]], dtype=float64)
>>> y
array([[-inf, nan, inf],
[ inf, inf, -inf]], dtype=float64)
>>> np.nan_to_num(y, copy=False, nan=111111, posinf=222222)
array([[-1.79769313e+308, 1.11111000e+005, 2.22222000e+005],
[ 2.22222000e+005, 2.22222000e+005, -1.79769313e+308]], dtype=float64)
>>> y
array([[-1.79769313e+308, 1.11111000e+005, 2.22222000e+005],
[ 2.22222000e+005, 2.22222000e+005, -1.79769313e+308]], dtype=float64)
"""
return _mx_nd_np.nan_to_num(x, copy=copy, nan=nan, posinf=posinf, neginf=neginf)
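# A minimal sketch, assuming this module is imported as ``np``: only NaN is
# given a custom replacement here, while the infinities fall back to the
# default large finite substitutes, so the result is entirely finite.
# >>> x = np.array([np.nan, np.inf, -np.inf, 1.0])
# >>> y = np.nan_to_num(x, nan=-1.0)
# >>> bool(np.all(np.isfinite(y)))
# True
# >>> float(y[0])
# -1.0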
@set_module('mxnet.numpy')
def squeeze(x, axis=None):
r"""Remove single-dimensional entries from the shape of an array.
Parameters
----------
a : array_like
Input data.
axis : None or int or tuple of ints, optional
Selects a subset of the single-dimensional entries in the
shape. If an axis is selected with shape entry greater than
one, an error is raised.
Returns
-------
squeezed : ndarray
The input array, but with all or a subset of the
dimensions of length 1 removed. This is always `a` itself
or a view into `a`.
Raises
------
ValueError
If `axis` is not `None`, and an axis being squeezed is not of length 1
See Also
--------
expand_dims : The inverse operation, adding singleton dimensions
reshape : Insert, remove, and combine dimensions, and resize existing ones
Examples
--------
>>> x = np.array([[[0], [1], [2]]])
>>> x.shape
(1, 3, 1)
>>> np.squeeze(x).shape
(3,)
>>> np.squeeze(x, axis=0).shape
(3, 1)
>>> np.squeeze(x, axis=1).shape
Traceback (most recent call last):
...
ValueError: cannot select an axis to squeeze out which has size not equal to one
>>> np.squeeze(x, axis=2).shape
(1, 3)
"""
return _mx_nd_np.squeeze(x, axis=axis)
@set_module('mxnet.numpy')
@wrap_np_unary_func
def isnan(x, out=None, **kwargs):
"""
Test element-wise for NaN and return result as a boolean array.
Parameters
----------
x : ndarray
Input array.
out : ndarray or None, optional
A location into which the result is stored.
If provided, it must have the same shape and dtype as input ndarray.
If not provided or `None`, a freshly-allocated array is returned.
Returns
-------
y : ndarray or bool
True where x is NaN, false otherwise.
This is a scalar if x is a scalar.
Notes
-----
NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic (IEEE 754).
.. note::
        This function differs from the original `numpy.isnan
<https://docs.scipy.org/doc/numpy/reference/generated/numpy.isnan.html>`_ in
the following aspects:
* Does not support complex number for now
* Input type does not support Python native iterables(list, tuple, ...).
* ``out`` param: cannot perform auto broadcasting. ``out`` ndarray's shape must be
the same as the expected output.
* ``out`` param: cannot perform auto type cast. ``out`` ndarray's dtype must be the
same as the expected output.
* ``out`` param does not support scalar input case.
Examples
--------
>>> np.isnan(np.nan)
True
>>> np.isnan(np.inf)
False
>>> np.isnan(np.array([np.log(-1.),1.,np.log(0)]))
array([ True, False, False])
"""
return _mx_nd_np.isnan(x, out=out, **kwargs)
@set_module('mxnet.numpy')
@wrap_np_unary_func
def isinf(x, out=None, **kwargs):
"""
Test element-wise for positive or negative infinity.
Parameters
----------
x : ndarray
Input array.
out : ndarray or None, optional
A location into which the result is stored.
If provided, it must have the same shape and dtype as input ndarray.
If not provided or `None`, a freshly-allocated array is returned.
Returns
-------
y : ndarray or bool
True where x is positive or negative infinity, false otherwise.
This is a scalar if x is a scalar.
Notes
-----
NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic (IEEE 754).
This means that Not a Number is not equivalent to infinity.
.. note::
        This function differs from the original `numpy.isinf
        <https://docs.scipy.org/doc/numpy/reference/generated/numpy.isinf.html>`_ in
the following aspects:
* Does not support complex number for now
* Input type does not support Python native iterables(list, tuple, ...).
* ``out`` param: cannot perform auto broadcasting. ``out`` ndarray's shape must be
the same as the expected output.
* ``out`` param: cannot perform auto type cast. ``out`` ndarray's dtype must be the
same as the expected output.
* ``out`` param does not support scalar input case.
Examples
--------
>>> np.isinf(np.inf)
True
>>> np.isinf(np.nan)
False
>>> np.isinf(np.array([np.inf, -np.inf, 1.0, np.nan]))
array([ True, True, False, False])
>>> x = np.array([-np.inf, 0., np.inf])
>>> y = np.array([True, True, True], dtype=np.bool_)
>>> np.isinf(x, y)
array([ True, False, True])
>>> y
array([ True, False, True])
"""
return _mx_nd_np.isinf(x, out=out, **kwargs)
@set_module('mxnet.numpy')
@wrap_np_unary_func
def isposinf(x, out=None, **kwargs):
"""
Test element-wise for positive infinity, return result as bool array.
Parameters
----------
x : ndarray
Input array.
out : ndarray or None, optional
A location into which the result is stored.
If provided, it must have the same shape and dtype as input ndarray.
If not provided or `None`, a freshly-allocated array is returned.
Returns
-------
y : ndarray or bool
True where x is positive infinity, false otherwise.
This is a scalar if x is a scalar.
Notes
-----
NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic (IEEE 754).
This means that Not a Number is not equivalent to infinity.
Examples
--------
>>> np.isposinf(np.inf)
True
>>> np.isposinf(-np.inf)
False
>>> np.isposinf(np.nan)
False
>>> np.isposinf(np.array([-np.inf, 0., np.inf]))
array([False, False, True])
>>> x = np.array([-np.inf, 0., np.inf])
>>> y = np.array([True, True, True], dtype=np.bool)
>>> np.isposinf(x, y)
array([False, False, True])
>>> y
array([False, False, True])
"""
return _mx_nd_np.isposinf(x, out=out, **kwargs)
@set_module('mxnet.numpy')
@wrap_np_unary_func
def isneginf(x, out=None, **kwargs):
"""
Test element-wise for negative infinity, return result as bool array.
Parameters
----------
x : ndarray
Input array.
out : ndarray or None, optional
A location into which the result is stored.
If provided, it must have the same shape and dtype as input ndarray.
If not provided or `None`, a freshly-allocated array is returned.
Returns
-------
y : ndarray or bool
True where x is negative infinity, false otherwise.
This is a scalar if x is a scalar.
Notes
-----
NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic (IEEE 754).
This means that Not a Number is not equivalent to infinity.
Examples
--------
>>> np.isneginf(-np.inf)
True
>>> np.isneginf(np.inf)
False
>>> np.isneginf(float('-inf'))
True
>>> np.isneginf(np.array([-np.inf, 0., np.inf]))
array([ True, False, False])
>>> x = np.array([-np.inf, 0., np.inf])
>>> y = np.array([True, True, True], dtype=np.bool)
>>> np.isneginf(x, y)
array([ True, False, False])
>>> y
array([ True, False, False])
"""
return _mx_nd_np.isneginf(x, out=out, **kwargs)
@set_module('mxnet.numpy')
@wrap_np_unary_func
def isfinite(x, out=None, **kwargs):
"""
Test element-wise for finiteness (not infinity or not Not a Number).
Parameters
----------
x : ndarray
Input array.
out : ndarray or None, optional
A location into which the result is stored.
If provided, it must have the same shape and dtype as input ndarray.
If not provided or `None`, a freshly-allocated array is returned.
Returns
-------
y : ndarray or bool
True where x is negative infinity, false otherwise.
This is a scalar if x is a scalar.
Notes
-----
Not a Number, positive infinity and negative infinity are considered to be non-finite.
NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic (IEEE 754).
This means that Not a Number is not equivalent to infinity.
Also that positive infinity is not equivalent to negative infinity.
But infinity is equivalent to positive infinity. Errors result if the second argument
is also supplied when x is a scalar input, or if first and second arguments have different shapes.
Examples
--------
>>> np.isfinite(1)
True
>>> np.isfinite(0)
True
>>> np.isfinite(np.nan)
False
>>> np.isfinite(np.inf)
False
>>> np.isfinite(-np.inf)
False
>>> np.isfinite(np.array([np.log(-1.),1.,np.log(0)]))
array([False, True, False])
>>> x = np.array([-np.inf, 0., np.inf])
>>> y = np.array([True, True, True], dtype=np.bool)
>>> np.isfinite(x, y)
array([False, True, False])
>>> y
array([False, True, False])
"""
return _mx_nd_np.isfinite(x, out=out, **kwargs)
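# A minimal sketch, assuming this module is imported as ``np``: an element is
# finite exactly when it is neither NaN nor positive/negative infinity, which
# the three checks below illustrate on the same input.
# >>> x = np.array([-np.inf, 0., np.inf, np.nan])
# >>> np.isfinite(x)
# array([False,  True, False, False])
# >>> np.isinf(x)
# array([ True, False,  True, False])
# >>> np.isnan(x)
# array([False, False, False,  True])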
@set_module('mxnet.numpy')
def where(condition, x=None, y=None):
"""where(condition, [x, y])
Return elements chosen from `x` or `y` depending on `condition`.
.. note::
When only `condition` is provided, this function is a shorthand for
``np.asarray(condition).nonzero()``. The rest of this documentation
covers only the case where all three arguments are provided.
Parameters
----------
condition : ndarray
Where True, yield `x`, otherwise yield `y`.
x, y : ndarray
Values from which to choose. `x`, `y` and `condition` need to be
broadcastable to some shape. `x` and `y` must have the same dtype.
Returns
-------
out : ndarray
An array with elements from `x` where `condition` is True, and elements
from `y` elsewhere.
Notes
-----
If all the arrays are 1-D, `where` is equivalent to::
[xv if c else yv
for c, xv, yv in zip(condition, x, y)]
Examples
--------
>>> a = np.arange(10)
>>> a
array([0., 1., 2., 3., 4., 5., 6., 7., 8., 9.])
>>> np.where(a < 5, a, 10*a)
array([ 0., 1., 2., 3., 4., 50., 60., 70., 80., 90.])
This can be used on multidimensional arrays too:
>>> cond = np.array([[True, False], [True, True]])
>>> x = np.array([[1, 2], [3, 4]])
>>> y = np.array([[9, 8], [7, 6]])
>>> np.where(cond, x, y)
array([[1., 8.],
[3., 4.]])
The shapes of x, y, and the condition are broadcast together:
>>> x, y = onp.ogrid[:3, :4]
>>> x = np.array(x)
>>> y = np.array(y)
>>> np.where(x < y, x, 10 + y) # both x and 10+y are broadcast
array([[10, 0, 0, 0],
[10, 11, 1, 1],
[10, 11, 12, 2]], dtype=int64)
>>> a = np.array([[0, 1, 2],
... [0, 2, 4],
... [0, 3, 6]])
>>> np.where(a < 4, a, -1) # -1 is broadcast
array([[ 0., 1., 2.],
[ 0., 2., -1.],
[ 0., 3., -1.]])
"""
return _mx_nd_np.where(condition, x, y)
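# A minimal sketch, assuming this module is imported as ``np``: element-wise
# selection picks from ``x`` where the condition holds and from ``y``
# elsewhere, matching the 1-D list comprehension quoted in the Notes.
# >>> cond = np.array([True, False, True])
# >>> x = np.array([1., 2., 3.])
# >>> y = np.array([10., 20., 30.])
# >>> np.where(cond, x, y)
# array([ 1., 20.,  3.])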
@set_module('mxnet.numpy')
def polyval(p, x):
"""
Evaluate a polynomial at specific values.
If p is of length N, this function returns the value:
p[0]*x**(N-1) + p[1]*x**(N-2) + ... + p[N-2]*x + p[N-1]
If x is a sequence, then p(x) is returned for each element of x.
If x is another polynomial then the composite polynomial p(x(t)) is returned.
Parameters
----------
p : ndarray
1D array of polynomial coefficients (including coefficients equal to zero)
from highest degree to the constant term.
x : ndarray
An array of numbers, at which to evaluate p.
Returns
-------
values : ndarray
Result array of polynomials
.. note::
This function differs from the original `numpy.polyval
<https://numpy.org/devdocs/reference/generated/numpy.polyval.html>`_ in
the following way(s):
* Does not support poly1d.
* X should be ndarray type even if it contains only one element.
Examples
--------
    >>> p = np.array([3, 0, 1])
    >>> p
    array([3., 0., 1.])
    >>> x = np.array([5])
    >>> x
    array([5.])
    >>> np.polyval(p, x) # 3 * 5**2 + 0 * 5**1 + 1
    array([76.])
    >>> x = np.array([5, 4])
    >>> x
    array([5., 4.])
>>> np.polyval(p, x)
array([76., 49.])
"""
return _mx_nd_np.polyval(p, x)
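# A minimal sketch, assuming this module is imported as ``np``: evaluating
# p[0]*x**2 + p[1]*x + p[2] by hand reproduces the value returned by
# ``polyval`` for p = [3, 0, 1] at x = 5.
# >>> p = np.array([3., 0., 1.])
# >>> x = np.array([5.])
# >>> manual = p[0] * x ** 2 + p[1] * x + p[2]
# >>> bool(np.all(np.polyval(p, x) == manual))
# True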
@set_module('mxnet.numpy')
def bincount(x, weights=None, minlength=0):
"""
Count number of occurrences of each value in array of non-negative ints.
Parameters
----------
x : ndarray
        Input array, 1-dimensional, containing non-negative ints.
    weights : ndarray, optional
        Input weights, same shape as `x`.
    minlength : int, optional
        A minimum number of bins for the output.
Returns
--------
out : ndarray
the result of binning the input array. The length of out is equal to amax(x)+1.
Raises
--------
    ValueError
        If the input is not 1-dimensional, contains elements with negative values,
        or if `minlength` is negative.
TypeError
If the type of the input is float or complex.
Examples
--------
>>> np.bincount(np.arange(5))
array([1, 1, 1, 1, 1])
>>> np.bincount(np.array([0, 1, 1, 3, 2, 1, 7]))
array([1, 3, 1, 1, 0, 0, 0, 1])
>>> x = np.array([0, 1, 1, 3, 2, 1, 7, 23])
>>> np.bincount(x).size == np.amax(x)+1
True
>>> np.bincount(np.arange(5, dtype=float))
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
TypeError: array cannot be safely cast to required type
>>> w = np.array([0.3, 0.5, 0.2, 0.7, 1., -0.6]) # weights
>>> x = np.array([0, 1, 1, 2, 2, 2])
>>> np.bincount(x, weights=w)
array([ 0.3, 0.7, 1.1])
"""
return _mx_nd_np.bincount(x, weights=weights, minlength=minlength)
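# A minimal sketch, assuming this module is imported as ``np``: with
# ``weights`` each bin accumulates the weights of its occurrences instead of
# a plain count, and ``minlength`` pads the output with trailing empty bins.
# >>> x = np.array([0, 1, 1, 2, 2, 2])
# >>> w = np.array([0.3, 0.5, 0.2, 0.7, 1., -0.6])
# >>> np.bincount(x, weights=w)
# array([ 0.3,  0.7,  1.1])
# >>> np.bincount(x, weights=w, minlength=5).shape
# (5,)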
@set_module('mxnet.numpy')
def atleast_1d(*arys):
"""
Convert inputs to arrays with at least one dimension.
Scalar inputs are converted to 1-dimensional arrays, whilst higher-dimensional inputs are preserved.
Parameters
----------
arys1, arys2, ... : ndarray
One or more input arrays.
Returns
-------
ret : ndarray
An array, or list of arrays, each with a.ndim >= 1. Copies are made only if necessary.
See also
--------
atleast_2d, atleast_3d
Examples
--------
>>> np.atleast_1d(1.0)
array([1.])
>>> x = np.arange(9.0).reshape(3,3)
>>> np.atleast_1d(x)
array([[0., 1., 2.],
[3., 4., 5.],
[6., 7., 8.]])
>>> np.atleast_1d(np.array(1), np.array([3, 4]))
[array([1.]), array([3., 4.])]
"""
res = []
for ary in arys:
if not isinstance(ary, NDArray):
ary = array(ary)
res.append(ary)
return _mx_nd_np.atleast_1d(*res)
@set_module('mxnet.numpy')
def atleast_2d(*arys):
"""
Convert inputs to arrays with at least two dimensions.
Parameters
----------
arys1, arys2, ... : ndarray
One or more input arrays.
Returns
-------
ret : ndarray
An array, or list of arrays, each with a.ndim >= 2. Copies are made only if necessary.
See also
--------
atleast_1d, atleast_3d
Examples
--------
>>> np.atleast_2d(3.0)
array([[3.]])
>>> x = np.arange(3.0)
>>> np.atleast_2d(x)
array([[0., 1., 2.]])
>>> np.atleast_2d(np.array(1), np.array([1, 2]), np.array([[1, 2]]))
[array([[1.]]), array([[1., 2.]]), array([[1., 2.]])]
"""
res = []
for ary in arys:
if not isinstance(ary, NDArray):
ary = array(ary)
res.append(ary)
return _mx_nd_np.atleast_2d(*res)
@set_module('mxnet.numpy')
def atleast_3d(*arys):
"""
    Convert inputs to arrays with at least three dimensions.
Parameters
----------
arys1, arys2, ... : ndarray
One or more input arrays.
Returns
-------
ret : ndarray
An array, or list of arrays, each with a.ndim >= 3.
For example, a 1-D array of shape (N,) becomes a view of shape (1, N, 1),
and a 2-D array of shape (M, N) becomes a view of shape (M, N, 1).
See also
--------
atleast_1d, atleast_2d
Examples
--------
>>> np.atleast_3d(3.0)
array([[[3.]]])
>>> x = np.arange(3.0)
>>> np.atleast_3d(x).shape
(1, 3, 1)
>>> x = np.arange(12.0).reshape(4,3)
>>> np.atleast_3d(x).shape
(4, 3, 1)
>>> for arr in np.atleast_3d(np.array([1, 2]), np.array([[1, 2]]), np.array([[[1, 2]]])):
... print(arr, arr.shape)
...
[[[1.]
[2.]]] (1, 2, 1)
[[[1.]
[2.]]] (1, 2, 1)
[[[1. 2.]]] (1, 1, 2)
"""
res = []
for ary in arys:
if not isinstance(ary, NDArray):
ary = array(ary)
res.append(ary)
return _mx_nd_np.atleast_3d(*res)
@set_module('mxnet.numpy')
def pad(x, pad_width=None, mode="constant", **kwargs): # pylint: disable=too-many-arguments
# pylint: disable=too-many-return-statements
"""
Pad an array.
Parameters
----------
    x : array_like of rank N
The array to pad.
pad_width : {sequence, array_like, int}
Number of values padded to the edges of each axis.
((before_1, after_1), ... (before_N, after_N)) unique pad widths
for each axis.
((before, after),) yields same before and after pad for each axis.
(pad,) or int is a shortcut for before = after = pad width for all
axes.
mode : str or function, optional
One of the following string values or a user supplied function.
'constant' (default)
Pads with a constant value.
'edge'
Pads with the edge values of array.
'linear_ramp'
not supported yet
'maximum'
Pads with the maximum value of all of the
vector along each axis.
'mean'
not supported yet
'median'
not supported yet
'minimum'
Pads with the minimum value of all of the
vector along each axis.
'reflect'
Pads with the reflection of the vector mirrored on
the first and last values of the vector along each
axis.
'symmetric'
Pads with the reflection of the vector mirrored
along the edge of the array.
'wrap'
not supported yet.
'empty'
not supported yet.
<function>
not supported yet.
stat_length : not supported yet
constant_values : scalar, optional
Used in 'constant'. The values to set the padded values for each
axis.
Default is 0.
end_values : not supported yet
reflect_type : {'even', 'odd'}, optional
only support even now
Returns
-------
pad : ndarray
Padded array of rank equal to `array` with shape increased
according to `pad_width`.
Examples
--------
>>> a = [1, 2, 3, 4, 5]
>>> np.pad(a, (2, 3), 'edge')
array([1, 1, 1, ..., 5, 5, 5])
>>> np.pad(a, (2, 2), 'maximum')
array([5, 5, 1, 2, 3, 4, 5, 5, 5])
    >>> np.pad(a, (2, 2), 'minimum')
    array([1, 1, 1, 2, 3, 4, 5, 1, 1])
>>> a = [[1, 2], [3, 4]]
>>> np.pad(a, ((3, 2), (2, 3)), 'minimum')
array([[1, 1, 1, 2, 1, 1, 1],
[1, 1, 1, 2, 1, 1, 1],
[1, 1, 1, 2, 1, 1, 1],
[1, 1, 1, 2, 1, 1, 1],
[3, 3, 3, 4, 3, 3, 3],
[1, 1, 1, 2, 1, 1, 1],
[1, 1, 1, 2, 1, 1, 1]])
>>> a = [1, 2, 3, 4, 5]
>>> np.pad(a, (2, 3), 'reflect')
array([3, 2, 1, 2, 3, 4, 5, 4, 3, 2])
>>> np.pad(a, (2, 3), 'symmetric')
array([2, 1, 1, 2, 3, 4, 5, 5, 4, 3])
>>> a = np.arange(6)
>>> a = a.reshape((2, 3))
    >>> np.pad(a, ((2, 2), (2, 2)), 'constant', constant_values=10)
array([[10, 10, 10, 10, 10, 10, 10],
[10, 10, 10, 10, 10, 10, 10],
[10, 10, 0, 1, 2, 10, 10],
[10, 10, 3, 4, 5, 10, 10],
[10, 10, 10, 10, 10, 10, 10],
[10, 10, 10, 10, 10, 10, 10]])
"""
return _mx_nd_np.pad(x, pad_width=pad_width, mode=mode, **kwargs)
# pylint: disable=redefined-outer-name
@set_module('mxnet.numpy')
def prod(a, axis=None, dtype=None, out=None, keepdims=False, initial=None): # pylint: disable=too-many-arguments
"""
Return the product of array elements over a given axis.
Parameters
----------
a : array_like
Input data.
axis : None or int or tuple of ints, optional
Axis or axes along which a product is performed. The default,
axis=None, will calculate the product of all the elements in the
input array. If axis is negative it counts from the last to the
first axis.
.. versionadded:: 1.7.0
If axis is a tuple of ints, a product is performed on all of the
axes specified in the tuple instead of a single axis or all the
axes as before.
dtype : dtype, optional
The type of the returned array, as well as of the accumulator in
which the elements are multiplied. The dtype of `a` is used by
default unless `a` has an integer dtype of less precision than the
default platform integer. In that case, if `a` is signed then the
platform integer is used while if `a` is unsigned then an unsigned
integer of the same precision as the platform integer is used.
out : ndarray, optional
Alternative output array in which to place the result. It must have
the same shape as the expected output, but the type of the output
values will be cast if necessary.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left in the
result as dimensions with size one. With this option, the result
will broadcast correctly against the input array.
If the default value is passed, then `keepdims` will not be
passed through to the `prod` method of sub-classes of
`ndarray`, however any non-default value will be. If the
sub-class' method does not implement `keepdims` any
exceptions will be raised.
initial : scalar, optional
The starting value for this product. See `~numpy.ufunc.reduce` for details.
where : not supported
Returns
-------
product_along_axis : ndarray, see `dtype` parameter above.
An array shaped as `a` but with the specified axis removed.
Returns a reference to `out` if specified.
Examples
--------
By default, calculate the product of all elements:
>>> np.prod([1.,2.])
2.0
Even when the input array is two-dimensional:
>>> np.prod([[1.,2.],[3.,4.]])
24.0
But we can also specify the axis over which to multiply:
>>> np.prod([[1.,2.],[3.,4.]], axis=1)
array([ 2., 12.])
Or select specific elements to include:
>>> np.prod([1., np.nan, 3.], where=[True, False, True])
3.0
If the type of `x` is unsigned, then the output type is
the unsigned platform integer:
>>> x = np.array([1, 2, 3], dtype=np.uint8)
>>> np.prod(x).dtype == np.uint
True
If `x` is of a signed integer type, then the output type
is the default platform integer:
>>> x = np.array([1, 2, 3], dtype=np.int8)
>>> np.prod(x).dtype == int
True
You can also start the product with a value other than one:
>>> np.prod([1, 2], initial=5)
10
"""
return _mx_nd_np.prod(a, axis=axis, dtype=dtype, keepdims=keepdims, initial=initial, out=out)
@set_module('mxnet.numpy')
def dot(a, b, out=None):
"""
Dot product of two arrays. Specifically,
* If both `a` and `b` are 1-D arrays, it is inner product of vectors
* If both `a` and `b` are 2-D arrays, it is matrix multiplication,
* If either `a` or `b` is 0-D (scalar), it is equivalent to :func:`multiply`
and using ``np.multiply(a, b)`` or ``a * b`` is preferred.
* If `a` is an N-D array and `b` is a 1-D array, it is a sum product over
the last axis of `a` and `b`.
* If `a` is an N-D array and `b` is a 2-D array, it is a
sum product over the last axis of `a` and the second-to-last axis of `b`::
dot(a, b)[i,j,k] = sum(a[i,j,:] * b[:,k])
Parameters
----------
a : ndarray
First argument.
b : ndarray
Second argument.
out : ndarray, optional
Output argument. It must have the same shape and type as the expected output.
Returns
-------
output : ndarray
Returns the dot product of `a` and `b`. If `a` and `b` are both
scalars or both 1-D arrays then a scalar is returned; otherwise
an array is returned.
If `out` is given, then it is returned
Examples
--------
>>> a = np.array(3)
>>> b = np.array(4)
>>> np.dot(a, b)
array(12.)
For 2-D arrays it is the matrix product:
>>> a = np.array([[1, 0], [0, 1]])
>>> b = np.array([[4, 1], [2, 2]])
>>> np.dot(a, b)
array([[4., 1.],
[2., 2.]])
>>> a = np.arange(3*4*5*6).reshape((3,4,5,6))
>>> b = np.arange(5*6)[::-1].reshape((6,5))
>>> np.dot(a, b)[2,3,2,2]
array(29884.)
>>> np.sum(a[2,3,2,:] * b[:,2])
array(29884.)
"""
return _mx_nd_np.dot(a, b, out=out)
# pylint: disable=redefined-outer-name
@set_module('mxnet.numpy')
def cumsum(a, axis=None, dtype=None, out=None):
"""
Return the cumulative sum of the elements along a given axis.
Parameters
----------
a : array_like
Input array.
axis : int, optional
Axis along which the cumulative sum is computed. The default
(None) is to compute the cumsum over the flattened array.
dtype : dtype, optional
Type of the returned array and of the accumulator in which the
elements are summed. If `dtype` is not specified, it defaults
to the dtype of `a`, unless `a` has an integer dtype with a
precision less than that of the default platform integer. In
that case, the default platform integer is used.
out : ndarray, optional
Alternative output array in which to place the result. It must
have the same shape and buffer length as the expected output
but the type will be cast if necessary. See `doc.ufuncs`
(Section "Output arguments") for more details.
Returns
-------
cumsum_along_axis : ndarray.
A new array holding the result is returned unless `out` is
specified, in which case a reference to `out` is returned. The
result has the same size as `a`, and the same shape as `a` if
`axis` is not None or `a` is a 1-d array.
Examples
--------
>>> a = np.array([[1,2,3], [4,5,6]])
>>> a
array([[1, 2, 3],
[4, 5, 6]])
>>> np.cumsum(a)
array([ 1, 3, 6, 10, 15, 21])
>>> np.cumsum(a, dtype=float) # specifies type of output value(s)
array([ 1., 3., 6., 10., 15., 21.])
>>> np.cumsum(a,axis=0) # sum over rows for each of the 3 columns
array([[1, 2, 3],
[5, 7, 9]])
>>> np.cumsum(a,axis=1) # sum over columns for each of the 2 rows
array([[ 1, 3, 6],
[ 4, 9, 15]])
"""
return _mx_nd_np.cumsum(a, axis=axis, dtype=dtype, out=out)
@set_module('mxnet.numpy')
def reshape(a, newshape, reverse, order='C'):
"""
Gives a new shape to an array without changing its data.
This function always returns a copy of the input array if
``out`` is not provided.
Parameters
----------
a : ndarray
Array to be reshaped.
newshape : int or tuple of ints
The new shape should be compatible with the original shape. If
an integer, then the result will be a 1-D array of that length.
One shape dimension can be -1. In this case, the value is
inferred from the length of the array and remaining dimensions.
order : {'C'}, optional
Read the elements of `a` using this index order, and place the
elements into the reshaped array using this index order. 'C'
means to read / write the elements using C-like index order,
with the last axis index changing fastest, back to the first
axis index changing slowest. Other order types such as 'F'/'A'
may be added in the future.
Returns
-------
reshaped_array : ndarray
        It will always be a copy of the original array. This behavior is different
from the official NumPy ``reshape`` operator where views of the original array may be
generated.
See Also
--------
ndarray.reshape : Equivalent method.
Examples
--------
>>> a = np.arange(6).reshape((3, 2))
>>> a
array([[0., 1.],
[2., 3.],
[4., 5.]])
>>> np.reshape(a, (2, 3)) # C-like index ordering
array([[0., 1., 2.],
[3., 4., 5.]])
>>> np.reshape(np.ravel(a), (2, 3)) # equivalent to C ravel then C reshape
array([[0., 1., 2.],
[3., 4., 5.]])
>>> a = np.array([[1,2,3], [4,5,6]])
>>> np.reshape(a, 6)
array([1., 2., 3., 4., 5., 6.])
>>> np.reshape(a, (3,-1)) # the unspecified value is inferred to be 2
array([[1., 2.],
[3., 4.],
[5., 6.]])
"""
return _mx_nd_np.reshape(a, newshape, reverse, order)
@set_module('mxnet.numpy')
def moveaxis(a, source, destination):
"""Move axes of an array to new positions.
Other axes remain in their original order.
Parameters
----------
a : ndarray
The array whose axes should be reordered.
source : int or sequence of int
Original positions of the axes to move. These must be unique.
destination : int or sequence of int
Destination positions for each of the original axes. These must also be
unique.
Returns
-------
result : ndarray
Array with moved axes. This array is a view of the input array.
See Also
--------
transpose: Permute the dimensions of an array.
swapaxes: Interchange two axes of an array.
Examples
--------
>>> x = np.zeros((3, 4, 5))
>>> np.moveaxis(x, 0, -1).shape
(4, 5, 3)
>>> np.moveaxis(x, -1, 0).shape
(5, 3, 4)
These all achieve the same result:
>>> np.transpose(x).shape
(5, 4, 3)
>>> np.swapaxes(x, 0, -1).shape
(5, 4, 3)
>>> np.moveaxis(x, [0, 1], [-1, -2]).shape
(5, 4, 3)
>>> np.moveaxis(x, [0, 1, 2], [-1, -2, -3]).shape
(5, 4, 3)
"""
return _mx_nd_np.moveaxis(a, source, destination)
@set_module('mxnet.numpy')
def copy(a): # pylint: disable=redefined-outer-name
"""
Return an array copy of the given object.
Parameters
----------
a : _Symbol
Input array.
Returns
-------
arr : _Symbol
Array interpretation of a.
Examples
--------
>>> x = np.array([1, 2, 3])
>>> y = x
>>> z = np.copy(x)
>>> x[0] = 10
>>> x[0] == y[0]
True
>>> x[0] == z[0]
False
"""
return _mx_nd_np.copy(a)
# pylint: disable=redefined-outer-name
@set_module('mxnet.numpy')
def rollaxis(a, axis, start=0):
"""
Roll the specified axis backwards, until it lies in a given position.
Parameters
----------
a : ndarray
Input array.
axis : integer
The axis to roll backwards. The positions of the other axes do not
change relative to one another.
start: int, optional
The axis is rolled until it lies before this position.
The default, 0, results in a “complete” roll.
Returns
-------
res : ndarray
A view after applying rollaxis to `a` is returned.
Examples
--------
>>> a = np.ones((3,4,5,6))
>>> np.rollaxis(a, 3, 1).shape
(3, 6, 4, 5)
>>> np.rollaxis(a, 2).shape
(5, 3, 4, 6)
>>> np.rollaxis(a, 1, 4).shape
(3, 5, 6, 4)
"""
return _mx_nd_np.rollaxis(a, axis, start)
@set_module('mxnet.numpy')
def diag(v, k=0):
"""
Extracts a diagonal or constructs a diagonal array.
* 1-D arrays: constructs a 2-D array with the input as its diagonal, all other elements are zero.
* 2-D arrays: extracts the k-th Diagonal
Parameters
----------
    v : ndarray
        The input array from which to extract, or with which to construct, a diagonal.
    k : int, optional
        Which diagonal to extract or construct; 0 (the default) refers to the main diagonal.
Returns
----------
out : ndarray
The extracted diagonal or constructed diagonal array.
Examples
--------
>>> x = np.arange(9).reshape((3,3))
>>> x
array([[0, 1, 2],
[3, 4, 5],
[6, 7, 8]])
>>> np.diag(x)
array([0, 4, 8])
>>> np.diag(x, k=1)
array([1, 5])
>>> np.diag(x, k=-1)
array([3, 7])
>>> np.diag(np.diag(x))
array([[0, 0, 0],
[0, 4, 0],
[0, 0, 8]])
"""
return _mx_nd_np.diag(v, k=k)
@set_module('mxnet.numpy')
def diagflat(v, k=0):
"""
Create a two-dimensional array with the flattened input as a diagonal.
Parameters
----------
v : array_like
Input data, which is flattened and set as the `k`-th
diagonal of the output.
k : int, optional
Diagonal to set; 0, the default, corresponds to the "main" diagonal,
a positive (negative) `k` giving the number of the diagonal above
(below) the main.
Returns
-------
out : ndarray
The 2-D output array.
See Also
--------
diag : MATLAB work-alike for 1-D and 2-D arrays.
diagonal : Return specified diagonals.
trace : Sum along diagonals.
Examples
--------
>>> np.diagflat([[1,2], [3,4]])
array([[1, 0, 0, 0],
[0, 2, 0, 0],
[0, 0, 3, 0],
[0, 0, 0, 4]])
>>> np.diagflat([1,2], 1)
array([[0, 1, 0],
[0, 0, 2],
[0, 0, 0]])
"""
return _mx_nd_np.diagflat(v, k=k)
@set_module('mxnet.numpy')
def diagonal(a, offset=0, axis1=0, axis2=1):
"""
If a is 2-D, returns the diagonal of a with the given offset, i.e., the collection of elements of
the form a[i, i+offset]. If a has more than two dimensions, then the axes specified by axis1 and
axis2 are used to determine the 2-D sub-array whose diagonal is returned. The shape of the
resulting array can be determined by removing axis1 and axis2 and appending an index to the
right equal to the size of the resulting diagonals.
Parameters
----------
a : ndarray
Input data from which diagonal are taken.
    offset : int, optional
        Offset of the diagonal from the main diagonal.
    axis1 : int, optional
        Axis to be used as the first axis of the 2-D sub-arrays.
    axis2 : int, optional
        Axis to be used as the second axis of the 2-D sub-arrays.
Returns
-------
out : ndarray
Output result
Raises
-------
ValueError: If the dimension of a is less than 2.
Examples
--------
>>> a = np.arange(4).reshape(2,2)
>>> a
array([[0, 1],
[2, 3]])
>>> np.diagonal(a)
array([0, 3])
>>> np.diagonal(a, 1)
array([1])
>>> a = np.arange(8).reshape(2,2,2)
    >>> a
array([[[0, 1],
[2, 3]],
[[4, 5],
[6, 7]]])
>>> np.diagonal(a, 0, 0, 1)
array([[0, 6],
[1, 7]])
"""
return _mx_nd_np.diagonal(a, offset=offset, axis1=axis1, axis2=axis2)
# pylint: disable=redefined-outer-name, too-many-arguments
@set_module('mxnet.numpy')
def sum(a, axis=None, dtype=None, out=None, keepdims=None, initial=None, where=None):
r"""
Sum of array elements over a given axis.
Parameters
----------
a : ndarray
Input data.
axis : None or int, optional
Axis or axes along which a sum is performed. The default,
axis=None, will sum all of the elements of the input array. If
axis is negative it counts from the last to the first axis.
dtype : dtype, optional
The type of the returned array and of the accumulator in which the
elements are summed. The default type is float32.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the input array.
If the default value is passed, then `keepdims` will not be
passed through to the `sum` method of sub-classes of
`ndarray`, however any non-default value will be. If the
        sub-class' `sum` method does not implement `keepdims` any
exceptions will be raised.
initial: Currently only supports None as input, optional
Starting value for the sum.
Currently not implemented. Please use ``None`` as input or skip this argument.
out : ndarray or None, optional
Alternative output array in which to place the result. It must have
the same shape and dtype as the expected output.
Returns
-------
sum_along_axis : ndarray
An ndarray with the same shape as `a`, with the specified
axis removed. If an output array is specified, a reference to
`out` is returned.
Notes
-----
* Input type does not support Python native iterables.
* "out" param: cannot perform auto type change. out ndarray's dtype must be the same as the expected output.
* "initial" param is not supported yet. Please use None as input.
* Arithmetic is modular when using integer types, and no error is raised on overflow.
* The sum of an empty array is the neutral element 0:
>>> a = np.empty(1)
>>> np.sum(a)
array(0.)
This function differs from the original `numpy.sum
<https://docs.scipy.org/doc/numpy/reference/generated/numpy.sum.html>`_ in
the following aspects:
* Input type does not support Python native iterables(list, tuple, ...).
* "out" param: cannot perform auto type cast. out ndarray's dtype must be the same as the expected output.
* "initial" param is not supported yet. Please use ``None`` as input or skip it.
* The default type is float32.
Examples
--------
>>> a = np.array([0.5, 1.5])
>>> np.sum(a)
array(2.)
>>> a = np.array([0.5, 0.7, 0.2, 1.5])
>>> np.sum(a, dtype=np.int32)
array(2, dtype=int32)
>>> a = np.array([[0, 1], [0, 5]])
>>> np.sum(a)
array(6.)
>>> np.sum(a, axis=0)
array([0., 6.])
>>> np.sum(a, axis=1)
array([1., 5.])
With output ndarray:
>>> a = np.array([[0, 1], [0, 5]])
>>> b = np.ones((2,), dtype=np.float32)
>>> np.sum(a, axis = 0, out=b)
array([0., 6.])
>>> b
array([0., 6.])
If the accumulator is too small, overflow occurs:
>>> np.ones(128, dtype=np.int8).sum(dtype=np.int8)
array(-128, dtype=int8)
"""
return _mx_nd_np.sum(a, axis=axis, dtype=dtype, out=out, keepdims=keepdims, initial=initial, where=where)
# pylint: enable=redefined-outer-name, too-many-arguments
| apache-2.0 |
to-bee/members_python | web/actions/stv_downloader.py | 1 | 5376 | import os
import platform
from time import sleep
from uuid import uuid1
from selenium import webdriver
from selenium.webdriver.firefox.options import Options
from backend import settings
class StvSeleniumDownloader(object):
ENDPOINT_INIT = 'https://nav17.stv-fsg.ch/DynamicsNAV100-NAVUser/WebClient/SignIn.aspx'
def __init__(self):
selenium_folder = os.path.join(settings.BASE_DIR, '..', 'selenium')
system = platform.system().lower()
if 'windows' in system:
gecko_driver_path = 'geckodriver.exe'
elif 'linux' in system:
gecko_driver_path = 'geckodriver-linux64'
elif 'darwin' in system:
gecko_driver_path = 'geckodriver-osx'
else:
raise Exception('Unknown architecture')
self.driver_path = os.path.join(selenium_folder, gecko_driver_path)
if not os.path.exists(self.driver_path):
raise Exception('The given selenium driver doesn\'t exist')
def get_existing_file(self, download_folder):
# fname.endswith('.csv') or fname.endswith('.xls') or fname.endswith('.xlsx') or fname.endswith('.exe.part')
files = [os.path.join(download_folder, fname) for fname in os.listdir(download_folder) if not 'ds_store' in fname.lower()]
if len(files) > 0:
return files[-1]
def download_one(self, org, force_download):
if org.stv_username is None or org.stv_password is None:
raise Exception('STV Credentials are not set')
download_folder = os.path.join(settings.TMP_ROOT, 'selenium_download-{}'.format(org.stv_id))
if not os.path.isdir(download_folder):
os.makedirs(download_folder)
existing_file = None
else:
existing_file = self.get_existing_file(download_folder=download_folder)
if force_download or existing_file is None:
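            # Build a Firefox profile that saves exports straight into download_folder,
            # without a save-as prompt, for the CSV/Excel MIME types listed below.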
fp = webdriver.FirefoxProfile()
fp.set_preference("browser.download.folderList", 2)
fp.set_preference("browser.download.dir", download_folder)
fp.set_preference("browser.download.manager.showWhenStarting", False)
fp.set_preference("browser.helperApps.alwaysAsk.force", False);
# fp.set_preference("browser.helperApps.neverAsk.saveToDisk", "*") # Not working for linux gecko driver
fp.set_preference("browser.helperApps.neverAsk.saveToDisk", "application/octet-stream;application/xls;application/vnd.openxmlformats-officedocument.spreadsheetml.sheet;text/csv;text/plain")
options = Options()
options.add_argument("--headless")
driver = webdriver.Firefox(executable_path=self.driver_path, firefox_profile=fp, firefox_options=options)
driver.get(self.ENDPOINT_INIT)
input_username = driver.find_element_by_xpath("//input[@id='ctl00_PHM_UserName']")
input_password = driver.find_element_by_xpath("//input[@id='ctl00_PHM_Password']")
input_username.send_keys(org.stv_username)
input_password.send_keys(org.stv_password)
driver.find_element_by_xpath("//input[@name='ctl00$PHM$LoginButton']").click()
sleep(5)
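            # Click through the Dynamics NAV web client menus ('Bericht' -> 'Mitgliederverwaltung'
            # -> 'Bericht' -> 'Gesamtliste', i.e. report -> member administration -> complete member
            # list), pausing so each page can load before the export is generated.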
driver.find_element_by_xpath('//span[text()="Bericht"]').click()
sleep(3)
driver.find_element_by_xpath('//span[text()="Mitgliederverwaltung"]').click()
sleep(3)
driver.find_element_by_xpath('//span[text()="Bericht"]').click()
sleep(3)
driver.find_element_by_xpath('//span[text()="Gesamtliste"]').click()
sleep(15)
# Download not complete list without mobile number
# driver.find_element_by_xpath('//span[text()="Aktionen"]').click()
# sleep(3)
# driver.find_element_by_xpath('//span[text()="Kontaktliste"]').click()
# sleep(7)
# res = driver.find_elements_by_xpath("//span[contains(text(), 'In Excel')]")
# if res is not None:
# res[0].click()
# sleep(5)
driver.close()
            # Selenium may store the export under a binary/.exe-style name instead of .csv,
            # so pick up whatever was downloaded and rename it to a proper .csv below.
# existing_file = self.get_existing_file(download_folder=download_folder)
existing_file_binary = self.get_existing_file(download_folder=download_folder)
if existing_file_binary is None:
raise Exception('The export file could not be downloaded')
existing_file = os.path.join(download_folder, 'list-{}.csv'.format(uuid1()))
os.rename(existing_file_binary, existing_file)
import pandas as pd # import here because of django debugger problems
# https://github.com/pandas-dev/pandas/issues/16620
df = pd.read_csv(open(existing_file, 'rb'), sep=';', encoding='latin1')
# df = pd.read_excel(open(existing_file, 'rb'), sheetname=0, logfile=open(os.devnull, 'w'))
# print(df.columns)
return df
def download_all(self, orgs, force_download):
"""
Files will be available under: org.export_path
:return:
"""
# Create map with all dataframes
dataframes = dict()
for org in orgs:
df = self.download_one(org, force_download)
dataframes[org.stv_id] = df
return dataframes | lgpl-3.0 |
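# Hypothetical usage sketch (assumes organisation objects with stv_id, stv_username and
# stv_password set, as expected by download_one above):
#   downloader = StvSeleniumDownloader()
#   frames = downloader.download_all(orgs, force_download=False)
#   frames[orgs[0].stv_id].head()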
xzh86/scikit-learn | sklearn/metrics/tests/test_score_objects.py | 138 | 14048 | import pickle
import numpy as np
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regexp
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_not_equal
from sklearn.base import BaseEstimator
from sklearn.metrics import (f1_score, r2_score, roc_auc_score, fbeta_score,
log_loss, precision_score, recall_score)
from sklearn.metrics.cluster import adjusted_rand_score
from sklearn.metrics.scorer import (check_scoring, _PredictScorer,
_passthrough_scorer)
from sklearn.metrics import make_scorer, get_scorer, SCORERS
from sklearn.svm import LinearSVC
from sklearn.pipeline import make_pipeline
from sklearn.cluster import KMeans
from sklearn.dummy import DummyRegressor
from sklearn.linear_model import Ridge, LogisticRegression
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.datasets import make_blobs
from sklearn.datasets import make_classification
from sklearn.datasets import make_multilabel_classification
from sklearn.datasets import load_diabetes
from sklearn.cross_validation import train_test_split, cross_val_score
from sklearn.grid_search import GridSearchCV
from sklearn.multiclass import OneVsRestClassifier
REGRESSION_SCORERS = ['r2', 'mean_absolute_error', 'mean_squared_error',
'median_absolute_error']
CLF_SCORERS = ['accuracy', 'f1', 'f1_weighted', 'f1_macro', 'f1_micro',
'roc_auc', 'average_precision', 'precision',
'precision_weighted', 'precision_macro', 'precision_micro',
'recall', 'recall_weighted', 'recall_macro', 'recall_micro',
'log_loss',
'adjusted_rand_score' # not really, but works
]
MULTILABEL_ONLY_SCORERS = ['precision_samples', 'recall_samples', 'f1_samples']
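# These scorer groups are used below to pair each scorer with an estimator fitted on a
# matching target type (regression, standard classification, or multilabel).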
class EstimatorWithoutFit(object):
"""Dummy estimator to test check_scoring"""
pass
class EstimatorWithFit(BaseEstimator):
"""Dummy estimator to test check_scoring"""
def fit(self, X, y):
return self
class EstimatorWithFitAndScore(object):
"""Dummy estimator to test check_scoring"""
def fit(self, X, y):
return self
def score(self, X, y):
return 1.0
class EstimatorWithFitAndPredict(object):
"""Dummy estimator to test check_scoring"""
def fit(self, X, y):
self.y = y
return self
def predict(self, X):
return self.y
class DummyScorer(object):
"""Dummy scorer that always returns 1."""
def __call__(self, est, X, y):
return 1
def test_check_scoring():
# Test all branches of check_scoring
estimator = EstimatorWithoutFit()
pattern = (r"estimator should a be an estimator implementing 'fit' method,"
r" .* was passed")
assert_raises_regexp(TypeError, pattern, check_scoring, estimator)
estimator = EstimatorWithFitAndScore()
estimator.fit([[1]], [1])
scorer = check_scoring(estimator)
assert_true(scorer is _passthrough_scorer)
assert_almost_equal(scorer(estimator, [[1]], [1]), 1.0)
estimator = EstimatorWithFitAndPredict()
estimator.fit([[1]], [1])
pattern = (r"If no scoring is specified, the estimator passed should have"
r" a 'score' method\. The estimator .* does not\.")
assert_raises_regexp(TypeError, pattern, check_scoring, estimator)
scorer = check_scoring(estimator, "accuracy")
assert_almost_equal(scorer(estimator, [[1]], [1]), 1.0)
estimator = EstimatorWithFit()
scorer = check_scoring(estimator, "accuracy")
assert_true(isinstance(scorer, _PredictScorer))
estimator = EstimatorWithFit()
scorer = check_scoring(estimator, allow_none=True)
assert_true(scorer is None)
def test_check_scoring_gridsearchcv():
# test that check_scoring works on GridSearchCV and pipeline.
# slightly redundant non-regression test.
grid = GridSearchCV(LinearSVC(), param_grid={'C': [.1, 1]})
scorer = check_scoring(grid, "f1")
assert_true(isinstance(scorer, _PredictScorer))
pipe = make_pipeline(LinearSVC())
scorer = check_scoring(pipe, "f1")
assert_true(isinstance(scorer, _PredictScorer))
# check that cross_val_score definitely calls the scorer
# and doesn't make any assumptions about the estimator apart from having a
# fit.
scores = cross_val_score(EstimatorWithFit(), [[1], [2], [3]], [1, 0, 1],
scoring=DummyScorer())
assert_array_equal(scores, 1)
def test_make_scorer():
# Sanity check on the make_scorer factory function.
f = lambda *args: 0
assert_raises(ValueError, make_scorer, f, needs_threshold=True,
needs_proba=True)
def test_classification_scores():
# Test classification scorers.
X, y = make_blobs(random_state=0, centers=2)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
clf = LinearSVC(random_state=0)
clf.fit(X_train, y_train)
for prefix, metric in [('f1', f1_score), ('precision', precision_score),
('recall', recall_score)]:
score1 = get_scorer('%s_weighted' % prefix)(clf, X_test, y_test)
score2 = metric(y_test, clf.predict(X_test), pos_label=None,
average='weighted')
assert_almost_equal(score1, score2)
score1 = get_scorer('%s_macro' % prefix)(clf, X_test, y_test)
score2 = metric(y_test, clf.predict(X_test), pos_label=None,
average='macro')
assert_almost_equal(score1, score2)
score1 = get_scorer('%s_micro' % prefix)(clf, X_test, y_test)
score2 = metric(y_test, clf.predict(X_test), pos_label=None,
average='micro')
assert_almost_equal(score1, score2)
score1 = get_scorer('%s' % prefix)(clf, X_test, y_test)
score2 = metric(y_test, clf.predict(X_test), pos_label=1)
assert_almost_equal(score1, score2)
# test fbeta score that takes an argument
scorer = make_scorer(fbeta_score, beta=2)
score1 = scorer(clf, X_test, y_test)
score2 = fbeta_score(y_test, clf.predict(X_test), beta=2)
assert_almost_equal(score1, score2)
# test that custom scorer can be pickled
unpickled_scorer = pickle.loads(pickle.dumps(scorer))
score3 = unpickled_scorer(clf, X_test, y_test)
assert_almost_equal(score1, score3)
# smoke test the repr:
repr(fbeta_score)
def test_regression_scorers():
# Test regression scorers.
diabetes = load_diabetes()
X, y = diabetes.data, diabetes.target
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
clf = Ridge()
clf.fit(X_train, y_train)
score1 = get_scorer('r2')(clf, X_test, y_test)
score2 = r2_score(y_test, clf.predict(X_test))
assert_almost_equal(score1, score2)
def test_thresholded_scorers():
# Test scorers that take thresholds.
X, y = make_blobs(random_state=0, centers=2)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
clf = LogisticRegression(random_state=0)
clf.fit(X_train, y_train)
score1 = get_scorer('roc_auc')(clf, X_test, y_test)
score2 = roc_auc_score(y_test, clf.decision_function(X_test))
score3 = roc_auc_score(y_test, clf.predict_proba(X_test)[:, 1])
assert_almost_equal(score1, score2)
assert_almost_equal(score1, score3)
logscore = get_scorer('log_loss')(clf, X_test, y_test)
logloss = log_loss(y_test, clf.predict_proba(X_test))
assert_almost_equal(-logscore, logloss)
# same for an estimator without decision_function
clf = DecisionTreeClassifier()
clf.fit(X_train, y_train)
score1 = get_scorer('roc_auc')(clf, X_test, y_test)
score2 = roc_auc_score(y_test, clf.predict_proba(X_test)[:, 1])
assert_almost_equal(score1, score2)
# test with a regressor (no decision_function)
reg = DecisionTreeRegressor()
reg.fit(X_train, y_train)
score1 = get_scorer('roc_auc')(reg, X_test, y_test)
score2 = roc_auc_score(y_test, reg.predict(X_test))
assert_almost_equal(score1, score2)
# Test that an exception is raised on more than two classes
X, y = make_blobs(random_state=0, centers=3)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
clf.fit(X_train, y_train)
assert_raises(ValueError, get_scorer('roc_auc'), clf, X_test, y_test)
def test_thresholded_scorers_multilabel_indicator_data():
# Test that the scorer work with multilabel-indicator format
# for multilabel and multi-output multi-class classifier
X, y = make_multilabel_classification(allow_unlabeled=False,
random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
# Multi-output multi-class predict_proba
clf = DecisionTreeClassifier()
clf.fit(X_train, y_train)
y_proba = clf.predict_proba(X_test)
score1 = get_scorer('roc_auc')(clf, X_test, y_test)
score2 = roc_auc_score(y_test, np.vstack(p[:, -1] for p in y_proba).T)
assert_almost_equal(score1, score2)
# Multi-output multi-class decision_function
# TODO Is there any yet?
clf = DecisionTreeClassifier()
clf.fit(X_train, y_train)
clf._predict_proba = clf.predict_proba
clf.predict_proba = None
clf.decision_function = lambda X: [p[:, 1] for p in clf._predict_proba(X)]
y_proba = clf.decision_function(X_test)
score1 = get_scorer('roc_auc')(clf, X_test, y_test)
score2 = roc_auc_score(y_test, np.vstack(p for p in y_proba).T)
assert_almost_equal(score1, score2)
# Multilabel predict_proba
clf = OneVsRestClassifier(DecisionTreeClassifier())
clf.fit(X_train, y_train)
score1 = get_scorer('roc_auc')(clf, X_test, y_test)
score2 = roc_auc_score(y_test, clf.predict_proba(X_test))
assert_almost_equal(score1, score2)
# Multilabel decision function
clf = OneVsRestClassifier(LinearSVC(random_state=0))
clf.fit(X_train, y_train)
score1 = get_scorer('roc_auc')(clf, X_test, y_test)
score2 = roc_auc_score(y_test, clf.decision_function(X_test))
assert_almost_equal(score1, score2)
def test_unsupervised_scorers():
# Test clustering scorers against gold standard labeling.
# We don't have any real unsupervised Scorers yet.
X, y = make_blobs(random_state=0, centers=2)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
km = KMeans(n_clusters=3)
km.fit(X_train)
score1 = get_scorer('adjusted_rand_score')(km, X_test, y_test)
score2 = adjusted_rand_score(y_test, km.predict(X_test))
assert_almost_equal(score1, score2)
@ignore_warnings
def test_raises_on_score_list():
# Test that when a list of scores is returned, we raise proper errors.
X, y = make_blobs(random_state=0)
f1_scorer_no_average = make_scorer(f1_score, average=None)
clf = DecisionTreeClassifier()
assert_raises(ValueError, cross_val_score, clf, X, y,
scoring=f1_scorer_no_average)
grid_search = GridSearchCV(clf, scoring=f1_scorer_no_average,
param_grid={'max_depth': [1, 2]})
assert_raises(ValueError, grid_search.fit, X, y)
@ignore_warnings
def test_scorer_sample_weight():
# Test that scorers support sample_weight or raise sensible errors
# Unlike the metrics invariance test, in the scorer case it's harder
# to ensure that, on the classifier output, weighted and unweighted
# scores really should be unequal.
X, y = make_classification(random_state=0)
_, y_ml = make_multilabel_classification(n_samples=X.shape[0],
random_state=0)
split = train_test_split(X, y, y_ml, random_state=0)
X_train, X_test, y_train, y_test, y_ml_train, y_ml_test = split
sample_weight = np.ones_like(y_test)
sample_weight[:10] = 0
# get sensible estimators for each metric
sensible_regr = DummyRegressor(strategy='median')
sensible_regr.fit(X_train, y_train)
sensible_clf = DecisionTreeClassifier(random_state=0)
sensible_clf.fit(X_train, y_train)
sensible_ml_clf = DecisionTreeClassifier(random_state=0)
sensible_ml_clf.fit(X_train, y_ml_train)
estimator = dict([(name, sensible_regr)
for name in REGRESSION_SCORERS] +
[(name, sensible_clf)
for name in CLF_SCORERS] +
[(name, sensible_ml_clf)
for name in MULTILABEL_ONLY_SCORERS])
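    # estimator[name] is now a fitted model appropriate for scorer `name`
    # (regressor, classifier, or multilabel classifier).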
for name, scorer in SCORERS.items():
if name in MULTILABEL_ONLY_SCORERS:
target = y_ml_test
else:
target = y_test
try:
weighted = scorer(estimator[name], X_test, target,
sample_weight=sample_weight)
ignored = scorer(estimator[name], X_test[10:], target[10:])
unweighted = scorer(estimator[name], X_test, target)
assert_not_equal(weighted, unweighted,
msg="scorer {0} behaves identically when "
"called with sample weights: {1} vs "
"{2}".format(name, weighted, unweighted))
assert_almost_equal(weighted, ignored,
err_msg="scorer {0} behaves differently when "
"ignoring samples and setting sample_weight to"
" 0: {1} vs {2}".format(name, weighted,
ignored))
except TypeError as e:
assert_true("sample_weight" in str(e),
"scorer {0} raises unhelpful exception when called "
"with sample weights: {1}".format(name, str(e)))
| bsd-3-clause |
brhoades/lightup-graph-generator | generate.py | 1 | 4852 | import matplotlib.pyplot as plt
import numpy as np
from scipy import interpolate
import matplotlib.cm as cm
import pandas as pd
import re
import copy
import sys
fn = []
#fn2 = "MOEA-probdist-0.861.txt"
#fn1 = "MOEA-tournament-0.861.txt"
dn = []
#dn.append( "Tournament Selection")
#dn.append( "Roulette Wheel Selection" )
smfctr = 0.0003 #>0.0001 < 0.001 works best
x_axis_label = "Fitness Evaluations"
x_axis_ticks = 0 # 0 == auto
y_axis_label = "" #if blank, set to "Average Subfitness Over # Runs"
graph_label = "Easy Graph NSGA-II"
args = sys.argv[1:]
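# Command-line arguments are consumed in pairs: '-t <title>' overrides the plot title,
# any other pair is interpreted as '<logfile> <legend label>'.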
argument = re.compile(r'\-[A-Za-z]')
for i in range(0, len(args), 2):
if argument.match(args[i]):
if args[i] == "-t":
graph_label = args[i+1]
continue
fn.append(args[i])
dn.append(args[i+1])
#to be automatically determined later
st = 0
en = 0
stp = 0
log = []
for file in fn:
log.append( open(file, 'r' ) )
probs = []
lrun = 0
run = re.compile(r'Run [0-9]+')
end = re.compile(r'\=SPACER\=')
thisrun = []
thissim = []
inrun = False
for i in range(0,len(log)):
for line in log[i]:
if end.match(line):
inrun = False
thissim.append(thisrun)
#first time through we determine step, start, and end automatically
if en == 0:
en = lrun+stp #+stp due to graphing
continue
if run.match(line):
inrun = True
thisrun = [[] for i in range(7)]
continue
if inrun:
linel = line.split('\t')
#if necessary, determine our step, start, and end
for j in range(7):
if j != 0:
thisrun[j].append( float(linel[j].rstrip('\n')) )
else:
thisrun[j].append( int(linel[j].rstrip('\n')) )
lrun = int(linel[0])
probs.append(copy.deepcopy(thissim))
thissim = []
if x_axis_ticks == 0:
x_axis_ticks = int(en-stp/10)
bestdata = []
avgdata = []
# probs
# pri: [log1] [log2]
# problem (log1): [run1][run2][run3]...
# run (run1): seven parallel lists, one per column of the log file:
#   column 0 => fitness-evaluation counts, odd columns (1, 3, 5) => average subfitnesses,
#   even columns (2, 4, 6) => best subfitnesses
indicies = []
maxindex = -1
minindex = 1000000000000
for pri in range(len(probs)):
da = {}
db = {}
indicies.append(pd.Series(probs[pri][0][0], probs[pri][0][0]))
indexs = probs[pri][0][0]
if indexs[len(probs[pri][0][0])-1] > maxindex:
maxindex = indexs[len(probs[pri][0][0])-1]
if indexs[0] < minindex:
minindex = indexs[0]
for i in range(len(probs[pri])):
runname = ''.join(['run', str(i+1)])
for j in range(2,7,2):
da[''.join([runname, '.', str(j)])] = pd.Series(probs[pri][i][j], index=probs[pri][i][0])
for j in range(1,7,2):
db[''.join([runname, '.', str(j)])] = pd.Series(probs[pri][i][j], index=probs[pri][i][0])
bestdata.append(pd.DataFrame(da))
avgdata.append(pd.DataFrame(db))
#autogenerate our run count
if y_axis_label == "":
y_axis_label = ''.join(['Average Subfitness Over ', str(len(probs[0])), ' Runs'])
#grab our individual means and prepare to plot them
for i in range(len(bestdata)):
bestdata[i]['mean'] = bestdata[i].mean(1)
#add indecies after so we don't average them
bestdata[i]['index'] = pd.Series(indicies[i], index=indicies[i])
for i in range(len(avgdata)):
avgdata[i]['mean'] = avgdata[i].mean(1)
#add indecies after so we don't average them
avgdata[i]['index'] = pd.Series(indicies[i], index=indicies[i])
mdataframed = {}
maxiindex = [j for j in range(minindex,maxindex)]
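# Smooth each run-averaged 'best' curve with a univariate spline (smoothing factor smfctr)
# and resample it onto the shared evaluation index so curves from different logs line up.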
for i in range(len(bestdata)):
iindex = [j for j in range(bestdata[i]['index'].min(),bestdata[i]['index'].max())]
s = interpolate.UnivariateSpline(bestdata[i]['index'], bestdata[i]['mean'], s=smfctr)
mdataframed[''.join([dn[i], ' Avg Best'])] = pd.Series(s(iindex)).reindex(index=maxiindex)
#for i in range(len(avgdata)):
#iindex = [j for j in range(avgdata[i]['index'].min(),avgdata[i]['index'].max())]
#s = interpolate.UnivariateSpline(avgdata[i]['index'], avgdata[i]['mean'], s=smfctr)
#mdataframed[''.join([dn[i], ' Average'])] = pd.Series(s(iindex)).reindex(index=maxiindex)
#mdataframed['Fitness Evaluations'] = maxiindex
mdataframe = pd.DataFrame(mdataframed)
colors = iter(cm.rainbow(np.linspace(0, 1, len(mdataframed))))
for subplot in mdataframe:
plt.plot(maxiindex, mdataframe[subplot], color=next(colors), label=subplot, antialiased=True, rasterized=True, linewidth=2)
#plt.autoscale(axis='x', tight=True)
#fig.set_xticks(np.arange(st,en-stp,x_axis_ticks))
#fig.set_yticks(np.arange(0,1.,0.1))
plt.ylabel(y_axis_label)
plt.xlabel(x_axis_label)
plt.title(graph_label)
plt.legend(loc=3, borderaxespad=0.)
plt.grid( )
#plt.savefig('rastered.pdf')
plt.savefig('4.1.png', dpi=500)
| mit |
pabryan/smc | src/build.py | 1 | 54952 | #!/usr/bin/env python
###############################################################################
#
# SageMathCloud: A collaborative web-based interface to Sage, IPython, LaTeX and the Terminal.
#
# Copyright (C) 2014-2015, William Stein
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
# NOTE:
# There's a hack I'm using around line 171 of
# /usr/local/sage/current/local/lib/python/site-packages/IPython/html/notebookapp.py
# to get it to use my local static/ipython directory, for much better speed.
"""
Building the main components of cloud.sagemath.com from source, ensuring that all
important (usually security-related) options are compiled in.
The components are:
* python -- build managements and some packages
* node.js -- dynamic async; most things
* nginx -- static web server
* haproxy -- proxy and load balancer
* stunnel -- ssl termination
* tinc -- p2p vpn
* rethinkdb -- distributed push database
* bup -- git-ish backup
* sage -- we do *not* build or include Sage; it must be available system-wide or for
user in order for worksheets to work (everything but worksheets should work without Sage).
# Install critical packages needed for building SMC source code:
apt-get update && apt-get install vim git wget iperf dpkg-dev make m4 g++ gfortran liblzo2-dev libssl-dev libreadline-dev libsqlite3-dev libncurses5-dev git zlib1g-dev openjdk-7-jdk libbz2-dev libfuse-dev pkg-config libattr1-dev libacl1-dev par2 ntp pandoc ssh python-lxml calibre ipython python-pyxattr python-pylibacl software-properties-common libevent-dev xfsprogs lsof tk-dev linux-image-extra-virtual
# Disable Ubuntu's ad- and resource-wasting-on-every-ssh crap:
put `exit 0` at the beginning of `/etc/update-motd.d/50-landscape-sysinfo`
# Compute VM's
apt-get update && apt-get upgrade && apt-get install vim git wget iperf dpkg-dev make m4 g++ gfortran liblzo2-dev libssl-dev libreadline-dev libsqlite3-dev libncurses5-dev git zlib1g-dev openjdk-7-jdk libbz2-dev libfuse-dev pkg-config libattr1-dev libacl1-dev par2 ntp pandoc ssh python-lxml calibre ipython python-pyxattr python-pylibacl software-properties-common libevent-dev xfsprogs lsof tk-dev
# Critical to get rid of certain packages that just cause trouble:
apt-get remove mlocate
# Install https://github.com/williamstein/python-inotify and https://github.com/williamstein/bup-1 systemwide.
sudo su
cd /tmp && rm -rf python-inotify && git clone https://github.com/williamstein/python-inotify && cd python-inotify && python setup.py install && cd /tmp && rm -rf python-inotify bup-1 && git clone https://github.com/williamstein/bup-1 && cd bup-1 && make install && cd .. && rm -rf bup-1
# BASH
Add this to the top of /etc/bash.bashrc, at least for now, due to bugs in Ubuntu and vim?!
TERM=screen
# OBSPY --
Add this to /etc/apt/sources.list then "apt-get update; apt-get install python-obspy":
echo $'\n'"deb http://deb.obspy.org trusty main"$'\n' >> /etc/apt/sources.list && apt-get update && apt-get install python-obspy
# ATLAS:
apt-get install libatlas3gf-base liblapack-dev && cd /usr/lib/ && ln -s libatlas.so.3gf libatlas.so && ln -s libcblas.so.3gf libcblas.so && ln -s libf77blas.so.3gf libf77blas.so
This line is in the .sagemathcloud env, so building sage is fast for users (though not as performant)
export SAGE_ATLAS_LIB="/usr/lib/"
# Add this to /etc/ssh/sshd_config
MaxStartups 128
# Additional packages (mainly for users, not building).
apt-get install libmed1 libhdf5-mpich2-dev gmsh dstat emacs vim poppler-utils texlive texlive-* gv imagemagick octave mercurial flex bison unzip libzmq-dev uuid-dev scilab axiom yacas octave-symbolic quota quotatool dot2tex python-numpy python-scipy python-pandas python-tables libglpk-dev python-h5py zsh python3 python3-zmq python3-setuptools cython htop ccache python-virtualenv clang libgeos-dev libgeos++-dev sloccount racket libxml2-dev libxslt-dev irssi libevent-dev tmux sysstat sbcl gawk noweb libgmp3-dev ghc ghc-doc ghc-haddock ghc-mod ghc-prof haskell-mode haskell-doc subversion cvs bzr rcs subversion-tools git-svn markdown lua5.2 lua5.2-* encfs auctex vim-latexsuite yatex spell cmake libpango1.0-dev xorg-dev gdb valgrind doxygen haskell-platform haskell-platform-doc haskell-platform-prof mono-devel mono-tools-devel ocaml ocaml-native-compilers camlp4-extra proofgeneral proofgeneral-doc tuareg-mode ocaml-mode libgdbm-dev mlton sshfs sparkleshare fig2ps epstool libav-tools python-software-properties software-properties-common h5utils libnetcdf-dev netcdf-doc netcdf-bin tig libtool iotop asciidoc autoconf bsdtar attr libicu-dev iceweasel xvfb tree bindfs liblz4-tool tinc python-scikits-learn python-scikits.statsmodels python-skimage python-skimage-doc python-skimage-lib python-sklearn python-sklearn-doc python-sklearn-lib python-fuse cgroup-lite cgmanager-utils cgroup-bin libpam-cgroup cgmanager cgmanager-utils cgroup-lite cgroup-bin r-recommended libquantlib0 libquantlib0-dev quantlib-examples quantlib-python quantlib-refman-html r-cran-rquantlib libpng++-dev libcairomm-1.0-dev r-cran-cairodevice x11-apps mesa-utils libpangox-1.0-dev libf2c2-dev gnugo libapr1-dev libcap2-bin lbzip2 mosh smem libcurl4-openssl-dev jekyll lynx-cur root-system-bin libroot-bindings-python-dev libroot-graf2d-postscript5.34 csh x11vnc x11-apps meld aspell-* inkscape libopencv-dev build-essential checkinstall cmake pkg-config yasm libjpeg-dev libjasper-dev libavcodec-dev libavformat-dev libswscale-dev libdc1394-22-dev libxine2-dev libgstreamer0.10-dev libgstreamer-plugins-base0.10-dev libv4l-dev python-dev python-numpy libtbb-dev libqt4-dev libgtk2.0-dev libmp3lame-dev libopencore-amrnb-dev libopencore-amrwb-dev libtheora-dev libvorbis-dev libxvidcore-dev x264 v4l-utils r-cran-rgl libgtk2.0-dev php5 python-docutils pdftk smlnj ml-lex ml-yacc p7zip-full check unison-all fonts-ocr-a libwebp-dev libpari-dev libpari-dbg pari-gp2c pari-galpol lzip ncompress ipython3 gpicview python-pip libedit-dev lrzip libgsl0-dev btrfs-tools tmpreaper hdf5-helpers libhdf5-cpp-8 libhdf5-dev scons wordnet pv golang-go libgraphviz-dev protobuf-compiler libcurl4-openssl-dev libboost-all-dev libjemalloc-dev xpra emacs-goodies-el python-mode dieharder jags unrar-free joe mc llvm ncbi-blast+ libavcodec-extra ffmpeg ocaml-batteries-included opam opam-docs libboost-python-dev libboost-signals-dev libcgal-dev gcc-multilib libc6-i386
# tmpreaper
Remove the security warning line in /etc/tmpreaper.conf so it actually runs.
# Python3-related packages of interest
apt-get install python3-pip libzmq3-dev python3-pandas python3-matplotlib python3-numpy python3-xlrd python3-nose bpython3 diveintopython3 libpython3-dev python3-dev python3-aeidon python3-alabaster python3-anyjson python3-astropy python3-audioread python3-args python3-babel python3-bottle python3-bs4 python3-bsddb3 python3-celery python3-changelog python3-cherrypy3 python3-crypto python3-cryptography python3-csb python3-cssutils python3-dateutil python3-decorator python3-defer python3-distutils-extra python3-django python3-django-xmlrpc python3-django-tables2 python3-django-model-utils python3-django-jsonfield python3-django-filters python3-dns python3-dnsq python3-doc python3-docutils python3-ecdsa python3-empy python3-examples python3-expiringdict python3-extras python3-feedparser python3-fftw3 python3-flake8 python3-flask python3-flask-sqlalchemy python3-flask-script python3-flask-principal python3-fysom python3-gdal python3-genshi python3-geoip python3-gmpy2 python3-gnupg python3-greenlet python3-gsw python3-h5py python3-httplib2 python3-icalendar python3-idna python3-ipy python3-jinja2 python3-jsmin python3-lesscpy python3-levenshtein python3-linop python3-mako python3-mia python3-misaka python3-mockito python3-mock python3-mpi4py python3-mpmath python3-msgpack python3-nose2 python3-nose2-cov python3-nine python3-numexpr python3-numpy python3-oauth python3-openssl python3-pandas python3-paramiko python3-pandocfilters python3-patsy python3-pep8 python3-persistent python3-pexpect python3-pil python3-pyasn1 python3-progressbar python3-potr python3-ply python3-pkginfo python3-pygraph python3-pygments python3-pyscss python3-pyramid python3-pyro4 python3-rdflib python3-releases python3-rsa python3-scipy python3-shortuuid python3-simplejson python3-skimage python3-six python3-sphinx python3-sphere python3-sqlalchemy python3-tables python3-testtools python3-urllib3 python3-venv python3-virtualenv python3-werkzeug python3-xlrd python3-xlsxwriter python3-yaml python3-zmq
# SAGE
Before building sage do:
Change this line in /etc/login.defs: "UMASK 077"
# Cgroups configuration (!!) -- very important!
echo "session optional pam_cgroup.so" >> /etc/pam.d/common-session
pam-auth-update # select defaults -- this probably isn't needed.
# Open Axiom --- see https://launchpad.net/~pippijn/+archive/ubuntu/ppa
echo $'\n'"deb http://ppa.launchpad.net/pippijn/ppa/ubuntu precise main"$'\n' >> /etc/apt/sources.list && apt-get update && sudo apt-get install open-axiom*
# Primesieve
As root do
cd /tmp && wget http://dl.bintray.com/kimwalisch/primesieve/primesieve-5.4.1.tar.gz && tar xf primesieve-5.4.1.tar.gz && cd primesieve-5.4.1 && ./configure && make -j 10 && make install && rm -rf /tmp/primesieve*
Check http://primesieve.org/build.html for the latest version.
# GAP3
Install 64-bit version from http://webusers.imj-prg.fr/~jean.michel/gap3/
umask 022 && cd /projects/sage && wget http://webusers.imj-prg.fr/~jean.michel/gap3/gap3-jm5.zip && unzip gap3-jm5.zip && rm gap3-jm5.zip && mv gap3-jm5 gap3 && cd gap3 && sudo ln -s /projects/sage/gap3/bin/gap.sh /usr/local/bin/gap3
vi /projects/sage/gap3/bin/gap.sh # set GAP_DIR to /projects/sage/gap3
# OpenCV Computer Vision (not sure if I want to continue with this! -- it conflicts with systemwide ffmpeg)
# See http://stackoverflow.com/questions/26592577/installing-opencv-in-ubuntu-14-10
# Test: "import cv2"
iptables -F && cd /tmp&& rm -rf libvpx && git clone https://chromium.googlesource.com/webm/libvpx && cd libvpx/ && ./configure --disable-static --enable-shared && make -j20 install && chmod a+r /usr/local/lib/*libvpx* && rm /usr/lib/x86_64-linux-gnu/*libvpx* && cp -av /usr/local/lib/*libvpx* /usr/lib/x86_64-linux-gnu/ && cd .. && rm -rf libvpx && rm -rf opencv && mkdir opencv && cd opencv && git clone git://source.ffmpeg.org/ffmpeg.git && cd ffmpeg && ./configure --enable-libvpx --enable-shared --disable-static && make -j20 install && cd .. && rm -rf ffmpeg && wget http://downloads.sourceforge.net/project/opencvlibrary/opencv-unix/2.4.10/opencv-2.4.10.zip && unzip opencv-2.4.10.zip && cd opencv-2.4.10 && mkdir build && cd build && time cmake -D CMAKE_BUILD_TYPE=RELEASE -D CMAKE_INSTALL_PREFIX=/usr/local -D WITH_TBB=ON -D BUILD_NEW_PYTHON_SUPPORT=ON -D WITH_V4L=ON -D INSTALL_C_EXAMPLES=ON -D INSTALL_PYTHON_EXAMPLES=ON -D BUILD_EXAMPLES=ON -D WITH_QT=ON -D WITH_OPENGL=ON .. && time make -j12 && make install && sh -c 'echo "/usr/local/lib" > /etc/ld.so.conf.d/opencv.conf' && sudo ldconfig && cd /tmp && rm -rf opencv
# then
mv /usr/local/bin/ffmpeg /usr/local/bin/ffmpeg.0
# KWANT
apt-add-repository ppa:kwant-project/ppa && apt-get update && apt-get install python-kwant python-kwant-doc
# Octave: needed by octave for plotting:
# I tediously got this list of things that would install by not installing 'msh', 'bim', 'secs1d'
apt-get install octave-audio octave-biosig octave-common octave-communications octave-communications-common octave-control octave-data-smoothing octave-dataframe octave-dbg octave-doc octave-econometrics octave-epstk octave-financial octave-fpl octave-ga octave-gdf octave-general octave-geometry octave-gmt octave-gsl octave-htmldoc octave-image octave-info octave-io octave-lhapdf octave-linear-algebra octave-miscellaneous octave-missing-functions octave-mpi octave-nan octave-nlopt octave-nurbs octave-ocs octave-octcdf octave-odepkg octave-openmpi-ext octave-optim octave-optiminterp octave-parallel octave-pfstools octave-pkg-dev octave-psychtoolbox-3 octave-quaternion octave-secs2d octave-signal octave-sockets octave-specfun octave-splines octave-statistics octave-strings octave-struct octave-sundials octave-symbolic octave-tsa octave-vlfeat octave-vrml octave-zenity
cd /usr/share/fonts/truetype && ln -s liberation ttf-liberation
# Dropbox: so it's possible to setup dropbox to run in projects... at some point (users could easily do this anyways, but making it systemwide is best).
Get it here: https://www.dropbox.com/install?os=lnx
# Neovim system-wide:
cd /tmp && rm -rf neovim && unset MAKE && git clone https://github.com/neovim/neovim && cd neovim && make && umask 022 && sudo make install && rm -rf /tmp/neovim
# MACAULAY2: Install Macaulay2 system-wide from here: http://www.math.uiuc.edu/Macaulay2/Downloads/
apt-get install libntl-dev libntl0 libpari-gmp-tls4 libpari-dev pari-gp2c && cd /tmp/ && rm -rf m2 && mkdir m2 && cd m2 && wget http://www.math.uiuc.edu/Macaulay2/Downloads/Common/Macaulay2-1.7-common.deb && wget http://www.math.uiuc.edu/Macaulay2/Downloads/GNU-Linux/Ubuntu/Macaulay2-1.7-amd64-Linux-Ubuntu-14.10.deb && sudo dpkg -i *.deb && rm -rf /tmp/m2
# Julia: from http://julialang.org/downloads/
add-apt-repository ppa:staticfloat/juliareleases && add-apt-repository ppa:staticfloat/julia-deps && apt-get update && apt-get install julia julia-doc
# Nemo (after installing Julia)
umask 022
export JULIA_PKGDIR=/usr/local/share/julia/site/
echo 'Pkg.clone("https://github.com/wbhart/Nemo.jl")' | julia
echo 'Pkg.build("Nemo")' | julia
export LD_LIBRARY_PATH=/usr/local/share/julia/site/v0.4/Nemo/local/lib
cd $LD_LIBRARY_PATH; ln -s libarb.so.0.0.0 libarb.so
echo 'using Nemo' | julia
To test, do this from Julia:
using Nemo
# GIAC
Add to /etc/apt/sources.list:
deb http://www-fourier.ujf-grenoble.fr/~parisse/debian/ stable main
Then
apt-get update; apt-get install giac python-giacpy
# FEnICS: automated solution of differential equations by finite element methods
(Test with "import dolfin".)
add-apt-repository ppa:fenics-packages/fenics && apt-get update && apt-get install fenics
# System-wide Python packages not through apt:
apt-get install python-pip python3-pip && umask 022 && /usr/bin/pip install -U theano && /usr/bin/pip install -U clawpack
# IPYTHON3 in Python3 systemwide
sudo pip3 install --upgrade ipython ipywidgets
sudo ipython3 kernelspec install-self rethinkdb filterpy
Then edit /usr/local/share/jupyter/kernels/python3 and add a "-E" option before "-m" so that python3 can start with the sage -sh environment set.
# IJULIA
sudo su
umask 022; export JULIA_PKGDIR=/usr/local/share/julia/site/; julia
julia> Pkg.init()
julia> Pkg.add("IJulia")
# this copy may change when ipython dir changes
cp -rv "/root/.sage/ipython-2.3.0.p0/kernels/julia 0.3" "/usr/local/share/jupyter/kernels/julia 0.3"
Make sure the json file is this (it should be, with no change):
vi "/usr/local/share/jupyter/kernels/julia 0.3/kernel.json"
{
"display_name": "Julia",
"argv": [
"/usr/bin/julia",
"-i",
"-F",
"/usr/local/share/julia/site/v0.3/IJulia/src/kernel.jl",
"{connection_file}"
],
"language": "julia"
}
# R Kernel support for Jupyter (see https://github.com/IRkernel/IRkernel)
sudo su
umask 022
# and make this file: /usr/local/share/jupyter/kernels/ir/kernel.json
{
"language": "r",
"argv": [
"R",
"-e",
"IRkernel::main()",
"--args",
"{connection_file}"
],
"display_name": "R"
}
# POLYMAKE system-wide
# From http://www.polymake.org/doku.php/howto/install
# Get latest from http://www.polymake.org/doku.php/download/start and build:
apt-get install ant default-jdk g++ libboost-dev libgmp-dev libgmpxx4ldbl libmpfr-dev libperl-dev libsvn-perl libterm-readline-gnu-perl libxml-libxml-perl libxml-libxslt-perl libxml-perl libxml-writer-perl libxml2-dev w3c-dtd-xhtml xsltproc && cd /tmp/&& wget http://www.polymake.org/lib/exe/fetch.php/download/polymake-2.14r1.tar.bz2&& tar xvf polymake-2.14r1.tar.bz2 && cd polymake-2.14 && ./configure && make && make install && rm -rf /tmp/polymake*
# Make ROOT data analysis ipython notebook support system-wide work.
cd /usr/lib/x86_64-linux-gnu/root5.34 && wget https://gist.githubusercontent.com/mazurov/6194738/raw/67e851fdac969e670a11296642478f1801324b8d/rootnotes.py && chmod a+r * && echo "import sys; sys.path.extend(['/usr/lib/python2.7/dist-packages/', '/usr/lib/pymodules/python2.7', '/usr/lib/x86_64-linux-gnu/root5.34/', '/usr/local/lib/python2.7/dist-packages'])"$'\n' > /usr/local/sage/current/local/lib/python/sitecustomize.py
# Install 4ti2 system-wide...
export V=1.6.2 && cd /tmp && rm -rf 4ti2 && mkdir 4ti2 && cd 4ti2 && wget http://www.4ti2.de/version_$V/4ti2-$V.tar.gz && tar xf 4ti2-$V.tar.gz && cd 4ti2-$V && ./configure --prefix=/usr/local/ && time make -j8
make install && rm -rf /tmp/4ti2 # this *must* be a separate step!! :-(
# Add to /etc/security/limits.conf
Add these two lines two `/etc/security/limits.conf` so that bup works with large number of commits.
echo $'\n'"root soft nofile 20000"$'\n' >> /etc/security/limits.conf
echo "root hard nofile 20000"$'\n' >> /etc/security/limits.conf
# These to avoid fork-bombs:
echo "* soft nproc 1000"$'\n' >> /etc/security/limits.conf
echo "* hard nproc 1100"$'\n' >> /etc/security/limits.conf
echo "root soft nproc 20000"$'\n' >> /etc/security/limits.conf
echo "root hard nproc 20000"$'\n' >> /etc/security/limits.conf
# Setup /usr/local/bin/skel
rsync -axvHL ~/salvus/salvus/local_hub_template/ ~/.sagemathcloud/
cd ~/.sagemathcloud && . sagemathcloud-env && ./build
cd /usr/local/bin/ && sudo ln -s /home/salvus/salvus/salvus/scripts/skel/ . && cd ~/salvus/salvus/scripts/skel/ && rm -rf .sagemathcloud && mv ~/.sagemathcloud .
# Salvus (needs more!)
cd /home/salvus/salvus/salvus/
./install.py all
# MPI -- see http://stackoverflow.com/questions/12505476/using-mpich-with-boost-mpi-on-ubuntu
apt-get install mpich mpich-doc libmpich-dev && update-alternatives --set mpi /usr/include/mpich
# KVM HOSTS
On the VM hosts, some things are critical:
# Do this or VM's may be unstartable for a very, very long time.
echo never > /sys/kernel/mm/transparent_hugepage/enabled; echo never > /sys/kernel/mm/transparent_hugepage/defrag
# put this in cron since it's so critical that the perms are right... or vm's won't start
*/10 * * * * sudo chmod a+r /boot/vmlinuz-*; sudo chmod a+rw /dev/fuse
In /etc/sysctl.conf, put:
vm.swappiness=1
# Critical for compute VM's using google cloud storage:
sudo pip uninstall crcmod; sudo pip install -U crcmod
# Build Sage (as usual)
Get Sage and pull my patches from this repo!
https://github.com/sagemath/sagesmc/commits/develop
umask 022
#export SAGE_ATLAS_LIB=/usr/lib/ #<--- too slow!
export MAKE="make -j20"
make
# SAGE SCRIPTS -- once only, ever. Not needed when sage is upgraded.
Do from within Sage (as root):
install_scripts('/usr/local/bin/',ignore_existing=True)
# Delete cached packages
#cd SAGE_ROOT
rm -rf upstream local/var/tmp/sage/build/
# Run sage one last time
./sage
# Copy over the newest SageTex, so it actually works (only do this with the default sage):
sudo su
umask 022 && cp -rv /usr/local/sage/current/local/share/texmf/tex/generic/sagetex /usr/share/texmf/tex/latex/ && texhash
# System-wide Python pip packages
sudo su
umask 022
pip install twitter ctop
pip3 install --upgrade twitter sympy uncertainties zope.interface scikit-learn datasift
pip3 install --upgrade numba
# The netcd4 system-wide python package requires some crazy environment variables to work:
export PROJ_DIR=/usr; export NETCDF4_DIR=/usr; export HDF5_DIR=/usr/lib/x86_64-linux-gnu/hdf5/serial/; export HDF5_DIR=/usr/; export C_INCLUDE_PATH=/usr/lib/openmpi/include; export USE_NCCONFIG=0; export HDF5_INCDIR=/usr/include/hdf5/serial; export HDF5_LIBDIR=/usr/lib/x86_64-linux-gnu/hdf5/serial; export HDF5_INCDIR=/usr/include/hdf5/serial
pip3 install --upgrade netcdf4
# And for normal python2:
pip install datasift bokeh
# System-wide git trac
cd /tmp && git clone https://github.com/sagemath/git-trac-command.git && cd git-trac-command && sudo python setup.py install && rm -rf /tmp/git-trac-command
# X11
Add this line
X11UseLocalhost no
to
/etc/ssh/sshd_config
# HORRIBLE STUFF
Modified some code in axes3d.py in here:
salvus@compute1-us:/projects/sage/sage-6.7/local/lib/python2.7/site-packages/mpl_toolkits
self._draw_grid = False if b == "off" else bool(b)
#self._draw_grid = cbook._string_to_bool(b)
# EVEN MORE GORE
Install a temporary Rscript wrapper, because there is no `sage -Rscript` as a pendant to `sage -R`:
$ cat /usr/local/bin/Rscript
#!/usr/bin/env bash
SAGEDIR=$(dirname $(readlink -f $(which sage)))
exec sage -sh -c "$SAGEDIR/local/bin/Rscript $@"
"""
import logging, os, shutil, subprocess, sys, time, urllib2
# Enable logging
logging.basicConfig()
log = logging.getLogger('')
log.setLevel(logging.DEBUG) # WARNING, INFO
OS = os.uname()[0]
PWD = os.path.abspath('.')
DATA = os.path.abspath('data')
SRC = os.path.abspath('src')
PATCHES= os.path.join(SRC, 'patches')
BUILD = os.path.abspath(os.path.join(DATA, 'build'))
PREFIX = os.path.abspath(os.path.join(DATA, 'local'))
os.environ['PREFIX'] = PREFIX
if 'MAKE' in os.environ:
del os.environ['MAKE']
NODE_MODULES = [
'commander',
# I had to fork the official start-stop-daemon, since it is broken with
# newer node versions -- https://github.com/sagemathinc/start-stop-daemon
'sagemathinc/start-stop-daemon',
'winston',
'primus', # websocket abstraction
'ws',                      # fast low-level websocket dependency for primus
'sockjs',                  # not used but is optionally available in hub/primus/client
'engine.io', # this is the one we use -- seems by far the best overall. CAREFUL WITH DNS!
'coffee-script',
'node-uuid',
'[email protected]',
'uglify-js2',
'express', # web server
'express-session', # needed for oauth1 bitbucket auth
'body-parser', # parse post form uploads (needed for auth)
'passport',
'passport-bitbucket',
'passport-dropbox-oauth2',
'passport-facebook',
'passport-github',
'passport-google-oauth',
'passport-local',
'passport-twitter',
'passport-wordpress',
'nodeunit',
'validator',
'async',
'password-hash',
'nodemailer',
'nodemailer-sendgrid-transport',
'cookies',
'htmlparser',
'mime',
'pty.js',
'posix',
'mkdirp',
'walk',
'temp',
'formidable@latest',
'moment',
'underscore',
'read',
'hashring',
'rimraf',
'net-ping',
'marked',
'node-sass',               # transpiler for *.sass to *.css (rootfile is page/index.sass)
'http-proxy', # https://github.com/nodejitsu/node-http-proxy
'stripe', # for billing -- https://github.com/stripe/stripe-node
'blocked', # checking for blocking
'sqlite3',
'pdfkit',
'coffee-react', # used for react (obviously)
'dirty', # terrible key-value store
'gaze', # file watcher
'react', # facebook's core react library
'flummox', # flux implementation for react
'react-bootstrap', # bootstrap components
'rethinkdb'
]
# this is for the python in the /home/salvus/... place, not the system-wide or sage python!
PYTHON_PACKAGES = [
'readline',
'ipython', # a usable command line (ipython uses readline)
'python-daemon', # daemonization of python modules
'paramiko', # ssh2 implementation in python
'pyyaml' # used by wizard build script
]
SAGE_PIP_PACKAGES = [
'mpld3', # D3 Renderings of Matplotlib Graphics -- https://github.com/jakevdp/mpld3
'mercurial', # used when installing neuron
'backports.ssl-match-hostname', # a dependency of tornado (we don't install deps automatically right now)
'tornado', # used by IPython notebook
'pandas',
'pandasql',
'patsy',
'statsmodels',
'numexpr',
'tables',
'scikit_learn',
'theano',
'scikit-image',
'Shapely',
'SimPy',
'xlrd',
'xlwt',
'pyproj',
'bitarray',
'h5py',
'netcdf4',
'patsy',
'lxml',
'munkres',
'oct2py',
'psutil',
'requests', # Python HTTP for Humans. (NOTE: plotly depends on requests)
'plotly',
'mahotas',
'rpy2', # We have to upgrade rpy2, since the one in sage is so old, and it breaks IPython Notebook's R interface.
'clawpack',
'psycopg2', # Python-PostgreSQL Database Adapter
'nose', # nose extends unittest to make testing easier
'redis', # Python client for Redis key-value store
'pymongo', # Python driver for MongoDB
'fabric', # Fabric is a simple, Pythonic tool for remote execution and deployment.
'MySQL-python', # Python interface to MySQL
'paramiko', # SSH2 protocol library
'httplib2', # A comprehensive HTTP client library.
'greenlet', # Lightweight in-process concurrent programming
'gmpy2',
'mmh3',
'joblib',
'colorpy',
#'rootpy', # supports ROOT data analysis framework -- broken "import ROOT" doesn't work anymore
'tabulate',
'goslate', # google translate api -- http://pythonhosted.org/goslate/
'certifi', # dependency of https://github.com/obspy, which is installed systemwide from an ubuntu package repo
'ez_setup', # needed by fipy
#'pysparse', # needed by fipy; for the ==1.2-dev213 bullshit, see http://stackoverflow.com/questions/25459011/how-to-build-pysparse-on-ubuntu; it's amazing how bad pypi and python packaging are. Wow.
'fipy', # requested by Evan Chenelly <[email protected]> -- "A finite volume PDE solver in Python".
# to get it to build had to instead download directly and comment out these lines from setup.py
# #import ez_setup; ez_setup.use_setuptools()
'python-igraph', # requested by Santhust <[email protected]> -- "High performance graph data structures and algorithms" -- https://pypi.python.org/pypi/python-igraph/0.7
'mygene', # requested by Luca Beltrame for a bioinformatics course
'singledispatch', # needed by rpy2 ipython extension now
'qutip', # QuTiP is open-source software for simulating the dynamics of open quantum systems.
'tinyarray',
'pysal', # requested by Serge Rey of ASU for a course on Geographic Information Analysis
'folium', # requested by Serge Rey of ASU for a course on Geographic Information Analysis
'pint', # units package: http://pint.readthedocs.org/en/0.6/
'seaborn',
'ipythonblocks',
'line_profiler',
'astropy',
'mrjob',
'boto',
'pattern',
'seaborn',
'brewer2mpl',
'ggplot',
'periodictable',
'nltk',
'param',
'holoviews',
'plink',
'spherogram',
'FXrays',
'snappy',
'twitter',
'bayespy',
'astropy',
'aplpy',
'PyDSTool',
'progressbar', # requested by David Lisbonne
'pdfminer', # requested by Mesut Karakoc
'wcsaxes',
'reproject',
'txaio', 'six','autobahn','python-dateutil','service-identity','datasift', # the things to the left are deps for datasift. This is horrible, but if I don't do this the install fails trying to upgrade a system-wide installed ubuntu pip package.
'scikits.bootstrap',
'pystan',
'biopython',
'guppy',
'nose',
'pybtex',
'bokeh',
'numba'
]
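# Extra environment variables to set while pip-installing particular packages from
# the list above (e.g. clawpack needs LDFLAGS=-shared to build); see install_pip_packages.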
SAGE_PIP_PACKAGES_ENV = {'clawpack':{'LDFLAGS':'-shared'}}
# Pip packages but where we *do* install deps
SAGE_PIP_PACKAGES_DEPS = [
'Nikola[extras]',
'enum34', 'singledispatch', 'funcsigs', 'llvmlite', # used for numba
'beautifulsoup4',
'filterpy'
]
R_PACKAGES = [
'ggplot2',
'stringr',
'plyr',
'reshape2',
'zoo',
'car',
'mvtnorm',
'e1071',
'Rcpp',
'lattice',
'KernSmooth',
'Matrix',
'cluster',
'codetools',
'mgcv',
'rpart',
'survival',
'fields',
'circular',
'glmnet',
'Cairo',
#'xlsx',
'XML',
'data.table',
'brian',
'rugarch',
'quantmod',
'swirl',
'psych',
'spatstat',
'UsingR',
'readr',
'MCMCpack',
'ROCR',
'forecast',
'numDeriv',
'Matrix',
'NORMT3',
'ggmap',
'np',
'crs',
'SemiParBIVProbit',
'combinat',
'maptree'
]
SAGE_OPTIONAL_PACKAGES = [
'chomp',
'database_cremona_ellcurve',
'database_odlyzko_zeta',
'database_pari',
'cbc',
'cluster_seed',
'coxeter3',
'cryptominisat',
'cunningham_tables',
'database_gap',
'database_jones_numfield',
'database_kohel',
'database_symbolic_data',
'dot2tex',
'fricas',
'gambit',
'gap_packages',
'gnuplotpy',
'kash3',
'lie',
'mcqd',
'nauty',
'normaliz',
'nzmath',
'ore_algebra',
'p_group_cohomology', # currently broken
'phc',
'pycryptoplus',
'pyx',
'qhull',
'topcom',
'4ti2',
'modular_decomposition',
'csdp' # experimental; non-GPL compatible, but that is OK as we are not distributing. commercial use encouraged.
]
ENTHOUGHT_PACKAGES = [
'pyface',
'traits',
'scimath',
]
if not os.path.exists(BUILD):
os.makedirs(BUILD)
if 'SAGE_ROOT' not in os.environ:
log.info("Building salvus user code (so updating PATHs...)")
os.environ['PATH'] = os.path.join(PREFIX, 'bin') + ':' + os.environ['PATH']
os.environ['LD_LIBRARY_PATH'] = os.path.join(PREFIX, 'lib') + ':' + os.environ.get('LD_LIBRARY_PATH','')
else:
log.info("Building/updating a Sage install")
# number of cpus
try:
NCPU = os.sysconf("SC_NPROCESSORS_ONLN")
except:
NCPU = int(subprocess.Popen("sysctl -n hw.ncpu", shell=True, stdin=subprocess.PIPE,
stdout = subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True).stdout.read())
log.info("detected %s cpus", NCPU)
def cmd(s, path=None):
if path is not None:
s = 'cd "%s" && '%path + s
log.info("cmd: %s", s)
t0 = time.time()
if os.system(s):
raise RuntimeError('command failed: "%s" (%s seconds)'%(s, time.time()-t0))
else:
log.info("cmd %s took %s seconds", s, time.time()-t0)
def download(url):
# download target of given url to SRC directory
import urllib
t0 = time.time()
target = os.path.join(SRC, os.path.split(url)[-1].split('?')[0])
log.info("Downloading %s to %s..."%(url, target))
urllib.urlretrieve(url, target)
log.info("Took %s seconds"%(time.time()-t0))
return target
def extract_package(basename):
log.info("extracting package %s by finding tar ball in SRC directory, extract it in build directory, and return resulting path",
basename)
for filename in os.listdir(SRC):
if filename.startswith(basename):
i = filename.rfind('.tar.')
if i == -1:
i = filename.rfind('.tgz')
path = os.path.join(BUILD, filename[:i])
if os.path.exists(path):
log.info("removing existing path %s", path)
shutil.rmtree(path)
cmd('tar xf "%s"'%os.path.abspath(os.path.join(SRC, filename)), BUILD)
return path
raise RuntimeError("unable to extract package %s"%basename)
###########################################################################
# Functions that install extra packages and bug fixes to turn a standard
# Sage install into the one used in SMC.
###########################################################################
class BuildSage(object):
def __init__(self):
try:
from sage.all import SAGE_ROOT
except:
raise RuntimeError("BuildSage must be run from within a Sage install")
self.SAGE_ROOT = SAGE_ROOT
def path(self, path):
"""
Turn a path relative to SAGE_ROOT into an absolute path.
"""
return os.path.join(self.SAGE_ROOT, path)
def cmd(self, s):
cmd(s, self.SAGE_ROOT)
def everything(self):
"""
Do everything to patch/update/install/enhance this Sage install.
"""
self.pull_smc_sage()
#self.unextend_sys_path()
self.patch_sage_location()
self.patch_banner()
self.patch_sage_env()
self.user_site()
self.install_sloane()
self.install_projlib()
self.install_pip() # sage's is of course always hopelessly out of date
self.install_pip_packages()
self.install_jinja2() # since sage's is too old and pip packages doesn't upgrade
self.install_R_packages()
self.install_R_bioconductor()
self.install_optional_packages()
self.install_quantlib()
self.install_basemap()
self.install_pydelay()
self.install_gdal()
self.install_stein_watkins()
self.install_jsanimation()
self.install_sage_manifolds()
self.install_r_jupyter_kernel()
self.install_cv2()
self.install_cairo()
self.install_psage()
self.clean_up()
#self.extend_sys_path()
self.fix_permissions()
self.install_ipython_patch() # must be done manually still
# deprecated
#self.install_enthought_packages() # doesn't work anymore; they don't really want this.
#self.install_4ti2() # no longer needed since 4ti2 sage optional package finally works again...
# FAILED:
self.install_pymc() # FAIL -- also "pip install pymc" fails.
self.install_neuron()
def install_sage_manifolds(self):
# TODO: this will probably fail due to an interactive merge request (?)
self.cmd("cd $SAGE_ROOT && git pull https://github.com/sagemanifolds/sage.git </dev/null && sage -br < /dev/null")
def install_r_jupyter_kernel(self):
# see https://github.com/IRkernel/IRkernel
self.cmd(r"""echo 'install.packages("devtools", repos="http://ftp.osuosl.org/pub/cran/"); install.packages("RCurl", repos="http://ftp.osuosl.org/pub/cran/"); install.packages("base64enc", repos="http://ftp.osuosl.org/pub/cran/"); install.packages("uuid", repos="http://ftp.osuosl.org/pub/cran/"); library(devtools); install_github("armstrtw/rzmq"); install_github("IRkernel/repr"); install_github("IRkernel/IRdisplay"); install_github("IRkernel/IRkernel");' | R --no-save""")
def pull_smc_sage(self):
self.cmd("cd $SAGE_ROOT && git pull https://github.com/sagemathinc/smc-sage")
def install_jinja2(self):
self.cmd("pip install -U jinja2")
def install_ipython_patch(self):
"""
TODO:
"""
raise RuntimeError(r"""TODO: change 'local/lib/python/site-packages/notebook/notebookapp.py' to 'static_url_prefix = '/static/jupyter/''""")
def install_jsanimation(self):
self.cmd("cd /tmp && rm -rf JSAnimation && git clone https://github.com/jakevdp/JSAnimation.git && cd JSAnimation && python setup.py install && rm -rf /tmp/JSAnimation")
def install_psage(self):
self.cmd("cd /tmp/&& rm -rf psage && git clone [email protected]:williamstein/psage.git&& cd psage&& sage setup.py install && rm -rf /tmp/psage")
def install_cv2(self):
self.cmd("cd $SAGE_ROOT && cp -v /usr/local/lib/python2.7/dist-packages/*cv2* local/lib/python2.7/")
def install_cairo(self):
self.cmd("cd /tmp && rm -rf py2cairo && git clone git://git.cairographics.org/git/py2cairo && cd py2cairo && ./autogen.sh && ./configure --prefix=$SAGE_ROOT/local && make install")
def patch_sage_location(self):
"""
Since we build Sage in-place and never move it, the sage-location script
is a total waste of time, which only gets worse the more optional packages
we install. Thus we disable it completely.
"""
target = self.path("local/bin/sage-location")
f = open(target).read()
before = "'__main__':"
after = "'__main__' and False:"
if before in f:
log.info("patching %s"%target)
f = f.replace(before, after)
open(target,'w').write(f)
else:
if after not in f:
raise RuntimeError("unable to patch %s"%target)
log.info("already patched %s"%target)
def patch_banner(self):
"""
The default Sage banner is too verbose, frightening (since I always run devel versions),
and misleading -- since notebook() doesn't work on SMC, and help(...) is basically useless.
"""
path = self.path("local/bin/sage-banner")
v = open(path).readlines()
if len(v) < 5:
log.info("Sage banner already patched.")
else:
log.info("Patching the Sage banner.")
v[3] = '\xe2\x94\x82 Enhanced for SageMathCloud. \xe2\x94\x82\n'
w = [v[i] for i in [0,1,3,4]]
open(path,'w').write(''.join(w))
def patch_sage_env(self):
"""
Many optional Sage packages are still offered as optional packages, but they **DON'T work**
due to Andrew/Volker/whoever deprecating SAGE_DATA before they updated our optional
packages accordingly (which sucks). Anyway, this works around the issue for now; it is
present at least in sage-6.2.rc0.
"""
path = self.path("src/bin/sage-env")
f = open(path).read()
target = 'export SAGE_DATA="$SAGE_SHARE"'
if target not in f:
log.info("patching %s"%path)
open(path,'a').write('\n'+target)
else:
log.info("%s already patched"%path)
data = self.path("data")
if not os.path.exists(data):
# absolute paths are fine, since we will NEVER be moving this sage install
os.symlink(self.path("local/share"), data)
os.environ['SAGE_DATA'] = os.environ['SAGE_SHARE']
def octave_ext(self):
"""
The /usr/local/sage/current/local/share/sage/ext must be writeable by all, which is
a stupid horrible bug/shortcoming in Sage that people constantly hit. As a workaround,
we link it to a constrained filesystem for this purpose.
"""
target = self.path("local/share/sage/ext")
src = "/pool/ext"
if not (os.path.exists(src) and os.path.isdir(src)):
raise RuntimeError("please create a limited ZFS pool mounted as /pool/ext, with read-write access to all:\n\n\tzfs create pool/ext && chmod a+rwx /pool/ext && zfs set quota=1G pool/ext\n")
if os.path.exists(target):
try:
shutil.rmtree(target)
except:
os.unlink(target)
os.symlink(src, target)
def user_site(self):
import site
if not site.ENABLE_USER_SITE:
raise RuntimeError("Make sure to patch out this -- http://trac.sagemath.org/ticket/14243 -- by removing the stuff involving PYTHONNOUSERSITE from src/bin/sage-env")
def install_sloane(self):
"""
Install the Sloane Encyclopaedia tables. These used to be installed via an optional package,
but instead one must now run a command from within Sage.
"""
from sage.all import SloaneEncyclopedia
SloaneEncyclopedia.install(overwrite=True)
def install_projlib(self):
"""
Install the proj cartographic transformations and geodetic computations library
into Sage, which is a dep for the pyproj pip package.
"""
version_base = "4.9.2" # TODO need to automate finding newest!
version = version_base + "" # find newest version at http://download.osgeo.org/proj/?C=M;O=D
download("http://download.osgeo.org/proj/proj-%s.tar.gz"%version)
path = extract_package("proj-%s"%version)
# their tarball is annoying: the extracted path is not what comes before .tar.gz. UGH.
i = path.find(version_base)
path = path[:i+len(version_base)]
cmd("./configure --prefix=%s"%self.SAGE_ROOT, path)
cmd("make -j%s install"%NCPU, path)
def install_pip(self):
"""Install pip itself into Sage; it should come with Sage, but doesn't yet."""
self.unextend_sys_path()
cmd("pip install --upgrade pip")
def extend_sys_path(self):
"""
Make this Sage install able to import modules installed in the system-wide
Python, e.g., make it so there is some hope that maybe
'import dolfin' works, even though dolfin is some
complicated FEM library installed system-wide via Ubuntu packages.
This MUST be done *after* pip is installed.
"""
raise RuntimeError("this is a VERY bad idea -- see https://groups.google.com/forum/#!topic/sage-release/MGkb_-y-moM")
target = self.path("local/lib/python/sitecustomize.py")
ROOT = '/usr/lib/x86_64-linux-gnu/' + [x for x in os.listdir('/usr/lib/x86_64-linux-gnu/') if 'root' in x][-1]
paths = ['/usr/lib/python2.7/dist-packages/', '/usr/local/lib/python2.7/dist-packages/', '/usr/lib/pymodules/python2.7', ROOT]
# sanity check
for p in paths:
if not os.path.exists(p):
raise RuntimeError("path %s does not exist"%p)
f = open(target).read() if os.path.exists(target) else ""
to_add = "import sys; sys.path.extend(%r)"%paths
if to_add not in f:
log.info("patching %s by appending '%s'"%(target, to_add))
open(target, 'a').write('\n' + to_add)
else:
log.info("%s already patched"%target)
raise "I'm manually modifying sitecustomize.py to include ~/.local/python.... -- see previous install; don't understand why this is needed."
def unextend_sys_path(self):
raise RuntimeError("this is a VERY bad idea -- see https://groups.google.com/forum/#!topic/sage-release/MGkb_-y-moM")
for f in ["local/lib/python/sitecustomize.py", "local/lib/python/sitecustomize.pyc"]:
target = self.path(f)
log.info(target)
if os.path.exists(target):
log.info("removing %s"%target)
os.unlink(target)
def install_pip_packages(self, upgrade=True):
"""Install each pip-installable package."""
self.unextend_sys_path()
os.environ['PROJ_DIR']=os.environ['NETCDF4_DIR']=os.environ['HDF5_DIR']='/usr/'
os.environ['C_INCLUDE_PATH']='/usr/lib/openmpi/include'
os.environ['HDF5_DIR']='/usr/lib/x86_64-linux-gnu/hdf5/serial/' # needed for tables -- right path at least for ubuntu 15.04
# for these, see https://github.com/Unidata/netcdf4-python/issues/341
os.environ['USE_NCCONFIG']='0'
os.environ['HDF5_LIBDIR']='/usr/lib/x86_64-linux-gnu/hdf5/serial'
os.environ['HDF5_INCDIR']='/usr/include/hdf5/serial'
os.environ['NETCDF4_DIR']='/usr'
for package in SAGE_PIP_PACKAGES:
log.info("** Installing/upgrading %s **"%package)
# NOTE: the "--no-deps" is critical below; otherwise, pip will do things like install a version of numpy that is
# much newer than the one in Sage, and incompatible (due to not having patches), which if it installs at all, will
# break Sage (i.e. lots of doctests fail, etc.).
e = ' '.join(["%s=%s"%x for x in SAGE_PIP_PACKAGES_ENV[package].items()]) if package in SAGE_PIP_PACKAGES_ENV else ''
self.cmd("%s pip install %s --no-deps %s"%(e, '--upgrade' if upgrade else '', package))
for package in SAGE_PIP_PACKAGES_DEPS:
log.info("** Installing/upgrading %s **"%package)
e = ' '.join(["%s=%s"%x for x in SAGE_PIP_PACKAGES_ENV[package].items()]) if package in SAGE_PIP_PACKAGES_ENV else ''
self.cmd("%s pip install %s %s"%(e, '--upgrade' if upgrade else '', package))
def install_pymc(self):
self.cmd("pip install git+https://github.com/pymc-devs/pymc")
def install_R_packages(self):
s = ','.join(['"%s"'%name for name in R_PACKAGES])
c = 'install.packages(c(%s), repos="https://cran.fhcrc.org/")'%s
self.cmd("echo '%s' | R --no-save"%c)
def install_R_bioconductor(self):
c = 'source("http://bioconductor.org/biocLite.R"); biocLite()'
self.cmd("echo '%s' | R --no-save"%c)
c = 'library(BiocInstaller); biocLite(c("geneplotter", "limma", "puma", "affy", "edgeR", "BitSeq", "hgu95av2cdf", "hgu133plus2cdf", "affyPLM", "ddCt"))'
self.cmd("echo '%s' | R --no-save"%c)
def install_rstan(self):
"""
Install the Rstan package into R.
"""
c = 'install.packages(c("rstan"), repos="https://cran.fhcrc.org/", dependencies = TRUE)'
self.cmd("echo '%s' | R --no-save"%c)
def install_pystan(self):
# follow directions here: https://github.com/stan-dev/pystan
self.cmd(r"""cd /tmp && rm -rf pystan && git clone --recursive https://github.com/stan-dev/pystan.git && cd pystan && python setup.py install && rm -rf /tmp/pystan""")
def install_optional_packages(self, skip=[]):
from sage.all import install_package
if 'MAKE' not in os.environ:
# some packages, e.g., chomp, won't build without MAKE being set.
os.environ['MAKE'] = "make -j%s"%NCPU
for package in SAGE_OPTIONAL_PACKAGES:
if package in skip:
log.info("** Skipping %s **"%package)
continue
log.info("** Installing/upgrading %s **"%package)
#install_package(package)
# We have to do this (instead of using install_package) because Sage's install_package
# command is completely broken in rc0 at least (April 27, 2014).
self.cmd("sage -p %s"%package)
# We also have to do a "sage -b", since some optional packages don't get fully installed
# until rebuilding Cython modules. I posted to sage-devel about this bug on Aug 4.
self.cmd("sage -b")
# deprecated because it now says: "The EPD subscriber repository is only available to subscribers."
def DEPRECATED_install_enthought_packages(self):
"""
Like Sage does, Enthought has a bunch of packages that are not easily available
from pypi...
"""
# We grab the list of tarball names from the website, so we can determine
# the newest version of each that we want below.
repo = 'https://www.enthought.com/repo/ets/'
packages = [x.split('"')[1] for x in urllib2.urlopen(repo).readlines() if '.tar.gz"' in x]
for pkg in ENTHOUGHT_PACKAGES:
v = [x for x in packages if x.lower().startswith(pkg)]
v.sort()
newest = v[-1]
log.info("installing %s..."%newest)
download(os.path.join(repo, newest))
path = extract_package(newest)
cmd("python setup.py install", path)
def install_quantlib(self):
# See http://sourceforge.net/projects/quantlib/
VERSION = "1.5"
try:
# check if already installed
import QuantLib
if QuantLib.__version__ == VERSION:
log.info("QuantLib version %s is already installed"%VERSION)
return
except:
pass
pkg = "QuantLib-SWIG-%s.tar.gz"%VERSION
url = "http://downloads.sourceforge.net/project/quantlib/QuantLib/%s/other%%20languages/%s"%(VERSION, pkg)
# I got this url from the "direct link" thing in SourceForge. I don't know if it is stable over time; if not... Bummer.
url +="?r=http%3A%2F%2Fsourceforge.net%2Fprojects%2Fquantlib%2Ffiles%2FQuantLib%2F1.4%2Fother%2520languages%2F&ts=1398645275&use_mirror=softlayer-dal"
download(url)
path = extract_package(pkg)
cmd("./configure", path)
cmd("make -j%s -C Python install"%NCPU, path)
def install_neuron(self):
"""
Neuron -- for empirically-based simulations of neurons and networks of neurons
(requested by Jose Guzman)
"""
def clean_up():
if os.path.exists('/tmp/iv'): shutil.rmtree("/tmp/iv")
if os.path.exists('/tmp/nrn'): shutil.rmtree("/tmp/nrn")
from sage.all import SAGE_LOCAL
clean_up()
cmd("hg clone http://www.neuron.yale.edu/hg/neuron/iv", "/tmp")
cmd("hg clone http://www.neuron.yale.edu/hg/neuron/nrn", "/tmp")
cmd("./build.sh && ./configure --prefix=%s && make -j%s && make install"%(SAGE_LOCAL, NCPU), "/tmp/iv")
cmd("./build.sh && ./configure --prefix=%s --with-iv=%s --with-nrnpython && make -j%s && make install && cd src/nrnpython/ && python setup.py install"%(SAGE_LOCAL, SAGE_LOCAL, NCPU), "/tmp/nrn")
clean_up()
def install_basemap(self):
"""
basemap -- Plot data on map projections with matplotlib
"""
try:
import mpl_toolkits.basemap
installed_version = mpl_toolkits.basemap.__version__
version = [x for x in urllib2.urlopen("https://raw.githubusercontent.com/matplotlib/basemap/master/setup.py").readlines()
if x.startswith('__version__')][0].split('=')[1].strip(' \'"\n')
log.info("version=%s, installed_version=%s", version, installed_version)
if version == installed_version:
log.info("basemap version %s already installed", version)
return
except Exception, msg:
pass
cmd("/usr/bin/git clone [email protected]:matplotlib/basemap.git", "/tmp")
cmd("python setup.py install", "/tmp/basemap")
shutil.rmtree("/tmp/basemap")
def install_pydelay(self):
"""
Install pydelay -- a program which translates a system of delay differential equations (DDEs) into simulation C-code and compiles and runs the code (using scipy weave). -- see http://pydelay.sourceforge.net/
Requested for UCLA by Jane Shevtsov: https://plus.google.com/115360165819500279592/posts/73vK9Pw4W6g
"""
cmd("umask 022 && cd /tmp/ && rm -rf pydelay* && wget http://downloads.sourceforge.net/project/pydelay/pydelay-0.1.1.tar.gz && tar xf pydelay-0.1.1.tar.gz && cd pydelay-0.1.1 && python setup.py install && rm -rf /tmp/pydelay*")
def install_gdal(self):
"""
Install GDAL -- for geospatial imaging.
"""
# The `make -j8; make` below instead of just `make` is because the first make mysteriously gives an error on
# exit, but running it again seems to work fine.
GDAL_VERSION = '2.0.0' # options here -- http://download.osgeo.org/gdal/CURRENT/
cmd("umask 022 && unset MAKE && cd /tmp && export V=%s && rm -rf gdal-$V* && wget http://download.osgeo.org/gdal/CURRENT/gdal-$V.tar.xz && tar xf gdal-$V.tar.xz && cd gdal-$V && export CXXFLAGS=-I/usr/include/mpi/ && ./configure --with-python --prefix=$SAGE_ROOT/local && unset SHELL && make -j8; make && cd swig/python && python setup.py install && cd ../.. && make install && cd /tmp && rm -rf gdal-$V*"%GDAL_VERSION)
def install_stein_watkins(self):
# The package itself is "sage -i database_stein_watkins"
cmd("umask 022 && cd /usr/local/sage/current/data && rm -f stein_watkins stein-watkins-ecdb && ln -sf /usr/local/sage/stein-watkins-ecdb stein-watkins-ecdb && ln -sf /usr/local/sage/stein-watkins-ecdb stein_watkins")
def install_4ti2(self):
"""
"""
site = "http://www.4ti2.de/"
target = [x for x in urllib2.urlopen("%s/download_4ti2.html"%site).readlines() if 'source code</a>' in x][0].split('"')[1]
version = target.split("_")[1].split('/')[0]
z = [x for x in sorted(os.listdir(self.path("local/var/lib/sage/installed"))) if x.startswith('4ti2')]
if len(z) == 0:
installed_version = ''
else:
installed_version = z[-1].split('-')[1]
if version == installed_version:
log.info("4ti2 version %s already installed", version)
return
download(site + target)
pkg = target.split('/')[-1]
path = extract_package(pkg)
cmd("./configure --prefix=/usr/local/sage/current/local/ && time make -j%s"%NCPU, path)
cmd("make install", path)
open(self.path("local/var/lib/sage/installed/4ti2-%s"%version),'w')
shutil.rmtree(path)
def clean_up(self):
# clean up packages downloaded and extracted using the download command
src = os.path.join(os.environ['HOME'], 'salvus', 'salvus', 'src')
for s in os.listdir(src):
if s != 'patches':
target = os.path.join(src, s)
log.info("removing %s"%target)
os.unlink(target)
build = os.path.join(os.environ['HOME'], 'salvus', 'salvus', 'data', 'build')
for s in os.listdir(build):
target = os.path.join(build, s)
log.info("removing %s"%target)
shutil.rmtree(target)
# clean up packages left over from optional Sage package installs
# This should be a make target, but isn't (in sage-6.2, at least).
for p in ['upstream', 'local/var/tmp/sage/build']:
path = self.path(p)
if os.path.exists(path):
log.info("deleting %s"%path)
shutil.rmtree(path)
def fix_permissions(self):
self.cmd("chmod a+r -R .; find . -perm /u+x -execdir chmod a+x {} \;")
| gpl-3.0 |
Obus/scikit-learn | benchmarks/bench_plot_ward.py | 290 | 1260 | """
Benchmark scikit-learn's Ward implementation compared to SciPy's
"""
import time
import numpy as np
from scipy.cluster import hierarchy
import pylab as pl
from sklearn.cluster import AgglomerativeClustering
ward = AgglomerativeClustering(n_clusters=3, linkage='ward')
n_samples = np.logspace(.5, 3, 9)
n_features = np.logspace(1, 3.5, 7)
N_samples, N_features = np.meshgrid(n_samples,
n_features)
scikits_time = np.zeros(N_samples.shape)
scipy_time = np.zeros(N_samples.shape)
for i, n in enumerate(n_samples):
for j, p in enumerate(n_features):
X = np.random.normal(size=(n, p))
t0 = time.time()
ward.fit(X)
scikits_time[j, i] = time.time() - t0
t0 = time.time()
hierarchy.ward(X)
scipy_time[j, i] = time.time() - t0
ratio = scikits_time / scipy_time
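# ratio > 1 means scikit-learn's Ward took longer than SciPy's for that
# (n_samples, n_features) combination; the log of this ratio is what gets plotted below.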
pl.figure("scikit-learn Ward's method benchmark results")
pl.imshow(np.log(ratio), aspect='auto', origin="lower")
pl.colorbar()
pl.contour(ratio, levels=[1, ], colors='k')
pl.yticks(range(len(n_features)), n_features.astype(np.int))
pl.ylabel('N features')
pl.xticks(range(len(n_samples)), n_samples.astype(np.int))
pl.xlabel('N samples')
pl.title("Scikit's time, in units of scipy time (log)")
pl.show()
| bsd-3-clause |
xuewei4d/scikit-learn | sklearn/ensemble/_hist_gradient_boosting/tests/test_bitset.py | 10 | 2100 | import pytest
import numpy as np
from numpy.testing import assert_allclose
from sklearn.ensemble._hist_gradient_boosting._bitset import (
set_bitset_memoryview,
in_bitset_memoryview,
set_raw_bitset_from_binned_bitset
)
from sklearn.ensemble._hist_gradient_boosting.common import X_DTYPE
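# Illustrative note (not part of the original module): a bitset here is a small array of
# uint32 words, and inserting a value v sets bit (v % 32) of word (v // 32). For example,
# 33 becomes 2**1 in the second word and 79 becomes 2**15 in the third word, matching the
# expected bitsets parametrized below.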
@pytest.mark.parametrize("values_to_insert, expected_bitset", [
([0, 4, 33], np.array([2**0 + 2**4, 2**1, 0], dtype=np.uint32)),
([31, 32, 33, 79], np.array([2**31, 2**0 + 2**1, 2**15], dtype=np.uint32))
])
def test_set_get_bitset(values_to_insert, expected_bitset):
n_32bits_ints = 3
bitset = np.zeros(n_32bits_ints, dtype=np.uint32)
for value in values_to_insert:
set_bitset_memoryview(bitset, value)
assert_allclose(expected_bitset, bitset)
for value in range(32 * n_32bits_ints):
if value in values_to_insert:
assert in_bitset_memoryview(bitset, value)
else:
assert not in_bitset_memoryview(bitset, value)
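# Illustrative note (not part of the original module): the next test checks the mapping from
# binned category indices back to raw category values. E.g. with
# raw_categories = [3, 4, 5, 10, 31, 32, 43] and binned categories {0, 2, 4, 5, 6}, the raw
# bitset should mark {3, 5, 31, 32, 43}, i.e. bits 3, 5, 31 of word 0 and bits 0, 11 of word 1.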
@pytest.mark.parametrize(
"raw_categories, binned_cat_to_insert, expected_raw_bitset", [
([3, 4, 5, 10, 31, 32, 43],
[0, 2, 4, 5, 6],
[2**3 + 2**5 + 2**31, 2**0 + 2**11]),
([3, 33, 50, 52],
[1, 3],
[0, 2**1 + 2**20]),
]
)
def test_raw_bitset_from_binned_bitset(raw_categories, binned_cat_to_insert,
expected_raw_bitset):
binned_bitset = np.zeros(2, dtype=np.uint32)
raw_bitset = np.zeros(2, dtype=np.uint32)
raw_categories = np.asarray(raw_categories, dtype=X_DTYPE)
for val in binned_cat_to_insert:
set_bitset_memoryview(binned_bitset, val)
set_raw_bitset_from_binned_bitset(raw_bitset, binned_bitset,
raw_categories)
assert_allclose(expected_raw_bitset, raw_bitset)
for binned_cat_val, raw_cat_val in enumerate(raw_categories):
if binned_cat_val in binned_cat_to_insert:
assert in_bitset_memoryview(raw_bitset, raw_cat_val)
else:
assert not in_bitset_memoryview(raw_bitset, raw_cat_val)
| bsd-3-clause |
Windy-Ground/scikit-learn | sklearn/metrics/classification.py | 95 | 67713 | """Metrics to assess performance on classification task given class prediction
Functions named as ``*_score`` return a scalar value to maximize: the higher
the better
Functions named as ``*_error`` or ``*_loss`` return a scalar value to minimize:
the lower the better
"""
# Authors: Alexandre Gramfort <[email protected]>
# Mathieu Blondel <[email protected]>
# Olivier Grisel <[email protected]>
# Arnaud Joly <[email protected]>
# Jochen Wersdorfer <[email protected]>
# Lars Buitinck <[email protected]>
# Joel Nothman <[email protected]>
# Noel Dawe <[email protected]>
# Jatin Shah <[email protected]>
# Saurabh Jha <[email protected]>
# License: BSD 3 clause
from __future__ import division
import warnings
import numpy as np
from scipy.sparse import coo_matrix
from scipy.sparse import csr_matrix
from scipy.spatial.distance import hamming as sp_hamming
from ..preprocessing import LabelBinarizer, label_binarize
from ..preprocessing import LabelEncoder
from ..utils import check_array
from ..utils import check_consistent_length
from ..utils import column_or_1d
from ..utils.multiclass import unique_labels
from ..utils.multiclass import type_of_target
from ..utils.validation import _num_samples
from ..utils.sparsefuncs import count_nonzero
from ..utils.fixes import bincount
from .base import UndefinedMetricWarning
def _check_targets(y_true, y_pred):
"""Check that y_true and y_pred belong to the same classification task
This converts multiclass or binary types to a common shape, and raises a
ValueError for a mix of multilabel and multiclass targets, a mix of
multilabel formats, for the presence of continuous-valued or multioutput
targets, or for targets of different lengths.
Column vectors are squeezed to 1d, while multilabel formats are returned
as CSR sparse label indicators.
Parameters
----------
y_true : array-like
y_pred : array-like
Returns
-------
type_true : one of {'multilabel-indicator', 'multiclass', 'binary'}
The type of the true target data, as output by
``utils.multiclass.type_of_target``
y_true : array or indicator matrix
y_pred : array or indicator matrix
"""
check_consistent_length(y_true, y_pred)
type_true = type_of_target(y_true)
type_pred = type_of_target(y_pred)
y_type = set([type_true, type_pred])
if y_type == set(["binary", "multiclass"]):
y_type = set(["multiclass"])
if len(y_type) > 1:
raise ValueError("Can't handle mix of {0} and {1}"
"".format(type_true, type_pred))
# We can't have more than one value on y_type => The set is no longer needed
y_type = y_type.pop()
# No metrics support "multiclass-multioutput" format
if (y_type not in ["binary", "multiclass", "multilabel-indicator"]):
raise ValueError("{0} is not supported".format(y_type))
if y_type in ["binary", "multiclass"]:
y_true = column_or_1d(y_true)
y_pred = column_or_1d(y_pred)
if y_type.startswith('multilabel'):
y_true = csr_matrix(y_true)
y_pred = csr_matrix(y_pred)
y_type = 'multilabel-indicator'
return y_type, y_true, y_pred
def _weighted_sum(sample_score, sample_weight, normalize=False):
if normalize:
return np.average(sample_score, weights=sample_weight)
elif sample_weight is not None:
return np.dot(sample_score, sample_weight)
else:
return sample_score.sum()
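# Illustrative note (not part of the original module): with
# sample_score = np.array([1., 0., 1.]) and sample_weight = [1., 1., 2.],
# _weighted_sum returns np.dot([1, 0, 1], [1, 1, 2]) = 3.0, while with
# normalize=True it returns np.average([1, 0, 1], weights=[1, 1, 2]) = 0.75.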
def accuracy_score(y_true, y_pred, normalize=True, sample_weight=None):
"""Accuracy classification score.
In multilabel classification, this function computes subset accuracy:
the set of labels predicted for a sample must *exactly* match the
corresponding set of labels in y_true.
Read more in the :ref:`User Guide <accuracy_score>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) labels.
y_pred : 1d array-like, or label indicator array / sparse matrix
Predicted labels, as returned by a classifier.
normalize : bool, optional (default=True)
If ``False``, return the number of correctly classified samples.
Otherwise, return the fraction of correctly classified samples.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
score : float
If ``normalize == True``, return the correctly classified samples
(float), else it returns the number of correctly classified samples
(int).
The best performance is 1 with ``normalize == True`` and the number
of samples with ``normalize == False``.
See also
--------
jaccard_similarity_score, hamming_loss, zero_one_loss
Notes
-----
In binary and multiclass classification, this function is equal
to the ``jaccard_similarity_score`` function.
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import accuracy_score
>>> y_pred = [0, 2, 1, 3]
>>> y_true = [0, 1, 2, 3]
>>> accuracy_score(y_true, y_pred)
0.5
>>> accuracy_score(y_true, y_pred, normalize=False)
2
In the multilabel case with binary label indicators:
>>> accuracy_score(np.array([[0, 1], [1, 1]]), np.ones((2, 2)))
0.5
"""
# Compute accuracy for each possible representation
y_type, y_true, y_pred = _check_targets(y_true, y_pred)
if y_type.startswith('multilabel'):
differing_labels = count_nonzero(y_true - y_pred, axis=1)
score = differing_labels == 0
else:
score = y_true == y_pred
return _weighted_sum(score, sample_weight, normalize)
def confusion_matrix(y_true, y_pred, labels=None):
"""Compute confusion matrix to evaluate the accuracy of a classification
By definition a confusion matrix :math:`C` is such that :math:`C_{i, j}`
is equal to the number of observations known to be in group :math:`i` but
predicted to be in group :math:`j`.
Read more in the :ref:`User Guide <confusion_matrix>`.
Parameters
----------
y_true : array, shape = [n_samples]
Ground truth (correct) target values.
y_pred : array, shape = [n_samples]
Estimated targets as returned by a classifier.
labels : array, shape = [n_classes], optional
List of labels to index the matrix. This may be used to reorder
or select a subset of labels.
If none is given, those that appear at least once
in ``y_true`` or ``y_pred`` are used in sorted order.
Returns
-------
C : array, shape = [n_classes, n_classes]
Confusion matrix
References
----------
.. [1] `Wikipedia entry for the Confusion matrix
<http://en.wikipedia.org/wiki/Confusion_matrix>`_
Examples
--------
>>> from sklearn.metrics import confusion_matrix
>>> y_true = [2, 0, 2, 2, 0, 1]
>>> y_pred = [0, 0, 2, 2, 0, 2]
>>> confusion_matrix(y_true, y_pred)
array([[2, 0, 0],
[0, 0, 1],
[1, 0, 2]])
>>> y_true = ["cat", "ant", "cat", "cat", "ant", "bird"]
>>> y_pred = ["ant", "ant", "cat", "cat", "ant", "cat"]
>>> confusion_matrix(y_true, y_pred, labels=["ant", "bird", "cat"])
array([[2, 0, 0],
[0, 0, 1],
[1, 0, 2]])
"""
y_type, y_true, y_pred = _check_targets(y_true, y_pred)
if y_type not in ("binary", "multiclass"):
raise ValueError("%s is not supported" % y_type)
if labels is None:
labels = unique_labels(y_true, y_pred)
else:
labels = np.asarray(labels)
n_labels = labels.size
label_to_ind = dict((y, x) for x, y in enumerate(labels))
# convert yt, yp into index
y_pred = np.array([label_to_ind.get(x, n_labels + 1) for x in y_pred])
y_true = np.array([label_to_ind.get(x, n_labels + 1) for x in y_true])
# intersect y_pred, y_true with labels, eliminate items not in labels
ind = np.logical_and(y_pred < n_labels, y_true < n_labels)
y_pred = y_pred[ind]
y_true = y_true[ind]
CM = coo_matrix((np.ones(y_true.shape[0], dtype=np.int), (y_true, y_pred)),
shape=(n_labels, n_labels)
).toarray()
return CM
def cohen_kappa_score(y1, y2, labels=None):
"""Cohen's kappa: a statistic that measures inter-annotator agreement.
This function computes Cohen's kappa [1], a score that expresses the level
of agreement between two annotators on a classification problem. It is
defined as
.. math::
\kappa = (p_o - p_e) / (1 - p_e)
where :math:`p_o` is the empirical probability of agreement on the label
assigned to any sample (the observed agreement ratio), and :math:`p_e` is
the expected agreement when both annotators assign labels randomly.
:math:`p_e` is estimated using a per-annotator empirical prior over the
class labels [2].
Parameters
----------
y1 : array, shape = [n_samples]
Labels assigned by the first annotator.
y2 : array, shape = [n_samples]
Labels assigned by the second annotator. The kappa statistic is
symmetric, so swapping ``y1`` and ``y2`` doesn't change the value.
labels : array, shape = [n_classes], optional
List of labels to index the matrix. This may be used to select a
subset of labels. If None, all labels that appear at least once in
``y1`` or ``y2`` are used.
Returns
-------
kappa : float
The kappa statistic, which is a number between -1 and 1. The maximum
value means complete agreement; zero or lower means chance agreement.
References
----------
.. [1] J. Cohen (1960). "A coefficient of agreement for nominal scales".
Educational and Psychological Measurement 20(1):37-46.
doi:10.1177/001316446002000104.
.. [2] R. Artstein and M. Poesio (2008). "Inter-coder agreement for
computational linguistics". Computational Linguistic 34(4):555-596.
"""
confusion = confusion_matrix(y1, y2, labels=labels)
P = confusion / float(confusion.sum())
p_observed = np.trace(P)
p_expected = np.dot(P.sum(axis=0), P.sum(axis=1))
return (p_observed - p_expected) / (1 - p_expected)
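# Illustrative worked example (not part of the original module): for
# y1 = [0, 0, 1, 1] and y2 = [0, 0, 2, 2], the confusion matrix over labels [0, 1, 2]
# is [[2, 0, 0], [0, 0, 2], [0, 0, 0]], so p_o = 0.5,
# p_e = dot([0.5, 0, 0.5], [0.5, 0.5, 0]) = 0.25, and
# cohen_kappa_score(y1, y2) = (0.5 - 0.25) / (1 - 0.25) = 1/3.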
def jaccard_similarity_score(y_true, y_pred, normalize=True,
sample_weight=None):
"""Jaccard similarity coefficient score
The Jaccard index [1], or Jaccard similarity coefficient, defined as
the size of the intersection divided by the size of the union of two label
sets, is used to compare set of predicted labels for a sample to the
corresponding set of labels in ``y_true``.
Read more in the :ref:`User Guide <jaccard_similarity_score>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) labels.
y_pred : 1d array-like, or label indicator array / sparse matrix
Predicted labels, as returned by a classifier.
normalize : bool, optional (default=True)
If ``False``, return the sum of the Jaccard similarity coefficient
over the sample set. Otherwise, return the average of Jaccard
similarity coefficient.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
score : float
If ``normalize == True``, return the average Jaccard similarity
coefficient, else it returns the sum of the Jaccard similarity
coefficient over the sample set.
The best performance is 1 with ``normalize == True`` and the number
of samples with ``normalize == False``.
See also
--------
accuracy_score, hamming_loss, zero_one_loss
Notes
-----
In binary and multiclass classification, this function is equivalent
to the ``accuracy_score``. It differs in the multilabel classification
problem.
References
----------
.. [1] `Wikipedia entry for the Jaccard index
<http://en.wikipedia.org/wiki/Jaccard_index>`_
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import jaccard_similarity_score
>>> y_pred = [0, 2, 1, 3]
>>> y_true = [0, 1, 2, 3]
>>> jaccard_similarity_score(y_true, y_pred)
0.5
>>> jaccard_similarity_score(y_true, y_pred, normalize=False)
2
In the multilabel case with binary label indicators:
>>> jaccard_similarity_score(np.array([[0, 1], [1, 1]]),\
np.ones((2, 2)))
0.75
"""
# Compute accuracy for each possible representation
y_type, y_true, y_pred = _check_targets(y_true, y_pred)
if y_type.startswith('multilabel'):
with np.errstate(divide='ignore', invalid='ignore'):
# oddly, we may get an "invalid" rather than a "divide" error here
pred_or_true = count_nonzero(y_true + y_pred, axis=1)
pred_and_true = count_nonzero(y_true.multiply(y_pred), axis=1)
score = pred_and_true / pred_or_true
# If there is no label, it results in a Nan instead, we set
# the jaccard to 1: lim_{x->0} x/x = 1
# Note with py2.6 and np 1.3: we can't check safely for nan.
score[pred_or_true == 0.0] = 1.0
else:
score = y_true == y_pred
return _weighted_sum(score, sample_weight, normalize)
def matthews_corrcoef(y_true, y_pred):
"""Compute the Matthews correlation coefficient (MCC) for binary classes
The Matthews correlation coefficient is used in machine learning as a
measure of the quality of binary (two-class) classifications. It takes into
account true and false positives and negatives and is generally regarded as
a balanced measure which can be used even if the classes are of very
different sizes. The MCC is in essence a correlation coefficient value
between -1 and +1. A coefficient of +1 represents a perfect prediction, 0
an average random prediction and -1 an inverse prediction. The statistic
is also known as the phi coefficient. [source: Wikipedia]
Only in the binary case does this relate to information about true and
false positives and negatives. See references below.
Read more in the :ref:`User Guide <matthews_corrcoef>`.
Parameters
----------
y_true : array, shape = [n_samples]
Ground truth (correct) target values.
y_pred : array, shape = [n_samples]
Estimated targets as returned by a classifier.
Returns
-------
mcc : float
The Matthews correlation coefficient (+1 represents a perfect
prediction, 0 an average random prediction and -1 an inverse
prediction).
References
----------
.. [1] `Baldi, Brunak, Chauvin, Andersen and Nielsen, (2000). Assessing the
accuracy of prediction algorithms for classification: an overview
<http://dx.doi.org/10.1093/bioinformatics/16.5.412>`_
.. [2] `Wikipedia entry for the Matthews Correlation Coefficient
<http://en.wikipedia.org/wiki/Matthews_correlation_coefficient>`_
Examples
--------
>>> from sklearn.metrics import matthews_corrcoef
>>> y_true = [+1, +1, +1, -1]
>>> y_pred = [+1, -1, +1, +1]
>>> matthews_corrcoef(y_true, y_pred) # doctest: +ELLIPSIS
-0.33...
"""
y_type, y_true, y_pred = _check_targets(y_true, y_pred)
if y_type != "binary":
raise ValueError("%s is not supported" % y_type)
lb = LabelEncoder()
lb.fit(np.hstack([y_true, y_pred]))
y_true = lb.transform(y_true)
y_pred = lb.transform(y_pred)
with np.errstate(invalid='ignore'):
mcc = np.corrcoef(y_true, y_pred)[0, 1]
if np.isnan(mcc):
return 0.
else:
return mcc
def zero_one_loss(y_true, y_pred, normalize=True, sample_weight=None):
"""Zero-one classification loss.
If normalize is ``True``, return the fraction of misclassifications
(float), else it returns the number of misclassifications (int). The best
performance is 0.
Read more in the :ref:`User Guide <zero_one_loss>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) labels.
y_pred : 1d array-like, or label indicator array / sparse matrix
Predicted labels, as returned by a classifier.
normalize : bool, optional (default=True)
If ``False``, return the number of misclassifications.
Otherwise, return the fraction of misclassifications.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
loss : float or int,
If ``normalize == True``, return the fraction of misclassifications
(float), else it returns the number of misclassifications (int).
Notes
-----
In multilabel classification, the zero_one_loss function corresponds to
the subset zero-one loss: for each sample, the entire set of labels must be
correctly predicted, otherwise the loss for that sample is equal to one.
See also
--------
accuracy_score, hamming_loss, jaccard_similarity_score
Examples
--------
>>> from sklearn.metrics import zero_one_loss
>>> y_pred = [1, 2, 3, 4]
>>> y_true = [2, 2, 3, 4]
>>> zero_one_loss(y_true, y_pred)
0.25
>>> zero_one_loss(y_true, y_pred, normalize=False)
1
In the multilabel case with binary label indicators:
>>> zero_one_loss(np.array([[0, 1], [1, 1]]), np.ones((2, 2)))
0.5
"""
score = accuracy_score(y_true, y_pred,
normalize=normalize,
sample_weight=sample_weight)
if normalize:
return 1 - score
else:
if sample_weight is not None:
n_samples = np.sum(sample_weight)
else:
n_samples = _num_samples(y_true)
return n_samples - score
def f1_score(y_true, y_pred, labels=None, pos_label=1, average='binary',
sample_weight=None):
"""Compute the F1 score, also known as balanced F-score or F-measure
The F1 score can be interpreted as a weighted average of the precision and
recall, where an F1 score reaches its best value at 1 and worst score at 0.
The relative contribution of precision and recall to the F1 score are
equal. The formula for the F1 score is::
F1 = 2 * (precision * recall) / (precision + recall)
In the multi-class and multi-label case, this is the weighted average of
the F1 score of each class.
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) target values.
y_pred : 1d array-like, or label indicator array / sparse matrix
Estimated targets as returned by a classifier.
labels : list, optional
The set of labels to include when ``average != 'binary'``, and their
order if ``average is None``. Labels present in the data can be
excluded, for example to calculate a multiclass average ignoring a
majority negative class, while labels not present in the data will
result in 0 components in a macro average. For multilabel targets,
labels are column indices. By default, all labels in ``y_true`` and
``y_pred`` are used in sorted order.
pos_label : str or int, 1 by default
The class to report if ``average='binary'``. Until version 0.18 it is
necessary to set ``pos_label=None`` if seeking to use another averaging
method over binary targets.
average : string, [None, 'binary' (default), 'micro', 'macro', 'samples', \
'weighted']
This parameter is required for multiclass/multilabel targets.
If ``None``, the scores for each class are returned. Otherwise, this
determines the type of averaging performed on the data:
``'binary'``:
Only report results for the class specified by ``pos_label``.
This is applicable only if targets (``y_{true,pred}``) are binary.
``'micro'``:
Calculate metrics globally by counting the total true positives,
false negatives and false positives.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label). This
alters 'macro' to account for label imbalance; it can result in an
F-score that is not between precision and recall.
``'samples'``:
Calculate metrics for each instance, and find their average (only
meaningful for multilabel classification where this differs from
:func:`accuracy_score`).
Note that if ``pos_label`` is given in binary classification with
`average != 'binary'`, only that positive class is reported. This
behavior is deprecated and will change in version 0.18.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
f1_score : float or array of float, shape = [n_unique_labels]
F1 score of the positive class in binary classification or weighted
average of the F1 scores of each class for the multiclass task.
References
----------
.. [1] `Wikipedia entry for the F1-score
<http://en.wikipedia.org/wiki/F1_score>`_
Examples
--------
>>> from sklearn.metrics import f1_score
>>> y_true = [0, 1, 2, 0, 1, 2]
>>> y_pred = [0, 2, 1, 0, 0, 1]
>>> f1_score(y_true, y_pred, average='macro') # doctest: +ELLIPSIS
0.26...
>>> f1_score(y_true, y_pred, average='micro') # doctest: +ELLIPSIS
0.33...
>>> f1_score(y_true, y_pred, average='weighted') # doctest: +ELLIPSIS
0.26...
>>> f1_score(y_true, y_pred, average=None)
array([ 0.8, 0. , 0. ])
"""
return fbeta_score(y_true, y_pred, 1, labels=labels,
pos_label=pos_label, average=average,
sample_weight=sample_weight)
def fbeta_score(y_true, y_pred, beta, labels=None, pos_label=1,
average='binary', sample_weight=None):
"""Compute the F-beta score
The F-beta score is the weighted harmonic mean of precision and recall,
reaching its optimal value at 1 and its worst value at 0.
The `beta` parameter determines the weight of precision in the combined
score. ``beta < 1`` lends more weight to precision, while ``beta > 1``
favors recall (``beta -> 0`` considers only precision, ``beta -> inf``
only recall).
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) target values.
y_pred : 1d array-like, or label indicator array / sparse matrix
Estimated targets as returned by a classifier.
beta : float
Weight of precision in harmonic mean.
labels : list, optional
The set of labels to include when ``average != 'binary'``, and their
order if ``average is None``. Labels present in the data can be
excluded, for example to calculate a multiclass average ignoring a
majority negative class, while labels not present in the data will
result in 0 components in a macro average. For multilabel targets,
labels are column indices. By default, all labels in ``y_true`` and
``y_pred`` are used in sorted order.
pos_label : str or int, 1 by default
The class to report if ``average='binary'``. Until version 0.18 it is
necessary to set ``pos_label=None`` if seeking to use another averaging
method over binary targets.
average : string, [None, 'binary' (default), 'micro', 'macro', 'samples', \
'weighted']
This parameter is required for multiclass/multilabel targets.
If ``None``, the scores for each class are returned. Otherwise, this
determines the type of averaging performed on the data:
``'binary'``:
Only report results for the class specified by ``pos_label``.
This is applicable only if targets (``y_{true,pred}``) are binary.
``'micro'``:
Calculate metrics globally by counting the total true positives,
false negatives and false positives.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label). This
alters 'macro' to account for label imbalance; it can result in an
F-score that is not between precision and recall.
``'samples'``:
Calculate metrics for each instance, and find their average (only
meaningful for multilabel classification where this differs from
:func:`accuracy_score`).
Note that if ``pos_label`` is given in binary classification with
`average != 'binary'`, only that positive class is reported. This
behavior is deprecated and will change in version 0.18.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
fbeta_score : float (if average is not None) or array of float, shape =\
[n_unique_labels]
F-beta score of the positive class in binary classification or weighted
average of the F-beta score of each class for the multiclass task.
References
----------
.. [1] R. Baeza-Yates and B. Ribeiro-Neto (2011).
Modern Information Retrieval. Addison Wesley, pp. 327-328.
.. [2] `Wikipedia entry for the F1-score
<http://en.wikipedia.org/wiki/F1_score>`_
Examples
--------
>>> from sklearn.metrics import fbeta_score
>>> y_true = [0, 1, 2, 0, 1, 2]
>>> y_pred = [0, 2, 1, 0, 0, 1]
>>> fbeta_score(y_true, y_pred, average='macro', beta=0.5)
... # doctest: +ELLIPSIS
0.23...
>>> fbeta_score(y_true, y_pred, average='micro', beta=0.5)
... # doctest: +ELLIPSIS
0.33...
>>> fbeta_score(y_true, y_pred, average='weighted', beta=0.5)
... # doctest: +ELLIPSIS
0.23...
>>> fbeta_score(y_true, y_pred, average=None, beta=0.5)
... # doctest: +ELLIPSIS
array([ 0.71..., 0. , 0. ])
"""
_, _, f, _ = precision_recall_fscore_support(y_true, y_pred,
beta=beta,
labels=labels,
pos_label=pos_label,
average=average,
warn_for=('f-score',),
sample_weight=sample_weight)
return f
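# Illustrative note (not part of the original module): the usual F-beta formula is
# (1 + beta**2) * precision * recall / (beta**2 * precision + recall). For example,
# with precision = 0.5 and recall = 1.0, beta = 2 gives 5 * 0.5 / (4 * 0.5 + 1.0) = 0.833...,
# while beta = 0.5 gives 1.25 * 0.5 / (0.25 * 0.5 + 1.0) = 0.555..., showing that
# beta > 1 favors recall and beta < 1 favors precision.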
def _prf_divide(numerator, denominator, metric, modifier, average, warn_for):
"""Performs division and handles divide-by-zero.
On zero-division, sets the corresponding result elements to zero
and raises a warning.
The metric, modifier and average arguments are used only for determining
an appropriate warning.
"""
result = numerator / denominator
mask = denominator == 0.0
if not np.any(mask):
return result
# remove infs
result[mask] = 0.0
# build appropriate warning
# E.g. "Precision and F-score are ill-defined and being set to 0.0 in
# labels with no predicted samples"
axis0 = 'sample'
axis1 = 'label'
if average == 'samples':
axis0, axis1 = axis1, axis0
if metric in warn_for and 'f-score' in warn_for:
msg_start = '{0} and F-score are'.format(metric.title())
elif metric in warn_for:
msg_start = '{0} is'.format(metric.title())
elif 'f-score' in warn_for:
msg_start = 'F-score is'
else:
return result
msg = ('{0} ill-defined and being set to 0.0 {{0}} '
'no {1} {2}s.'.format(msg_start, modifier, axis0))
if len(mask) == 1:
msg = msg.format('due to')
else:
msg = msg.format('in {0}s with'.format(axis1))
warnings.warn(msg, UndefinedMetricWarning, stacklevel=2)
return result
def precision_recall_fscore_support(y_true, y_pred, beta=1.0, labels=None,
pos_label=1, average=None,
warn_for=('precision', 'recall',
'f-score'),
sample_weight=None):
"""Compute precision, recall, F-measure and support for each class
The precision is the ratio ``tp / (tp + fp)`` where ``tp`` is the number of
true positives and ``fp`` the number of false positives. The precision is
intuitively the ability of the classifier not to label as positive a sample
that is negative.
The recall is the ratio ``tp / (tp + fn)`` where ``tp`` is the number of
true positives and ``fn`` the number of false negatives. The recall is
intuitively the ability of the classifier to find all the positive samples.
The F-beta score can be interpreted as a weighted harmonic mean of
the precision and recall, where an F-beta score reaches its best
value at 1 and worst score at 0.
The F-beta score weights recall more than precision by a factor of
``beta``. ``beta == 1.0`` means recall and precision are equally important.
The support is the number of occurrences of each class in ``y_true``.
If ``pos_label is None`` and in binary classification, this function
returns the average precision, recall and F-measure if ``average``
is one of ``'micro'``, ``'macro'``, ``'weighted'`` or ``'samples'``.
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) target values.
y_pred : 1d array-like, or label indicator array / sparse matrix
Estimated targets as returned by a classifier.
beta : float, 1.0 by default
The strength of recall versus precision in the F-score.
labels : list, optional
The set of labels to include when ``average != 'binary'``, and their
order if ``average is None``. Labels present in the data can be
excluded, for example to calculate a multiclass average ignoring a
majority negative class, while labels not present in the data will
result in 0 components in a macro average. For multilabel targets,
labels are column indices. By default, all labels in ``y_true`` and
``y_pred`` are used in sorted order.
pos_label : str or int, 1 by default
The class to report if ``average='binary'``. Until version 0.18 it is
necessary to set ``pos_label=None`` if seeking to use another averaging
method over binary targets.
average : string, [None (default), 'binary', 'micro', 'macro', 'samples', \
'weighted']
If ``None``, the scores for each class are returned. Otherwise, this
determines the type of averaging performed on the data:
``'binary'``:
Only report results for the class specified by ``pos_label``.
This is applicable only if targets (``y_{true,pred}``) are binary.
``'micro'``:
Calculate metrics globally by counting the total true positives,
false negatives and false positives.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label). This
alters 'macro' to account for label imbalance; it can result in an
F-score that is not between precision and recall.
``'samples'``:
Calculate metrics for each instance, and find their average (only
meaningful for multilabel classification where this differs from
:func:`accuracy_score`).
Note that if ``pos_label`` is given in binary classification with
`average != 'binary'`, only that positive class is reported. This
behavior is deprecated and will change in version 0.18.
warn_for : tuple or set, for internal use
This determines which warnings will be made in the case that this
function is being used to return only one of its metrics.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
precision: float (if average is not None) or array of float, shape =\
[n_unique_labels]
    recall: float (if average is not None) or array of float, shape =\
[n_unique_labels]
fbeta_score: float (if average is not None) or array of float, shape =\
[n_unique_labels]
support: int (if average is not None) or array of int, shape =\
[n_unique_labels]
The number of occurrences of each label in ``y_true``.
References
----------
.. [1] `Wikipedia entry for the Precision and recall
<http://en.wikipedia.org/wiki/Precision_and_recall>`_
.. [2] `Wikipedia entry for the F1-score
<http://en.wikipedia.org/wiki/F1_score>`_
.. [3] `Discriminative Methods for Multi-labeled Classification Advances
in Knowledge Discovery and Data Mining (2004), pp. 22-30 by Shantanu
Godbole, Sunita Sarawagi
<http://www.godbole.net/shantanu/pubs/multilabelsvm-pakdd04.pdf>`
Examples
--------
>>> from sklearn.metrics import precision_recall_fscore_support
>>> y_true = np.array(['cat', 'dog', 'pig', 'cat', 'dog', 'pig'])
>>> y_pred = np.array(['cat', 'pig', 'dog', 'cat', 'cat', 'dog'])
>>> precision_recall_fscore_support(y_true, y_pred, average='macro')
... # doctest: +ELLIPSIS
(0.22..., 0.33..., 0.26..., None)
>>> precision_recall_fscore_support(y_true, y_pred, average='micro')
... # doctest: +ELLIPSIS
(0.33..., 0.33..., 0.33..., None)
>>> precision_recall_fscore_support(y_true, y_pred, average='weighted')
... # doctest: +ELLIPSIS
(0.22..., 0.33..., 0.26..., None)
It is possible to compute per-label precisions, recalls, F1-scores and
supports instead of averaging:
>>> precision_recall_fscore_support(y_true, y_pred, average=None,
... labels=['pig', 'dog', 'cat'])
... # doctest: +ELLIPSIS,+NORMALIZE_WHITESPACE
(array([ 0. , 0. , 0.66...]),
array([ 0., 0., 1.]),
array([ 0. , 0. , 0.8]),
array([2, 2, 2]))
"""
average_options = (None, 'micro', 'macro', 'weighted', 'samples')
if average not in average_options and average != 'binary':
raise ValueError('average has to be one of ' +
str(average_options))
if beta <= 0:
raise ValueError("beta should be >0 in the F-beta score")
y_type, y_true, y_pred = _check_targets(y_true, y_pred)
present_labels = unique_labels(y_true, y_pred)
if average == 'binary' and (y_type != 'binary' or pos_label is None):
warnings.warn('The default `weighted` averaging is deprecated, '
'and from version 0.18, use of precision, recall or '
'F-score with multiclass or multilabel data or '
'pos_label=None will result in an exception. '
'Please set an explicit value for `average`, one of '
'%s. In cross validation use, for instance, '
'scoring="f1_weighted" instead of scoring="f1".'
% str(average_options), DeprecationWarning, stacklevel=2)
average = 'weighted'
if y_type == 'binary' and pos_label is not None and average is not None:
if average != 'binary':
warnings.warn('From version 0.18, binary input will not be '
'handled specially when using averaged '
'precision/recall/F-score. '
'Please use average=\'binary\' to report only the '
'positive class performance.', DeprecationWarning)
if labels is None or len(labels) <= 2:
if pos_label not in present_labels:
if len(present_labels) < 2:
# Only negative labels
return (0., 0., 0., 0)
else:
raise ValueError("pos_label=%r is not a valid label: %r" %
(pos_label, present_labels))
labels = [pos_label]
if labels is None:
labels = present_labels
n_labels = None
else:
n_labels = len(labels)
labels = np.hstack([labels, np.setdiff1d(present_labels, labels,
assume_unique=True)])
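        # requested labels come first, followed by any other labels present in
        # the data; later code keeps only the first n_labels entries
        # (labels[:n_labels]) when reporting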
### Calculate tp_sum, pred_sum, true_sum ###
if y_type.startswith('multilabel'):
sum_axis = 1 if average == 'samples' else 0
# All labels are index integers for multilabel.
# Select labels:
if not np.all(labels == present_labels):
if np.max(labels) > np.max(present_labels):
raise ValueError('All labels must be in [0, n labels). '
'Got %d > %d' %
(np.max(labels), np.max(present_labels)))
if np.min(labels) < 0:
raise ValueError('All labels must be in [0, n labels). '
'Got %d < 0' % np.min(labels))
y_true = y_true[:, labels[:n_labels]]
y_pred = y_pred[:, labels[:n_labels]]
# calculate weighted counts
true_and_pred = y_true.multiply(y_pred)
tp_sum = count_nonzero(true_and_pred, axis=sum_axis,
sample_weight=sample_weight)
pred_sum = count_nonzero(y_pred, axis=sum_axis,
sample_weight=sample_weight)
true_sum = count_nonzero(y_true, axis=sum_axis,
sample_weight=sample_weight)
elif average == 'samples':
raise ValueError("Sample-based precision, recall, fscore is "
"not meaningful outside multilabel "
"classification. See the accuracy_score instead.")
else:
le = LabelEncoder()
le.fit(labels)
y_true = le.transform(y_true)
y_pred = le.transform(y_pred)
sorted_labels = le.classes_
# labels are now from 0 to len(labels) - 1 -> use bincount
tp = y_true == y_pred
tp_bins = y_true[tp]
if sample_weight is not None:
tp_bins_weights = np.asarray(sample_weight)[tp]
else:
tp_bins_weights = None
if len(tp_bins):
tp_sum = bincount(tp_bins, weights=tp_bins_weights,
minlength=len(labels))
else:
# Pathological case
true_sum = pred_sum = tp_sum = np.zeros(len(labels))
if len(y_pred):
pred_sum = bincount(y_pred, weights=sample_weight,
minlength=len(labels))
if len(y_true):
true_sum = bincount(y_true, weights=sample_weight,
minlength=len(labels))
# Retain only selected labels
indices = np.searchsorted(sorted_labels, labels[:n_labels])
tp_sum = tp_sum[indices]
true_sum = true_sum[indices]
pred_sum = pred_sum[indices]
if average == 'micro':
tp_sum = np.array([tp_sum.sum()])
pred_sum = np.array([pred_sum.sum()])
true_sum = np.array([true_sum.sum()])
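        # micro averaging pools the true-positive, predicted and true counts
        # over all labels, so the divisions below yield a single global
        # precision/recall/F value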
### Finally, we have all our sufficient statistics. Divide! ###
beta2 = beta ** 2
with np.errstate(divide='ignore', invalid='ignore'):
# Divide, and on zero-division, set scores to 0 and warn:
# Oddly, we may get an "invalid" rather than a "divide" error
# here.
precision = _prf_divide(tp_sum, pred_sum,
'precision', 'predicted', average, warn_for)
recall = _prf_divide(tp_sum, true_sum,
'recall', 'true', average, warn_for)
# Don't need to warn for F: either P or R warned, or tp == 0 where pos
# and true are nonzero, in which case, F is well-defined and zero
f_score = ((1 + beta2) * precision * recall /
(beta2 * precision + recall))
f_score[tp_sum == 0] = 0.0
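        # Illustrative arithmetic: with precision P = 0.5, recall R = 1.0 and
        # beta = 2, F_2 = (1 + 4) * 0.5 * 1.0 / (4 * 0.5 + 1.0) = 2.5 / 3 ~ 0.83,
        # i.e. the score leans toward recall when beta > 1.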
## Average the results ##
if average == 'weighted':
weights = true_sum
if weights.sum() == 0:
return 0, 0, 0, None
elif average == 'samples':
weights = sample_weight
else:
weights = None
if average is not None:
assert average != 'binary' or len(precision) == 1
precision = np.average(precision, weights=weights)
recall = np.average(recall, weights=weights)
f_score = np.average(f_score, weights=weights)
true_sum = None # return no support
return precision, recall, f_score, true_sum
def precision_score(y_true, y_pred, labels=None, pos_label=1,
average='binary', sample_weight=None):
"""Compute the precision
The precision is the ratio ``tp / (tp + fp)`` where ``tp`` is the number of
true positives and ``fp`` the number of false positives. The precision is
intuitively the ability of the classifier not to label as positive a sample
that is negative.
The best value is 1 and the worst value is 0.
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) target values.
y_pred : 1d array-like, or label indicator array / sparse matrix
Estimated targets as returned by a classifier.
labels : list, optional
The set of labels to include when ``average != 'binary'``, and their
order if ``average is None``. Labels present in the data can be
excluded, for example to calculate a multiclass average ignoring a
majority negative class, while labels not present in the data will
result in 0 components in a macro average. For multilabel targets,
labels are column indices. By default, all labels in ``y_true`` and
``y_pred`` are used in sorted order.
pos_label : str or int, 1 by default
The class to report if ``average='binary'``. Until version 0.18 it is
necessary to set ``pos_label=None`` if seeking to use another averaging
method over binary targets.
average : string, [None, 'binary' (default), 'micro', 'macro', 'samples', \
'weighted']
This parameter is required for multiclass/multilabel targets.
If ``None``, the scores for each class are returned. Otherwise, this
determines the type of averaging performed on the data:
``'binary'``:
Only report results for the class specified by ``pos_label``.
This is applicable only if targets (``y_{true,pred}``) are binary.
``'micro'``:
Calculate metrics globally by counting the total true positives,
false negatives and false positives.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label). This
alters 'macro' to account for label imbalance; it can result in an
F-score that is not between precision and recall.
``'samples'``:
Calculate metrics for each instance, and find their average (only
meaningful for multilabel classification where this differs from
:func:`accuracy_score`).
Note that if ``pos_label`` is given in binary classification with
`average != 'binary'`, only that positive class is reported. This
behavior is deprecated and will change in version 0.18.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
precision : float (if average is not None) or array of float, shape =\
[n_unique_labels]
Precision of the positive class in binary classification or weighted
average of the precision of each class for the multiclass task.
Examples
--------
>>> from sklearn.metrics import precision_score
>>> y_true = [0, 1, 2, 0, 1, 2]
>>> y_pred = [0, 2, 1, 0, 0, 1]
>>> precision_score(y_true, y_pred, average='macro') # doctest: +ELLIPSIS
0.22...
>>> precision_score(y_true, y_pred, average='micro') # doctest: +ELLIPSIS
0.33...
>>> precision_score(y_true, y_pred, average='weighted')
... # doctest: +ELLIPSIS
0.22...
>>> precision_score(y_true, y_pred, average=None) # doctest: +ELLIPSIS
array([ 0.66..., 0. , 0. ])
"""
p, _, _, _ = precision_recall_fscore_support(y_true, y_pred,
labels=labels,
pos_label=pos_label,
average=average,
warn_for=('precision',),
sample_weight=sample_weight)
return p
def recall_score(y_true, y_pred, labels=None, pos_label=1, average='binary',
sample_weight=None):
"""Compute the recall
The recall is the ratio ``tp / (tp + fn)`` where ``tp`` is the number of
true positives and ``fn`` the number of false negatives. The recall is
intuitively the ability of the classifier to find all the positive samples.
The best value is 1 and the worst value is 0.
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) target values.
y_pred : 1d array-like, or label indicator array / sparse matrix
Estimated targets as returned by a classifier.
labels : list, optional
The set of labels to include when ``average != 'binary'``, and their
order if ``average is None``. Labels present in the data can be
excluded, for example to calculate a multiclass average ignoring a
majority negative class, while labels not present in the data will
result in 0 components in a macro average. For multilabel targets,
labels are column indices. By default, all labels in ``y_true`` and
``y_pred`` are used in sorted order.
pos_label : str or int, 1 by default
The class to report if ``average='binary'``. Until version 0.18 it is
necessary to set ``pos_label=None`` if seeking to use another averaging
method over binary targets.
average : string, [None, 'binary' (default), 'micro', 'macro', 'samples', \
'weighted']
This parameter is required for multiclass/multilabel targets.
If ``None``, the scores for each class are returned. Otherwise, this
determines the type of averaging performed on the data:
``'binary'``:
Only report results for the class specified by ``pos_label``.
This is applicable only if targets (``y_{true,pred}``) are binary.
``'micro'``:
Calculate metrics globally by counting the total true positives,
false negatives and false positives.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label). This
alters 'macro' to account for label imbalance; it can result in an
F-score that is not between precision and recall.
``'samples'``:
Calculate metrics for each instance, and find their average (only
meaningful for multilabel classification where this differs from
:func:`accuracy_score`).
Note that if ``pos_label`` is given in binary classification with
`average != 'binary'`, only that positive class is reported. This
behavior is deprecated and will change in version 0.18.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
recall : float (if average is not None) or array of float, shape =\
[n_unique_labels]
Recall of the positive class in binary classification or weighted
average of the recall of each class for the multiclass task.
Examples
--------
>>> from sklearn.metrics import recall_score
>>> y_true = [0, 1, 2, 0, 1, 2]
>>> y_pred = [0, 2, 1, 0, 0, 1]
>>> recall_score(y_true, y_pred, average='macro') # doctest: +ELLIPSIS
0.33...
>>> recall_score(y_true, y_pred, average='micro') # doctest: +ELLIPSIS
0.33...
>>> recall_score(y_true, y_pred, average='weighted') # doctest: +ELLIPSIS
0.33...
>>> recall_score(y_true, y_pred, average=None)
array([ 1., 0., 0.])
"""
_, r, _, _ = precision_recall_fscore_support(y_true, y_pred,
labels=labels,
pos_label=pos_label,
average=average,
warn_for=('recall',),
sample_weight=sample_weight)
return r
def classification_report(y_true, y_pred, labels=None, target_names=None,
sample_weight=None, digits=2):
"""Build a text report showing the main classification metrics
Read more in the :ref:`User Guide <classification_report>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) target values.
y_pred : 1d array-like, or label indicator array / sparse matrix
Estimated targets as returned by a classifier.
labels : array, shape = [n_labels]
Optional list of label indices to include in the report.
target_names : list of strings
Optional display names matching the labels (same order).
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
digits : int
Number of digits for formatting output floating point values
Returns
-------
report : string
Text summary of the precision, recall, F1 score for each class.
Examples
--------
>>> from sklearn.metrics import classification_report
>>> y_true = [0, 1, 2, 2, 2]
>>> y_pred = [0, 0, 2, 2, 1]
>>> target_names = ['class 0', 'class 1', 'class 2']
>>> print(classification_report(y_true, y_pred, target_names=target_names))
precision recall f1-score support
<BLANKLINE>
class 0 0.50 1.00 0.67 1
class 1 0.00 0.00 0.00 1
class 2 1.00 0.67 0.80 3
<BLANKLINE>
avg / total 0.70 0.60 0.61 5
<BLANKLINE>
"""
if labels is None:
labels = unique_labels(y_true, y_pred)
else:
labels = np.asarray(labels)
last_line_heading = 'avg / total'
if target_names is None:
width = len(last_line_heading)
target_names = ['%s' % l for l in labels]
else:
width = max(len(cn) for cn in target_names)
width = max(width, len(last_line_heading), digits)
headers = ["precision", "recall", "f1-score", "support"]
fmt = '%% %ds' % width # first column: class name
fmt += ' '
fmt += ' '.join(['% 9s' for _ in headers])
fmt += '\n'
headers = [""] + headers
report = fmt % tuple(headers)
report += '\n'
p, r, f1, s = precision_recall_fscore_support(y_true, y_pred,
labels=labels,
average=None,
sample_weight=sample_weight)
for i, label in enumerate(labels):
values = [target_names[i]]
for v in (p[i], r[i], f1[i]):
values += ["{0:0.{1}f}".format(v, digits)]
values += ["{0}".format(s[i])]
report += fmt % tuple(values)
report += '\n'
# compute averages
values = [last_line_heading]
for v in (np.average(p, weights=s),
np.average(r, weights=s),
np.average(f1, weights=s)):
values += ["{0:0.{1}f}".format(v, digits)]
values += ['{0}'.format(np.sum(s))]
report += fmt % tuple(values)
return report
def hamming_loss(y_true, y_pred, classes=None):
"""Compute the average Hamming loss.
The Hamming loss is the fraction of labels that are incorrectly predicted.
Read more in the :ref:`User Guide <hamming_loss>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) labels.
y_pred : 1d array-like, or label indicator array / sparse matrix
Predicted labels, as returned by a classifier.
classes : array, shape = [n_labels], optional
Integer array of labels.
Returns
-------
loss : float or int,
Return the average Hamming loss between element of ``y_true`` and
``y_pred``.
See Also
--------
accuracy_score, jaccard_similarity_score, zero_one_loss
Notes
-----
    In multiclass classification, the Hamming loss corresponds to the Hamming
distance between ``y_true`` and ``y_pred`` which is equivalent to the
subset ``zero_one_loss`` function.
In multilabel classification, the Hamming loss is different from the
subset zero-one loss. The zero-one loss considers the entire set of labels
    for a given sample incorrect if it does not entirely match the true set of
labels. Hamming loss is more forgiving in that it penalizes the individual
labels.
    The Hamming loss is upper bounded by the subset zero-one loss. When
normalized over samples, the Hamming loss is always between 0 and 1.
References
----------
.. [1] Grigorios Tsoumakas, Ioannis Katakis. Multi-Label Classification:
An Overview. International Journal of Data Warehousing & Mining,
3(3), 1-13, July-September 2007.
.. [2] `Wikipedia entry on the Hamming distance
<http://en.wikipedia.org/wiki/Hamming_distance>`_
Examples
--------
>>> from sklearn.metrics import hamming_loss
>>> y_pred = [1, 2, 3, 4]
>>> y_true = [2, 2, 3, 4]
>>> hamming_loss(y_true, y_pred)
0.25
In the multilabel case with binary label indicators:
>>> hamming_loss(np.array([[0, 1], [1, 1]]), np.zeros((2, 2)))
0.75
"""
y_type, y_true, y_pred = _check_targets(y_true, y_pred)
if classes is None:
classes = unique_labels(y_true, y_pred)
else:
classes = np.asarray(classes)
if y_type.startswith('multilabel'):
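        # fraction of wrong label assignments over all n_samples * n_labels
        # entries, e.g. 3 wrong entries out of 2 * 2 = 4 gives 0.75 as in the
        # docstring example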
n_differences = count_nonzero(y_true - y_pred)
return (n_differences / (y_true.shape[0] * len(classes)))
elif y_type in ["binary", "multiclass"]:
return sp_hamming(y_true, y_pred)
else:
raise ValueError("{0} is not supported".format(y_type))
def log_loss(y_true, y_pred, eps=1e-15, normalize=True, sample_weight=None):
"""Log loss, aka logistic loss or cross-entropy loss.
This is the loss function used in (multinomial) logistic regression
and extensions of it such as neural networks, defined as the negative
log-likelihood of the true labels given a probabilistic classifier's
predictions. For a single sample with true label yt in {0,1} and
estimated probability yp that yt = 1, the log loss is
-log P(yt|yp) = -(yt log(yp) + (1 - yt) log(1 - yp))
Read more in the :ref:`User Guide <log_loss>`.
Parameters
----------
y_true : array-like or label indicator matrix
Ground truth (correct) labels for n_samples samples.
y_pred : array-like of float, shape = (n_samples, n_classes)
Predicted probabilities, as returned by a classifier's
predict_proba method.
eps : float
Log loss is undefined for p=0 or p=1, so probabilities are
clipped to max(eps, min(1 - eps, p)).
normalize : bool, optional (default=True)
If true, return the mean loss per sample.
Otherwise, return the sum of the per-sample losses.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
loss : float
Examples
--------
>>> log_loss(["spam", "ham", "ham", "spam"], # doctest: +ELLIPSIS
... [[.1, .9], [.9, .1], [.8, .2], [.35, .65]])
0.21616...
References
----------
C.M. Bishop (2006). Pattern Recognition and Machine Learning. Springer,
p. 209.
Notes
-----
The logarithm used is the natural logarithm (base-e).
"""
lb = LabelBinarizer()
T = lb.fit_transform(y_true)
if T.shape[1] == 1:
T = np.append(1 - T, T, axis=1)
# Clipping
Y = np.clip(y_pred, eps, 1 - eps)
# This happens in cases when elements in y_pred have type "str".
if not isinstance(Y, np.ndarray):
raise ValueError("y_pred should be an array of floats.")
# If y_pred is of single dimension, assume y_true to be binary
# and then check.
if Y.ndim == 1:
Y = Y[:, np.newaxis]
if Y.shape[1] == 1:
Y = np.append(1 - Y, Y, axis=1)
# Check if dimensions are consistent.
check_consistent_length(T, Y)
T = check_array(T)
Y = check_array(Y)
if T.shape[1] != Y.shape[1]:
raise ValueError("y_true and y_pred have different number of classes "
"%d, %d" % (T.shape[1], Y.shape[1]))
# Renormalize
Y /= Y.sum(axis=1)[:, np.newaxis]
loss = -(T * np.log(Y)).sum(axis=1)
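    # Illustrative arithmetic: a sample with true label 1 and predicted
    # probability 0.9 for that class contributes -log(0.9) ~ 0.105 to the loss;
    # predicting 0.1 instead would contribute -log(0.1) ~ 2.303.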
return _weighted_sum(loss, sample_weight, normalize)
def hinge_loss(y_true, pred_decision, labels=None, sample_weight=None):
"""Average hinge loss (non-regularized)
In binary class case, assuming labels in y_true are encoded with +1 and -1,
when a prediction mistake is made, ``margin = y_true * pred_decision`` is
always negative (since the signs disagree), implying ``1 - margin`` is
always greater than 1. The cumulated hinge loss is therefore an upper
bound of the number of mistakes made by the classifier.
In multiclass case, the function expects that either all the labels are
included in y_true or an optional labels argument is provided which
    contains all the labels. The multiclass margin is calculated according
to Crammer-Singer's method. As in the binary case, the cumulated hinge loss
is an upper bound of the number of mistakes made by the classifier.
Read more in the :ref:`User Guide <hinge_loss>`.
Parameters
----------
y_true : array, shape = [n_samples]
True target, consisting of integers of two values. The positive label
must be greater than the negative label.
pred_decision : array, shape = [n_samples] or [n_samples, n_classes]
Predicted decisions, as output by decision_function (floats).
labels : array, optional, default None
Contains all the labels for the problem. Used in multiclass hinge loss.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
loss : float
References
----------
.. [1] `Wikipedia entry on the Hinge loss
<http://en.wikipedia.org/wiki/Hinge_loss>`_
.. [2] Koby Crammer, Yoram Singer. On the Algorithmic
Implementation of Multiclass Kernel-based Vector
Machines. Journal of Machine Learning Research 2,
(2001), 265-292
.. [3] `L1 AND L2 Regularization for Multiclass Hinge Loss Models
by Robert C. Moore, John DeNero.
<http://www.ttic.edu/sigml/symposium2011/papers/
Moore+DeNero_Regularization.pdf>`_
Examples
--------
>>> from sklearn import svm
>>> from sklearn.metrics import hinge_loss
>>> X = [[0], [1]]
>>> y = [-1, 1]
>>> est = svm.LinearSVC(random_state=0)
>>> est.fit(X, y)
LinearSVC(C=1.0, class_weight=None, dual=True, fit_intercept=True,
intercept_scaling=1, loss='squared_hinge', max_iter=1000,
multi_class='ovr', penalty='l2', random_state=0, tol=0.0001,
verbose=0)
>>> pred_decision = est.decision_function([[-2], [3], [0.5]])
>>> pred_decision # doctest: +ELLIPSIS
array([-2.18..., 2.36..., 0.09...])
>>> hinge_loss([-1, 1, 1], pred_decision) # doctest: +ELLIPSIS
0.30...
In the multiclass case:
>>> X = np.array([[0], [1], [2], [3]])
>>> Y = np.array([0, 1, 2, 3])
>>> labels = np.array([0, 1, 2, 3])
>>> est = svm.LinearSVC()
>>> est.fit(X, Y)
LinearSVC(C=1.0, class_weight=None, dual=True, fit_intercept=True,
intercept_scaling=1, loss='squared_hinge', max_iter=1000,
multi_class='ovr', penalty='l2', random_state=None, tol=0.0001,
verbose=0)
>>> pred_decision = est.decision_function([[-1], [2], [3]])
>>> y_true = [0, 2, 3]
>>> hinge_loss(y_true, pred_decision, labels) #doctest: +ELLIPSIS
0.56...
"""
check_consistent_length(y_true, pred_decision, sample_weight)
pred_decision = check_array(pred_decision, ensure_2d=False)
y_true = column_or_1d(y_true)
y_true_unique = np.unique(y_true)
if y_true_unique.size > 2:
if (labels is None and pred_decision.ndim > 1 and
(np.size(y_true_unique) != pred_decision.shape[1])):
raise ValueError("Please include all labels in y_true "
"or pass labels as third argument")
if labels is None:
labels = y_true_unique
le = LabelEncoder()
le.fit(labels)
y_true = le.transform(y_true)
mask = np.ones_like(pred_decision, dtype=bool)
mask[np.arange(y_true.shape[0]), y_true] = False
margin = pred_decision[~mask]
margin -= np.max(pred_decision[mask].reshape(y_true.shape[0], -1),
axis=1)
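        # Crammer-Singer margin: decision value of the true class minus the
        # largest decision value among the remaining classes; a negative margin
        # means some other class out-scored the true one.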
else:
# Handles binary class case
# this code assumes that positive and negative labels
# are encoded as +1 and -1 respectively
pred_decision = column_or_1d(pred_decision)
pred_decision = np.ravel(pred_decision)
lbin = LabelBinarizer(neg_label=-1)
y_true = lbin.fit_transform(y_true)[:, 0]
try:
margin = y_true * pred_decision
except TypeError:
raise TypeError("pred_decision should be an array of floats.")
losses = 1 - margin
# The hinge_loss doesn't penalize good enough predictions.
losses[losses <= 0] = 0
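    # Illustrative numbers: a margin of 0.3 leaves a loss of 0.7, while a margin
    # of 1.2 is clipped to 0 above, since confident correct predictions
    # (margin >= 1) incur no hinge loss.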
return np.average(losses, weights=sample_weight)
def _check_binary_probabilistic_predictions(y_true, y_prob):
"""Check that y_true is binary and y_prob contains valid probabilities"""
check_consistent_length(y_true, y_prob)
labels = np.unique(y_true)
if len(labels) != 2:
raise ValueError("Only binary classification is supported. "
"Provided labels %s." % labels)
if y_prob.max() > 1:
raise ValueError("y_prob contains values greater than 1.")
if y_prob.min() < 0:
raise ValueError("y_prob contains values less than 0.")
return label_binarize(y_true, labels)[:, 0]
def brier_score_loss(y_true, y_prob, sample_weight=None, pos_label=None):
"""Compute the Brier score.
The smaller the Brier score, the better, hence the naming with "loss".
    Across all items in a set of N predictions, the Brier score measures the
mean squared difference between (1) the predicted probability assigned
to the possible outcomes for item i, and (2) the actual outcome.
Therefore, the lower the Brier score is for a set of predictions, the
better the predictions are calibrated. Note that the Brier score always
takes on a value between zero and one, since this is the largest
possible difference between a predicted probability (which must be
between zero and one) and the actual outcome (which can take on values
of only 0 and 1).
The Brier score is appropriate for binary and categorical outcomes that
can be structured as true or false, but is inappropriate for ordinal
variables which can take on three or more values (this is because the
Brier score assumes that all possible outcomes are equivalently
"distant" from one another). Which label is considered to be the positive
label is controlled via the parameter pos_label, which defaults to 1.
Read more in the :ref:`User Guide <calibration>`.
Parameters
----------
y_true : array, shape (n_samples,)
True targets.
y_prob : array, shape (n_samples,)
Probabilities of the positive class.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
pos_label : int (default: None)
Label of the positive class. If None, the maximum label is used as
positive class
Returns
-------
score : float
Brier score
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import brier_score_loss
>>> y_true = np.array([0, 1, 1, 0])
>>> y_true_categorical = np.array(["spam", "ham", "ham", "spam"])
>>> y_prob = np.array([0.1, 0.9, 0.8, 0.3])
>>> brier_score_loss(y_true, y_prob) # doctest: +ELLIPSIS
0.037...
>>> brier_score_loss(y_true, 1-y_prob, pos_label=0) # doctest: +ELLIPSIS
0.037...
>>> brier_score_loss(y_true_categorical, y_prob, \
pos_label="ham") # doctest: +ELLIPSIS
0.037...
>>> brier_score_loss(y_true, np.array(y_prob) > 0.5)
0.0
References
----------
http://en.wikipedia.org/wiki/Brier_score
"""
y_true = column_or_1d(y_true)
y_prob = column_or_1d(y_prob)
if pos_label is None:
pos_label = y_true.max()
y_true = np.array(y_true == pos_label, int)
y_true = _check_binary_probabilistic_predictions(y_true, y_prob)
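    # Illustrative arithmetic: y_true = [1, 0] and y_prob = [0.9, 0.2] give
    # ((1 - 0.9)**2 + (0 - 0.2)**2) / 2 = (0.01 + 0.04) / 2 = 0.025.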
return np.average((y_true - y_prob) ** 2, weights=sample_weight)
| bsd-3-clause |
felipebetancur/scipy | doc/source/tutorial/examples/normdiscr_plot2.py | 84 | 1642 | import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
npoints = 20 # number of integer support points of the distribution minus 1
npointsh = npoints / 2
npointsf = float(npoints)
nbound = 4 #bounds for the truncated normal
normbound = (1 + 1 / npointsf) * nbound #actual bounds of truncated normal
grid = np.arange(-npointsh, npointsh+2,1) #integer grid
gridlimitsnorm = (grid - 0.5) / npointsh * nbound #bin limits for the truncnorm
gridlimits = grid - 0.5
grid = grid[:-1]
probs = np.diff(stats.truncnorm.cdf(gridlimitsnorm, -normbound, normbound))
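# probs[k] is P(gridlimits[k] < X <= gridlimits[k+1]) for the truncated normal;
# the bin limits span the full truncation range, so the discretized pmf sums to 1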
gridint = grid
normdiscrete = stats.rv_discrete(
values=(gridint, np.round(probs, decimals=7)),
name='normdiscrete')
n_sample = 500
np.random.seed(87655678) #fix the seed for replicability
rvs = normdiscrete.rvs(size=n_sample)
rvsnd = rvs
f,l = np.histogram(rvs,bins=gridlimits)
sfreq = np.vstack([gridint,f,probs*n_sample]).T
# cumulative relative frequencies (empirical and theoretical cdf values)
fs = sfreq[:,1].cumsum() / float(n_sample)
ft = sfreq[:,2].cumsum() / float(n_sample)
nd_std = np.sqrt(normdiscrete.stats(moments='v'))
ind = gridint # the x locations for the groups
width = 0.35 # the width of the bars
plt.figure()
plt.subplot(111)
rects1 = plt.bar(ind, ft, width, color='b')
rects2 = plt.bar(ind+width, fs, width, color='r')
normline = plt.plot(ind+width/2.0, stats.norm.cdf(ind+0.5,scale=nd_std),
color='b')
plt.ylabel('cdf')
plt.title('Cumulative Frequency and CDF of normdiscrete')
plt.xticks(ind+width, ind)
plt.legend((rects1[0], rects2[0]), ('true', 'sample'))
plt.show()
| bsd-3-clause |
gef756/statsmodels | statsmodels/distributions/mixture_rvs.py | 27 | 9592 | from statsmodels.compat.python import range
import numpy as np
def _make_index(prob,size):
"""
Returns a boolean index for given probabilities.
Notes
---------
prob = [.75,.25] means that there is a 75% chance of the first column
being True and a 25% chance of the second column being True. The
columns are mutually exclusive.
"""
rv = np.random.uniform(size=(size,1))
cumprob = np.cumsum(prob)
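    # e.g. prob = [.75, .25] gives cumprob = [.75, 1.]; a uniform draw r selects
    # column 0 when r < .75 and column 1 when .75 <= r < 1., so exactly one
    # column is True per row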
return np.logical_and(np.r_[0,cumprob[:-1]] <= rv, rv < cumprob)
def mixture_rvs(prob, size, dist, kwargs=None):
"""
Sample from a mixture of distributions.
Parameters
----------
prob : array-like
Probability of sampling from each distribution in dist
size : int
The length of the returned sample.
dist : array-like
An iterable of distributions objects from scipy.stats.
kwargs : tuple of dicts, optional
A tuple of dicts. Each dict in kwargs can have keys loc, scale, and
args to be passed to the respective distribution in dist. If not
provided, the distribution defaults are used.
Examples
--------
Say we want 5000 random variables from mixture of normals with two
distributions norm(-1,.5) and norm(1,.5) and we want to sample from the
first with probability .75 and the second with probability .25.
>>> from scipy import stats
>>> prob = [.75,.25]
>>> Y = mixture_rvs(prob, 5000, dist=[stats.norm, stats.norm], kwargs =
(dict(loc=-1,scale=.5),dict(loc=1,scale=.5)))
"""
if len(prob) != len(dist):
raise ValueError("You must provide as many probabilities as distributions")
if not np.allclose(np.sum(prob), 1):
raise ValueError("prob does not sum to 1")
if kwargs is None:
kwargs = ({},)*len(prob)
idx = _make_index(prob,size)
sample = np.empty(size)
for i in range(len(prob)):
sample_idx = idx[...,i]
sample_size = sample_idx.sum()
loc = kwargs[i].get('loc',0)
scale = kwargs[i].get('scale',1)
args = kwargs[i].get('args',())
sample[sample_idx] = dist[i].rvs(*args, **dict(loc=loc,scale=scale,
size=sample_size))
return sample
class MixtureDistribution(object):
'''univariate mixture distribution
for simple case for now (unbound support)
does not yet inherit from scipy.stats.distributions
adding pdf to mixture_rvs, some restrictions on broadcasting
Currently it does not hold any state, all arguments included in each method.
'''
#def __init__(self, prob, size, dist, kwargs=None):
def rvs(self, prob, size, dist, kwargs=None):
return mixture_rvs(prob, size, dist, kwargs=kwargs)
def pdf(self, x, prob, dist, kwargs=None):
"""
pdf a mixture of distributions.
Parameters
----------
prob : array-like
Probability of sampling from each distribution in dist
dist : array-like
An iterable of distributions objects from scipy.stats.
kwargs : tuple of dicts, optional
A tuple of dicts. Each dict in kwargs can have keys loc, scale, and
args to be passed to the respective distribution in dist. If not
provided, the distribution defaults are used.
Examples
--------
Say we want 5000 random variables from mixture of normals with two
distributions norm(-1,.5) and norm(1,.5) and we want to sample from the
first with probability .75 and the second with probability .25.
>>> from scipy import stats
>>> prob = [.75,.25]
>>> Y = mixture.pdf(x, prob, dist=[stats.norm, stats.norm], kwargs =
(dict(loc=-1,scale=.5),dict(loc=1,scale=.5)))
"""
if len(prob) != len(dist):
raise ValueError("You must provide as many probabilities as distributions")
if not np.allclose(np.sum(prob), 1):
raise ValueError("prob does not sum to 1")
if kwargs is None:
kwargs = ({},)*len(prob)
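        # The mixture density is a convex combination of the component
        # densities: pdf(x) = sum_i prob[i] * pdf_i(x), accumulated term by
        # term in the loop below.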
for i in range(len(prob)):
loc = kwargs[i].get('loc',0)
scale = kwargs[i].get('scale',1)
args = kwargs[i].get('args',())
if i == 0: #assume all broadcast the same as the first dist
pdf_ = prob[i] * dist[i].pdf(x, *args, loc=loc, scale=scale)
else:
pdf_ += prob[i] * dist[i].pdf(x, *args, loc=loc, scale=scale)
return pdf_
def cdf(self, x, prob, dist, kwargs=None):
"""
cdf of a mixture of distributions.
Parameters
----------
        x : array-like
            The points at which to evaluate the mixture cdf.
        prob : array-like
            Probability of sampling from each distribution in dist
dist : array-like
An iterable of distributions objects from scipy.stats.
kwargs : tuple of dicts, optional
A tuple of dicts. Each dict in kwargs can have keys loc, scale, and
args to be passed to the respective distribution in dist. If not
provided, the distribution defaults are used.
Examples
--------
Say we want 5000 random variables from mixture of normals with two
distributions norm(-1,.5) and norm(1,.5) and we want to sample from the
first with probability .75 and the second with probability .25.
>>> from scipy import stats
>>> prob = [.75,.25]
        >>> Y = mixture.cdf(x, prob, dist=[stats.norm, stats.norm], kwargs =
(dict(loc=-1,scale=.5),dict(loc=1,scale=.5)))
"""
if len(prob) != len(dist):
raise ValueError("You must provide as many probabilities as distributions")
if not np.allclose(np.sum(prob), 1):
raise ValueError("prob does not sum to 1")
if kwargs is None:
kwargs = ({},)*len(prob)
for i in range(len(prob)):
loc = kwargs[i].get('loc',0)
scale = kwargs[i].get('scale',1)
args = kwargs[i].get('args',())
if i == 0: #assume all broadcast the same as the first dist
cdf_ = prob[i] * dist[i].cdf(x, *args, loc=loc, scale=scale)
else:
cdf_ += prob[i] * dist[i].cdf(x, *args, loc=loc, scale=scale)
return cdf_
def mv_mixture_rvs(prob, size, dist, nvars, **kwargs):
"""
Sample from a mixture of multivariate distributions.
Parameters
----------
prob : array-like
Probability of sampling from each distribution in dist
size : int
The length of the returned sample.
dist : array-like
An iterable of distributions instances with callable method rvs.
    nvars : int
dimension of the multivariate distribution, could be inferred instead
kwargs : tuple of dicts, optional
ignored
Examples
--------
Say we want 2000 random variables from mixture of normals with two
multivariate normal distributions, and we want to sample from the
first with probability .4 and the second with probability .6.
import statsmodels.sandbox.distributions.mv_normal as mvd
cov3 = np.array([[ 1. , 0.5 , 0.75],
[ 0.5 , 1.5 , 0.6 ],
[ 0.75, 0.6 , 2. ]])
mu = np.array([-1, 0.0, 2.0])
mu2 = np.array([4, 2.0, 2.0])
mvn3 = mvd.MVNormal(mu, cov3)
mvn32 = mvd.MVNormal(mu2, cov3/2., 4)
rvs = mix.mv_mixture_rvs([0.4, 0.6], 2000, [mvn3, mvn32], 3)
"""
if len(prob) != len(dist):
raise ValueError("You must provide as many probabilities as distributions")
if not np.allclose(np.sum(prob), 1):
raise ValueError("prob does not sum to 1")
if kwargs is None:
kwargs = ({},)*len(prob)
idx = _make_index(prob,size)
sample = np.empty((size, nvars))
for i in range(len(prob)):
sample_idx = idx[...,i]
sample_size = sample_idx.sum()
#loc = kwargs[i].get('loc',0)
#scale = kwargs[i].get('scale',1)
#args = kwargs[i].get('args',())
# use int to avoid numpy bug with np.random.multivariate_normal
sample[sample_idx] = dist[i].rvs(size=int(sample_size))
return sample
if __name__ == '__main__':
from scipy import stats
obs_dist = mixture_rvs([.25,.75], size=10000, dist=[stats.norm, stats.beta],
kwargs=(dict(loc=-1,scale=.5),dict(loc=1,scale=1,args=(1,.5))))
nobs = 10000
mix = MixtureDistribution()
## mrvs = mixture_rvs([1/3.,2/3.], size=nobs, dist=[stats.norm, stats.norm],
## kwargs = (dict(loc=-1,scale=.5),dict(loc=1,scale=.75)))
mix_kwds = (dict(loc=-1,scale=.25),dict(loc=1,scale=.75))
mrvs = mix.rvs([1/3.,2/3.], size=nobs, dist=[stats.norm, stats.norm],
kwargs=mix_kwds)
grid = np.linspace(-4,4, 100)
mpdf = mix.pdf(grid, [1/3.,2/3.], dist=[stats.norm, stats.norm],
kwargs=mix_kwds)
mcdf = mix.cdf(grid, [1/3.,2/3.], dist=[stats.norm, stats.norm],
kwargs=mix_kwds)
doplot = 1
if doplot:
import matplotlib.pyplot as plt
plt.figure()
plt.hist(mrvs, bins=50, normed=True, color='red')
plt.title('histogram of sample and pdf')
plt.plot(grid, mpdf, lw=2, color='black')
plt.figure()
plt.hist(mrvs, bins=50, normed=True, cumulative=True, color='red')
        plt.title('histogram of sample and cdf')
plt.plot(grid, mcdf, lw=2, color='black')
plt.show()
| bsd-3-clause |
mountaindust/Parasitoids | tests/test_Bayes.py | 1 | 9428 | #! /usr/bin/env python3
'''Test module for Data_Import and Bayes_funcs
Author: Christopher Strickland
Email: [email protected]
'''
import pytest
import numpy as np
import pandas as pd
from scipy import sparse
from matplotlib.path import Path
from Data_Import import LocInfo
from Run import Params
import Bayes_funcs as Bayes
from conftest import data_avail
###############################################################################
# #
# Test Fixtures #
# #
###############################################################################
@pytest.fixture(scope="module")
def locinfo(domain_info):
# kalbar info
loc_name = 'kalbar'
center = (-27.945752,152.58474)
return LocInfo(loc_name,center,domain_info)
###############################################################################
# #
# Tests #
# #
###############################################################################
def test_LocInfo(locinfo):
'''Test initialization of LocInfo object'''
### Field boundary information ###
# field_polys should be a dict of Path objects
assert type(locinfo.field_polys) is dict
# Fields: A, B, C, D, E, F, G
assert type(locinfo.field_polys['A']) is Path
assert len(locinfo.field_polys) == 7
# field_cells should be a dict of lists
assert type(locinfo.field_cells) is dict
assert isinstance(locinfo.field_cells['A'],np.ndarray)
assert len(locinfo.field_cells) == 7
# field_sizes should be a dict of cell counts
assert type(locinfo.field_sizes) is dict
assert type(locinfo.field_sizes['A']) is int
assert len(locinfo.field_sizes) == 7
### Release field grid info ###
assert isinstance(locinfo.grid_data,pd.DataFrame)
for key in ['xcoord','ycoord','samples','collection']:
assert key in locinfo.grid_data.keys()
assert isinstance(locinfo.grid_cells,np.ndarray)
assert locinfo.grid_cells.shape[1] == 2
assert locinfo.grid_data['xcoord'].size == locinfo.grid_cells.shape[0]
# Less trivial testing for the above is accomplished by running
# Plot_SampleLocations.py which plots the fields and polys together
### Sentinel field emergence data ###
assert isinstance(locinfo.release_date,pd.Timestamp)
assert isinstance(locinfo.collection_datesPR,list)
assert isinstance(locinfo.collection_datesPR[0],pd.Timedelta)
assert locinfo.collection_datesPR[0] > pd.Timedelta('0 days')
assert isinstance(locinfo.sent_DataFrames[0],pd.DataFrame)
for key in ['id','datePR','E_total','All_total']:
assert key in locinfo.sent_DataFrames[0].keys()
assert np.all(locinfo.sent_DataFrames[0]['E_total'].values <=
locinfo.sent_DataFrames[0]['All_total'].values)
# test that we have the field cell info for all the sentinel field data
for key in locinfo.sent_ids:
assert key in locinfo.field_cells.keys()
# Test emergence post release dates
minTimedelta = locinfo.collection_datesPR[0]
for Td in locinfo.sent_DataFrames[0]['datePR']:
assert Td >= minTimedelta
### Release field emergence data ###
assert isinstance(locinfo.releasefield_id,str)
for key in ['row','column','xcoord','ycoord','datePR','E_total','All_total']:
assert key in locinfo.release_DataFrames[0].keys()
for coord in locinfo.release_DataFrames[0][['xcoord','ycoord']].values:
assert coord in locinfo.grid_data[['xcoord','ycoord']].values
assert np.all(locinfo.release_DataFrames[0]['E_total'].values <=
locinfo.release_DataFrames[0]['All_total'].values)
for Td in locinfo.release_DataFrames[0]['datePR']:
assert Td >= minTimedelta
grid_cells_list = locinfo.grid_cells.tolist()
for cell in locinfo.release_DataFrames[0][['row','column']].values.tolist():
assert cell in grid_cells_list
assert tuple(cell) in locinfo.emerg_grids[0]
### Grid observation data ###
assert isinstance(locinfo.grid_obs_DataFrame,pd.DataFrame)
assert isinstance(locinfo.grid_obs_datesPR,list)
assert isinstance(locinfo.grid_obs_datesPR[0],pd.Timedelta)
assert isinstance(locinfo.grid_obs,np.ndarray)
assert isinstance(locinfo.grid_samples,np.ndarray)
assert np.all(locinfo.grid_obs.shape == locinfo.grid_samples.shape)
assert locinfo.grid_samples.max() == 1
# grid_obs should not be all zeros, asssuming something was seen
assert locinfo.grid_obs.max() > 0
### Cardinal direction data ###
assert isinstance(locinfo.card_obs_DataFrames,list)
assert isinstance(locinfo.card_obs_DataFrames[0],pd.DataFrame)
assert isinstance(locinfo.card_obs_datesPR,list)
assert isinstance(locinfo.card_obs_datesPR[0],pd.Timedelta)
assert isinstance(locinfo.step_size,list)
assert isinstance(locinfo.card_obs,list)
assert isinstance(locinfo.card_obs[0],np.ndarray)
assert len(locinfo.card_obs_DataFrames) == len(locinfo.card_obs_datesPR)\
== len(locinfo.step_size) == len(locinfo.card_obs)
for c_obs in locinfo.card_obs:
assert c_obs.shape[0] == 4
### PyMC friendly data structures ###
# these primarily need to be verfied against model output, so we will test
# them there.
@data_avail
def test_model_emergence(locinfo,modelsol):
'''Test the translation of population model results to emergence information,
and how this compares with the PyMC friendly data structures in LocInfo'''
release_emerg,sentinel_emerg = Bayes.popdensity_to_emergence(modelsol,locinfo)
# This process results in two lists, release_emerg and sentinel_emerg.
# Each list entry corresponds to a data collection day (one array)
# In each array:
# Each column corresponds to an emergence observation day (as in data)
# Each row corresponds to a grid point or sentinel field, respectively
# These lists are emergence potential as in wasp population numbers.
# To get observed emergence, collection and oviposition rate must be
# modeled. But this is done in Bayesian fashion and won't be reproduced here.
# Regardless, these numbers are now considered independent variables and
# should match the general shape of the data stored in locinfo.
assert isinstance(release_emerg,list)
for ii in range(len(release_emerg)):
n_grid_pts, n_obs = release_emerg[ii].shape
# test shape against data info
assert n_grid_pts == len(locinfo.emerg_grids[ii])
assert n_obs == len(locinfo.release_DataFrames[ii]['datePR'].unique())
# test shape against locinfo ndarrays
assert n_grid_pts == locinfo.release_emerg[ii].shape[0]
assert n_obs == locinfo.release_emerg[ii].shape[1]
assert n_grid_pts == locinfo.release_collection[ii].size
n_fields, n_obs = sentinel_emerg[ii].shape
# test shape against data info
assert n_fields == len(locinfo.sent_ids)
assert n_obs == len(locinfo.sent_DataFrames[ii]['datePR'].unique())
# test shape against locinfo ndarray
assert n_fields == sentinel_emerg[ii].shape[0]
assert n_obs == sentinel_emerg[ii].shape[1]
# make sure that the grid points match from model to data
for n,cell in enumerate(locinfo.emerg_grids[ii]):
for day in locinfo.release_DataFrames[ii]['datePR'].unique():
assert tuple(locinfo.release_DataFrames[ii]
[locinfo.release_DataFrames[ii]['datePR']==day]
[['row','column']].values[n,:]) == cell
# same for sentinel fields
for n,field in enumerate(locinfo.sent_ids):
for day in locinfo.sent_DataFrames[ii]['datePR'].unique():
assert locinfo.sent_DataFrames[ii]\
[locinfo.sent_DataFrames[ii]['datePR']==day]\
['id'].values[n] == field
# release_collection should be relative numbers
assert locinfo.release_collection[ii].max() == 1
assert locinfo.release_collection[ii].min() >= 0
@data_avail
def test_model_sampling(locinfo,modelsol,domain_info):
'''Test the translation of population model results to the PyMC friendly
data structures in LocInfo'''
grid_counts = Bayes.popdensity_grid(modelsol,locinfo)
card_counts = Bayes.popdensity_card(modelsol,locinfo,domain_info)
# grid_counts should be comparable to locinfo.grid_obs and locinfo.grid_samples
assert np.all(grid_counts.shape == locinfo.grid_obs.shape ==
locinfo.grid_samples.shape)
# they should be non-negative, and something should be > 0
assert grid_counts.max() > 0
assert grid_counts.min() >= 0
# each entry in card_counts should match each cooresponding entry in
# locinfo.card_obs
for nobs,obs in enumerate(locinfo.card_obs):
assert np.all(obs.shape == card_counts[nobs].shape)
# Simulation should be >= 0, with a max > 0
card_counts[nobs].max() > 0
card_counts[nobs].min() >= 0 | gpl-3.0 |
mne-tools/mne-tools.github.io | 0.19/_downloads/3a255427c290a7e906384462001b6d1f/plot_brainstorm_phantom_elekta.py | 10 | 6810 | # -*- coding: utf-8 -*-
"""
.. _tut-brainstorm-elekta-phantom:
==========================================
Brainstorm Elekta phantom dataset tutorial
==========================================
Here we compute the evoked from raw for the Brainstorm Elekta phantom
tutorial dataset. For comparison, see [1]_ and:
https://neuroimage.usc.edu/brainstorm/Tutorials/PhantomElekta
References
----------
.. [1] Tadel F, Baillet S, Mosher JC, Pantazis D, Leahy RM.
Brainstorm: A User-Friendly Application for MEG/EEG Analysis.
Computational Intelligence and Neuroscience, vol. 2011, Article ID
879716, 13 pages, 2011. doi:10.1155/2011/879716
"""
# sphinx_gallery_thumbnail_number = 9
# Authors: Eric Larson <[email protected]>
#
# License: BSD (3-clause)
import os.path as op
import numpy as np
import matplotlib.pyplot as plt
import mne
from mne import find_events, fit_dipole
from mne.datasets.brainstorm import bst_phantom_elekta
from mne.io import read_raw_fif
print(__doc__)
###############################################################################
# The data were collected with an Elekta Neuromag VectorView system at 1000 Hz
# and low-pass filtered at 330 Hz. Here the medium-amplitude (200 nAm) data
# are read to construct instances of :class:`mne.io.Raw`.
data_path = bst_phantom_elekta.data_path(verbose=True)
subject = 'sample'
raw_fname = op.join(data_path, 'kojak_all_200nAm_pp_no_chpi_no_ms_raw.fif')
raw = read_raw_fif(raw_fname)
###############################################################################
# The data channel array consisted of 204 MEG planar gradiometers,
# 102 axial magnetometers, and 3 stimulus channels. Let's get the events
# for the phantom, where each dipole (1-32) gets its own event:
events = find_events(raw, 'STI201')
raw.plot(events=events)
raw.info['bads'] = ['MEG1933', 'MEG2421']
###############################################################################
# The data have strong line frequency (60 Hz and harmonics) and cHPI coil
# noise (five peaks around 300 Hz). Here we plot only out to 30 seconds
# to save memory:
raw.plot_psd(tmax=30., average=False)
###############################################################################
# Our phantom produces sinusoidal bursts at 20 Hz:
raw.plot(events=events)
###############################################################################
# Now we epoch our data, average it, and look at the first dipole response.
# The first peak appears around 3 ms. (If the data were low-passed, e.g. at
# 40 Hz, they could also be decimated here to save memory.)
tmin, tmax = -0.1, 0.1
bmax = -0.05  # Avoid capturing filter ringing in the baseline
event_id = list(range(1, 33))
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, baseline=(None, bmax),
preload=False)
epochs['1'].average().plot(time_unit='s')
###############################################################################
# .. _plt_brainstorm_phantom_elekta_eeg_sphere_geometry:
#
# Let's use a :ref:`sphere head geometry model <eeg_sphere_model>`
# and let's see the coordinate alignment and the sphere location. The phantom
# is properly modeled by a single-shell sphere with origin (0., 0., 0.).
sphere = mne.make_sphere_model(r0=(0., 0., 0.), head_radius=0.08)
mne.viz.plot_alignment(epochs.info, subject=subject, show_axes=True,
bem=sphere, dig=True, surfaces='inner_skull')
###############################################################################
# Let's do some dipole fits. We first compute the noise covariance,
# then do the fits for each event_id taking the time instant that maximizes
# the global field power.
# the default empirical estimate is used here; method='oas' is a fast
# alternative (faster than "shrunk"), but in general "shrunk" is usually better
cov = mne.compute_covariance(epochs, tmax=bmax)
mne.viz.plot_evoked_white(epochs['1'].average(), cov)
data = []
t_peak = 0.036 # true for Elekta phantom
for ii in event_id:
# Avoid the first and last trials -- can contain dipole-switching artifacts
evoked = epochs[str(ii)][1:-1].average().crop(t_peak, t_peak)
data.append(evoked.data[:, 0])
evoked = mne.EvokedArray(np.array(data).T, evoked.info, tmin=0.)
del epochs
dip, residual = fit_dipole(evoked, cov, sphere, n_jobs=1)
###############################################################################
# Do a quick visualization of how much variance we explained, putting the
# data and residuals on the same scale (here the "time points" are the
# 32 dipole peak values that we fit):
fig, axes = plt.subplots(2, 1)
evoked.plot(axes=axes)
for ax in axes:
ax.texts = []
for line in ax.lines:
line.set_color('#98df81')
residual.plot(axes=axes)
###############################################################################
# Now we can compare to the actual locations, taking the difference in mm:
actual_pos, actual_ori = mne.dipole.get_phantom_dipoles()
actual_amp = 100. # nAm
fig, (ax1, ax2, ax3) = plt.subplots(nrows=3, ncols=1, figsize=(6, 7))
diffs = 1000 * np.sqrt(np.sum((dip.pos - actual_pos) ** 2, axis=-1))
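# Position error per dipole: Euclidean distance between the fitted and true
# locations, converted from m to mm by the factor of 1000 above.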
print('mean(position error) = %0.1f mm' % (np.mean(diffs),))
ax1.bar(event_id, diffs)
ax1.set_xlabel('Dipole index')
ax1.set_ylabel('Loc. error (mm)')
angles = np.rad2deg(np.arccos(np.abs(np.sum(dip.ori * actual_ori, axis=1))))
print(u'mean(angle error) = %0.1f°' % (np.mean(angles),))
ax2.bar(event_id, angles)
ax2.set_xlabel('Dipole index')
ax2.set_ylabel(u'Angle error (°)')
amps = actual_amp - dip.amplitude / 1e-9
print('mean(abs amplitude error) = %0.1f nAm' % (np.mean(np.abs(amps)),))
ax3.bar(event_id, amps)
ax3.set_xlabel('Dipole index')
ax3.set_ylabel('Amplitude error (nAm)')
fig.tight_layout()
plt.show()
###############################################################################
# Let's plot the positions and the orientations of the actual and the estimated
# dipoles
actual_amp = np.ones(len(dip)) # misc amp to create Dipole instance
actual_gof = np.ones(len(dip)) # misc GOF to create Dipole instance
dip_true = \
mne.Dipole(dip.times, actual_pos, actual_amp, actual_ori, actual_gof)
fig = mne.viz.plot_alignment(evoked.info, bem=sphere, surfaces='inner_skull',
coord_frame='head', meg='helmet', show_axes=True)
# Plot the position and the orientation of the actual dipole
fig = mne.viz.plot_dipole_locations(dipoles=dip_true, mode='arrow',
subject=subject, color=(0., 0., 0.),
fig=fig)
# Plot the position and the orientation of the estimated dipole
fig = mne.viz.plot_dipole_locations(dipoles=dip, mode='arrow', subject=subject,
color=(0.2, 1., 0.5), fig=fig)
mne.viz.set_3d_view(figure=fig, azimuth=70, elevation=80, distance=0.5)
| bsd-3-clause |
apache/incubator-superset | superset/reports/commands/alert.py | 1 | 4346 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import json
import logging
from operator import eq, ge, gt, le, lt, ne
from typing import Optional
import numpy as np
from flask_babel import lazy_gettext as _
from superset import jinja_context
from superset.commands.base import BaseCommand
from superset.models.reports import ReportSchedule, ReportScheduleValidatorType
from superset.reports.commands.exceptions import (
AlertQueryError,
AlertQueryInvalidTypeError,
AlertQueryMultipleColumnsError,
AlertQueryMultipleRowsError,
AlertValidatorConfigError,
)
logger = logging.getLogger(__name__)
OPERATOR_FUNCTIONS = {">=": ge, ">": gt, "<=": le, "<": lt, "==": eq, "!=": ne}
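# The validator configuration is stored as JSON with "op" and "threshold" keys,
# e.g. '{"op": ">=", "threshold": 10.0}' (example values); "op" is looked up in
# OPERATOR_FUNCTIONS and applied to the value returned by the alert query.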
class AlertCommand(BaseCommand):
def __init__(self, report_schedule: ReportSchedule):
self._report_schedule = report_schedule
self._result: Optional[float] = None
def run(self) -> bool:
self.validate()
if self._report_schedule.validator_type == ReportScheduleValidatorType.NOT_NULL:
self._report_schedule.last_value_row_json = str(self._result)
return self._result not in (0, None, np.nan)
self._report_schedule.last_value = self._result
try:
operator = json.loads(self._report_schedule.validator_config_json)["op"]
threshold = json.loads(self._report_schedule.validator_config_json)[
"threshold"
]
return OPERATOR_FUNCTIONS[operator](self._result, threshold)
except (KeyError, json.JSONDecodeError):
raise AlertValidatorConfigError()
def _validate_not_null(self, rows: np.recarray) -> None:
self._validate_result(rows)
self._result = rows[0][1]
@staticmethod
def _validate_result(rows: np.recarray) -> None:
        # check if the query returned more than one row
if len(rows) > 1:
raise AlertQueryMultipleRowsError(
                message=_(
                    "Alert query returned more than one row. %s rows returned"
% len(rows),
)
)
        # check if the query returned more than one column
if len(rows[0]) > 2:
raise AlertQueryMultipleColumnsError(
# len is subtracted by 1 to discard pandas index column
                _(
                    "Alert query returned more than one column. %s columns returned"
% (len(rows[0]) - 1)
)
)
def _validate_operator(self, rows: np.recarray) -> None:
self._validate_result(rows)
if rows[0][1] is None:
return
try:
# Check if it's float or if we can convert it
self._result = float(rows[0][1])
return
except (AssertionError, TypeError, ValueError):
raise AlertQueryInvalidTypeError()
def validate(self) -> None:
"""
Validate the query result as a Pandas DataFrame
"""
sql_template = jinja_context.get_template_processor(
database=self._report_schedule.database
)
rendered_sql = sql_template.process_template(self._report_schedule.sql)
try:
df = self._report_schedule.database.get_df(rendered_sql)
except Exception as ex:
raise AlertQueryError(message=str(ex))
if df.empty:
return
rows = df.to_records()
if self._report_schedule.validator_type == ReportScheduleValidatorType.NOT_NULL:
self._validate_not_null(rows)
return
self._validate_operator(rows)
| apache-2.0 |
ningchi/scikit-learn | examples/linear_model/plot_polynomial_interpolation.py | 251 | 1895 | #!/usr/bin/env python
"""
========================
Polynomial interpolation
========================
This example demonstrates how to approximate a function with a polynomial of
degree n_degree by using ridge regression. Concretely, from n_samples 1d
points, it suffices to build the Vandermonde matrix, which is n_samples x
n_degree+1 and has the following form:
[[1, x_1, x_1 ** 2, x_1 ** 3, ...],
[1, x_2, x_2 ** 2, x_2 ** 3, ...],
...]
Intuitively, this matrix can be interpreted as a matrix of pseudo features (the
points raised to some power). The matrix is akin to (but different from) the
matrix induced by a polynomial kernel.
This example shows that you can do non-linear regression with a linear model,
using a pipeline to add non-linear features. Kernel methods extend this idea
and can induce very high (even infinite) dimensional feature spaces.
"""
print(__doc__)
# Author: Mathieu Blondel
# Jake Vanderplas
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import Ridge
from sklearn.preprocessing import PolynomialFeatures
from sklearn.pipeline import make_pipeline
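# A minimal sketch (not part of the original example) of the pseudo-feature
# matrix described above, built with PolynomialFeatures on a single 1d point:
#     PolynomialFeatures(degree=3).fit_transform([[2.0]])
#     # -> array([[1., 2., 4., 8.]])   i.e. [1, x, x**2, x**3]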
def f(x):
""" function to approximate by polynomial interpolation"""
return x * np.sin(x)
# generate points used to plot
x_plot = np.linspace(0, 10, 100)
# generate points and keep a subset of them
x = np.linspace(0, 10, 100)
rng = np.random.RandomState(0)
rng.shuffle(x)
x = np.sort(x[:20])
y = f(x)
# create matrix versions of these arrays
X = x[:, np.newaxis]
X_plot = x_plot[:, np.newaxis]
plt.plot(x_plot, f(x_plot), label="ground truth")
plt.scatter(x, y, label="training points")
for degree in [3, 4, 5]:
model = make_pipeline(PolynomialFeatures(degree), Ridge())
model.fit(X, y)
y_plot = model.predict(X_plot)
plt.plot(x_plot, y_plot, label="degree %d" % degree)
plt.legend(loc='lower left')
plt.show()
| bsd-3-clause |
informatics-isi-edu/deriva-py | tests/deriva/core/test_datapath.py | 1 | 35124 | # Tests for the datapath module.
#
# Environment variables:
# DERIVA_PY_TEST_HOSTNAME: hostname of the test server
#   DERIVA_PY_TEST_CREDENTIAL: user credential; if none, it will attempt to get a credential for the given hostname
# DERIVA_PY_TEST_VERBOSE: set for verbose logging output to stdout
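# Example invocation (illustrative; the hostname and module path are assumptions):
#   DERIVA_PY_TEST_HOSTNAME=my-server.example.org python -m unittest tests.deriva.core.test_datapath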
from copy import deepcopy
import logging
from operator import itemgetter
import os
import unittest
import sys
from deriva.core import DerivaServer, get_credential, ermrest_model as _em, __version__
from deriva.core.datapath import DataPathException, Min, Max, Sum, Avg, Cnt, CntD, Array, ArrayD, Bin
# unittest did not support subTest until Python 3.4
if sys.version_info[0] < 3 or sys.version_info[1] < 4:
HAS_SUBTESTS = False
else:
HAS_SUBTESTS = True
# unittest did not support assertWarns until Python 3.2
if sys.version_info[0] < 3 or sys.version_info[1] < 2:
HAS_ASSERTWARNS = False
else:
HAS_ASSERTWARNS = True
try:
from pandas import DataFrame
HAS_PANDAS = True
except ImportError:
HAS_PANDAS = False
TEST_EXP_MAX = 100
TEST_EXPTYPE_MAX = 10
TEST_EXP_NAME_FORMAT = "experiment-{}"
TEST_PROJ_MAX = 1
TEST_PROJ_INVESTIGATOR = "Smith"
TEST_PROJ_NUM = 1
SPECIAL_CHARACTERS = '`~!@#$%^&*()_+-={}|[]\\;:"\',./<>?'
INVALID_IDENTIFIER, INVALID_IDENTIFIER_FIXED = '9 %$ ', '_9____'
RESERVED_IDENTIFIER = 'column_definitions'
CONFLICTING_IDENTIFIER, CONFLICTING_IDENTIFIER_FIXED = RESERVED_IDENTIFIER + '1', RESERVED_IDENTIFIER + '2'
SNAME_ISA = 'ISA'
SNAME_VOCAB = 'Vocab'
TNAME_PROJECT = 'Project'
TNAME_EXPERIMENT = 'Experiment'
TNAME_EXPERIMENT_TYPE = 'Experiment_Type'
TNAME_EXPERIMENT_COPY = 'Experiment_Copy'
hostname = os.getenv("DERIVA_PY_TEST_HOSTNAME")
logger = logging.getLogger(__name__)
if os.getenv("DERIVA_PY_TEST_VERBOSE"):
logger.setLevel(logging.DEBUG)
logger.addHandler(logging.StreamHandler())
def define_test_schema(catalog):
"""Defines the test schema.
A 'vocab' schema with an 'experiment_type' term table.
An 'isa' schema with an 'experiment' table, with 'type' that references the vocab table.
"""
model = catalog.getCatalogModel()
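    # Rough sketch of the resulting model (illustrative only):
    #   Vocab.Experiment_Type <--(Type)-- ISA.Experiment --(Project Investigator, Project_Num)--> ISA.Project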
vocab = model.create_schema(_em.Schema.define(SNAME_VOCAB))
vocab.create_table(_em.Table.define_vocabulary(TNAME_EXPERIMENT_TYPE, "TEST:{RID}"))
isa = model.create_schema(_em.Schema.define(SNAME_ISA))
# create TNAME_PROJECT table
table_def = _em.Table.define(
TNAME_PROJECT,
column_defs=[
_em.Column.define(cname, ctype) for (cname, ctype) in [
('Investigator', _em.builtin_types.text),
('Num', _em.builtin_types.int4),
(INVALID_IDENTIFIER, _em.builtin_types.int4),
(RESERVED_IDENTIFIER, _em.builtin_types.text),
(RESERVED_IDENTIFIER + '1', _em.builtin_types.text)
]
],
key_defs=[
_em.Key.define(['Investigator', 'Num'])
]
)
isa.create_table(table_def)
# create TNAME_EXPERIMENT table
table_def = _em.Table.define(
TNAME_EXPERIMENT,
column_defs=[
_em.Column.define(cname, ctype) for (cname, ctype) in [
('Name', _em.builtin_types.text),
('Amount', _em.builtin_types.int4),
('Time', _em.builtin_types.timestamptz),
('Type', _em.builtin_types.text),
('Project Investigator', _em.builtin_types.text),
('Project_Num', _em.builtin_types.int4),
('Empty', _em.builtin_types.int4)
]
],
key_defs=[
_em.Key.define(['Name'])
],
fkey_defs=[
_em.ForeignKey.define(['Type'], SNAME_VOCAB, TNAME_EXPERIMENT_TYPE, ['ID']),
_em.ForeignKey.define(['Project Investigator', 'Project_Num'], SNAME_ISA, TNAME_PROJECT, ['Investigator', 'Num'])
]
)
isa.create_table(table_def)
# create copy of TNAME_EXPERIMENT table
table_def['table_name'] = TNAME_EXPERIMENT_COPY
isa.create_table(table_def)
def _generate_experiment_entities(types, count):
"""Generates experiment entities (content only)
:param types: type entities to be referenced from entities
:param count: number of entities to return
:return: a list of dict objects (experiment entities)
"""
return [
{
"Name": TEST_EXP_NAME_FORMAT.format(i),
"Amount": i,
"Time": "2018-01-{}T01:00:00.0".format(1 + (i % 31)),
"Type": types[i % TEST_EXPTYPE_MAX]['ID'],
"Project Investigator": TEST_PROJ_INVESTIGATOR,
"Project_Num": TEST_PROJ_NUM,
"Empty": None
}
for i in range(count)
]
def populate_test_catalog(catalog):
"""Populate the test catalog."""
paths = catalog.getPathBuilder()
logger.debug("Inserting project...")
logger.debug("Inserting experiment types...")
proj_table = paths.schemas[SNAME_ISA].tables[TNAME_PROJECT]
proj_table.insert([
{"Investigator": TEST_PROJ_INVESTIGATOR, "Num": TEST_PROJ_NUM}
])
type_table = paths.schemas[SNAME_VOCAB].tables[TNAME_EXPERIMENT_TYPE]
types = type_table.insert([
{"Name": "{}".format(name), "Description": "NA"} for name in range(TEST_EXPTYPE_MAX)
], defaults=['ID', 'URI'])
logger.debug("Inserting experiments...")
exp = paths.schemas[SNAME_ISA].tables[TNAME_EXPERIMENT]
exp.insert(_generate_experiment_entities(types, TEST_EXP_MAX))
@unittest.skipUnless(hostname, "Test host not specified")
class DatapathTests (unittest.TestCase):
catalog = None
@classmethod
def setUpClass(cls):
logger.debug("setupUpClass begin")
credential = os.getenv("DERIVA_PY_TEST_CREDENTIAL") or get_credential(hostname)
server = DerivaServer('https', hostname, credential)
cls.catalog = server.create_ermrest_catalog()
try:
define_test_schema(cls.catalog)
populate_test_catalog(cls.catalog)
except Exception:
# on failure, delete catalog and re-raise exception
cls.catalog.delete_ermrest_catalog(really=True)
raise
logger.debug("setupUpClass done")
@classmethod
def tearDownClass(cls):
logger.debug("tearDownClass begin")
cls.catalog.delete_ermrest_catalog(really=True)
logger.debug("tearDownClass done")
def setUp(self):
self.paths = self.catalog.getPathBuilder()
self.project = self.paths.schemas[SNAME_ISA].tables[TNAME_PROJECT]
self.experiment = self.paths.schemas[SNAME_ISA].tables[TNAME_EXPERIMENT]
self.experiment_type = self.paths.schemas[SNAME_VOCAB].tables[TNAME_EXPERIMENT_TYPE]
self.experiment_copy = self.paths.schemas[SNAME_ISA].tables[TNAME_EXPERIMENT_COPY]
self.types = list(self.experiment_type.entities())
def tearDown(self):
try:
self.experiment_copy.path.delete()
except DataPathException:
# suppresses 404 errors when the table is empty
pass
def test_catalog_dir_base(self):
self.assertIn('schemas', dir(self.paths))
def test_schema_dir_base(self):
self.assertLess({'_name', 'tables', 'describe'}, set(dir(self.paths.schemas[SNAME_ISA])))
def test_datapath_dir_base(self):
self.assertLess({'aggregates', 'groupby', 'attributes', 'context', 'delete', 'entities', 'filter',
'link', 'table_instances', 'uri'}, set(dir(self.paths.schemas[SNAME_ISA].tables[TNAME_EXPERIMENT].path)))
def test_table_dir_base(self):
self.assertLess({'aggregates', 'alias', 'groupby', 'attributes', 'describe', 'entities', 'filter', 'insert',
'link', 'path', 'update', 'uri'}, set(dir(self.paths.schemas[SNAME_ISA].tables[TNAME_EXPERIMENT])))
def test_catalog_dir_with_schemas(self):
self.assertLess({SNAME_ISA, SNAME_VOCAB}, set(dir(self.paths)))
def test_schema_dir_with_tables(self):
self.assertIn(TNAME_EXPERIMENT, dir(self.paths.ISA))
def test_table_dir_with_columns(self):
self.assertLess({'Name', 'Amount', 'Time', 'Type'}, set(dir(self.paths.ISA.Experiment)))
def test_dir_path(self):
self.assertIn(TNAME_EXPERIMENT, dir(self.paths.ISA.Experiment.path))
def test_dir_invalid_identifier(self):
self.assertIn(INVALID_IDENTIFIER_FIXED, dir(self.project))
self.assertIsNotNone(getattr(self.project, INVALID_IDENTIFIER_FIXED))
def test_dir_conflicting_identifier(self):
self.assertIn(CONFLICTING_IDENTIFIER_FIXED, dir(self.project))
self.assertIsNotNone(getattr(self.project, CONFLICTING_IDENTIFIER))
self.assertIsNotNone(getattr(self.project, CONFLICTING_IDENTIFIER_FIXED))
def test_describe_schema(self):
with self.assertWarns(DeprecationWarning):
self.paths.schemas[SNAME_ISA].describe()
def test_describe_table(self):
with self.assertWarns(DeprecationWarning):
self.paths.schemas[SNAME_ISA].tables[TNAME_EXPERIMENT].describe()
def test_describe_column(self):
with self.assertWarns(DeprecationWarning):
self.paths.schemas[SNAME_ISA].tables[TNAME_EXPERIMENT].column_definitions['Name'].describe()
def test_unfiltered_fetch(self):
results = self.experiment.entities()
self.assertEqual(len(results), TEST_EXP_MAX)
def test_fetch_with_headers(self):
headers = {'User-Agent': __name__ + '/' + __version__}
results = self.experiment.entities().fetch(headers=headers)
self.assertEqual(len(results), TEST_EXP_MAX)
def test_fetch_with_limit(self):
results = self.experiment.entities()
limit = TEST_EXP_MAX / 5
results.fetch(limit=limit)
self.assertEqual(len(results), limit)
def test_fetch_with_sort(self):
results = self.experiment.entities()
results.sort(self.experiment.column_definitions['Amount'])
self.assertEqual(results[0]['Amount'], 0)
def test_fetch_attributes_with_sort(self):
results = self.experiment.attributes(self.experiment.RID, self.experiment.Amount)
results.sort(self.experiment.Amount)
self.assertEqual(results[0]['Amount'], 0)
def test_fetch_all_attributes_with_sort(self):
results = self.experiment.attributes(self.experiment)
results.sort(self.experiment.Amount)
self.assertEqual(results[0]['Amount'], 0)
def test_fetch_all_attributes_with_sort_desc(self):
results = self.experiment.attributes(self.experiment)
results.sort(self.experiment.Amount.desc)
self.assertEqual(results[0]['Amount'], TEST_EXP_MAX-1)
def test_fetch_from_path_attributes_with_sort_on_talias(self):
path = self.experiment.path
results = path.Experiment.attributes(path.Experiment.RID, path.Experiment.Amount)
results.sort(path.Experiment.Amount)
self.assertEqual(results[0]['Amount'], 0)
def test_fetch_from_path_attributes_with_sort_on_talias_desc(self):
path = self.experiment.path
results = path.Experiment.attributes(path.Experiment.RID, path.Experiment.Amount)
results.sort(path.Experiment.Amount.desc)
self.assertEqual(results[0]['Amount'], TEST_EXP_MAX-1)
def test_fetch_from_path_all_attributes_with_sort_on_talias(self):
path = self.experiment.path
results = path.Experiment.attributes(*path.Experiment.column_definitions.values())
results.sort(path.Experiment.Amount)
self.assertEqual(results[0]['Amount'], 0)
def test_fetch_from_path_all_attributes_with_sort_on_alias_desc(self):
path = self.experiment.path
results = path.Experiment.attributes(*path.Experiment.column_definitions.values())
results.sort(path.Experiment.Amount.desc)
self.assertEqual(results[0]['Amount'], TEST_EXP_MAX-1)
def test_fetch_all_cols_with_talias(self):
path = self.paths.schemas[SNAME_ISA].tables[TNAME_EXPERIMENT].alias('X').path
results = path.attributes(path.X)
result = results.fetch(limit=1)[0]
self.assertIn('X:RID', result)
self.assertIn('X:Name', result)
def test_fetch_with_talias(self):
path = self.paths.schemas[SNAME_ISA].tables[TNAME_EXPERIMENT].alias('X').path
results = path.attributes(path.X.RID, path.X.Name.alias('typeName'))
result = results.fetch(limit=1)[0]
self.assertIn('RID', result)
self.assertIn('typeName', result)
def test_attribute_projection(self):
results = self.experiment.attributes(
self.experiment.column_definitions['Name'],
self.experiment.column_definitions['Amount']
)
result = results.fetch(limit=1)[0]
self.assertIn('Name', result)
self.assertIn('Amount', result)
def test_attribute_err_table_attr(self):
table_attr = ['_name', '_schema']
for attr in table_attr:
with self.assertRaises(TypeError):
self.experiment.attributes(getattr(self.experiment, attr))
def test_update_err_no_targets(self):
entities = [{'RID': 1234}]
with self.assertRaises(ValueError):
self.experiment.update(entities)
def test_aggregate_w_invalid_attributes(self):
with self.assertRaises(TypeError):
self.experiment.aggregates(Min(self.experiment.column_definitions['Amount']))
def test_aggregate_w_invalid_renames(self):
with self.assertRaises(TypeError):
self.experiment.aggregates(
self.experiment.column_definitions['Name'],
Min(self.experiment.column_definitions['Amount'])
)
    @unittest.skipUnless(HAS_SUBTESTS, "This test is not available unless running python 3.4+")
def test_aggregate_fns(self):
tests = [
('min_amount', Min, 0),
('max_amount', Max, TEST_EXP_MAX-1),
('sum_amount', Sum, sum(range(TEST_EXP_MAX))),
('avg_amount', Avg, sum(range(TEST_EXP_MAX))/TEST_EXP_MAX),
('cnt_amount', Cnt, TEST_EXP_MAX),
('cnt_d_amount', CntD, TEST_EXP_MAX),
('array_amount', Array, list(range(TEST_EXP_MAX))),
('array_d_amount', ArrayD, list(range(TEST_EXP_MAX)))
]
for name, Fn, value in tests:
with self.subTest(name=name):
# results = self.experiment.aggregates(**{name: Fn(self.experiment.column_definitions['Amount'])})
results = self.experiment.aggregates(Fn(self.experiment.column_definitions['Amount']).alias(name))
result = results.fetch()[0]
self.assertIn(name, result)
self.assertEqual(result[name], value)
def test_aggregate_w_2_fns(self):
results = self.experiment.aggregates(
Min(self.experiment.column_definitions['Amount']).alias('min_amount'),
Max(self.experiment.column_definitions['Amount']).alias('max_amount')
)
result = results.fetch()[0]
self.assertIn('min_amount', result)
self.assertEqual(result['min_amount'], 0)
self.assertIn('max_amount', result)
self.assertEqual(result['max_amount'], TEST_EXP_MAX-1)
    @unittest.skipUnless(HAS_SUBTESTS, "This test is not available unless running python 3.4+")
def test_aggregate_fns_array_star(self):
path = self.experiment.path
tests = [
('array_table_star', Array, self.experiment, self.experiment),
('array_alias_star', Array, path, path.Experiment),
('arrayd_table_star', ArrayD, self.experiment, self.experiment),
('arrayd_alias_star', ArrayD, path, path.Experiment)
]
for name, Fn, path, instance in tests:
results = path.aggregates(Fn(instance).alias('arr'))
with self.subTest(name=name):
result = results.fetch()[0]
self.assertIn('arr', result)
self.assertEqual(len(result['arr']), TEST_EXP_MAX)
self.assertIn('Time', result['arr'][0])
    @unittest.skipUnless(HAS_SUBTESTS, "This test is not available unless running python 3.4+")
def test_aggregate_fns_cnt_star(self):
path = self.experiment.path
tests = [
('cnt_table_star', Cnt, self.experiment, self.experiment),
('cnt_alias_star', Cnt, path, path.Experiment)
]
for name, Fn, path, instance in tests:
results = path.aggregates(Fn(instance).alias('cnt'))
with self.subTest(name=name):
result = results.fetch()[0]
self.assertIn('cnt', result)
self.assertEqual(result['cnt'], TEST_EXP_MAX)
    @unittest.skipUnless(HAS_SUBTESTS, "This test is not available unless running python 3.4+")
def test_attributegroup_fns(self):
tests = [
('one group key', [self.experiment.column_definitions['Type']]),
('two group keys', [self.experiment.column_definitions['Project_Num'], self.experiment.column_definitions['Type']]),
('aliased group key', [self.experiment.column_definitions['Type'].alias('The Type')])
]
for test_name, group_key in tests:
with self.subTest(name=test_name):
self._do_attributegroup_fn_subtests(group_key)
    @unittest.skipUnless(HAS_SUBTESTS, "This test is not available unless running python 3.4+")
def _do_attributegroup_fn_subtests(self, group_key):
"""Helper method for running common attributegroup subtests for different group keys."""
tests = [
('min_amount', Min, 0),
('max_amount', Max, TEST_EXP_MAX-TEST_EXPTYPE_MAX),
('sum_amount', Sum, sum(range(0, TEST_EXP_MAX, TEST_EXPTYPE_MAX))),
('avg_amount', Avg, sum(range(0, TEST_EXP_MAX, TEST_EXPTYPE_MAX))/TEST_EXPTYPE_MAX),
('cnt_amount', Cnt, TEST_EXPTYPE_MAX),
('cnt_d_amount', CntD, TEST_EXPTYPE_MAX),
('array_amount', Array, list(range(0, TEST_EXP_MAX, TEST_EXPTYPE_MAX))),
('array_d_amount', ArrayD, list(range(0, TEST_EXP_MAX, TEST_EXPTYPE_MAX)))
]
for name, Fn, value in tests:
with self.subTest(name=name):
results = self.experiment.groupby(*group_key).attributes(
Fn(self.experiment.column_definitions['Amount']).alias(name)).sort(*group_key)
result = results[0]
self.assertEqual(len(results), TEST_EXPTYPE_MAX)
self.assertTrue(all(key._name in result for key in group_key))
self.assertIn(name, result)
self.assertEqual(result[name], value)
    @unittest.skipUnless(HAS_SUBTESTS, "This test is not available unless running python 3.4+")
def test_attributegroup_w_bin(self):
tests = [
('min/max given', 0, TEST_EXP_MAX),
('min/max not given', None, None),
('min only given', 0, None),
('max only given', None, TEST_EXP_MAX)
]
for testname, minval, maxval in tests:
with self.subTest(name=testname):
self._do_bin_subtests(minval, maxval)
def _do_bin_subtests(self, minval, maxval):
"""Helper method for running common binning tests with & without min/max values."""
new_name, bin_name = 'TheProj', 'ABin'
nbins = int(TEST_EXP_MAX/20)
group_key = [
self.experiment.column_definitions['Project_Num'].alias(new_name),
Bin(self.experiment.column_definitions['Amount'], nbins, minval=minval, maxval=maxval).alias(bin_name)
]
tests = [
('min_amount', Min, lambda a, b: a >= b[1]),
('max_amount', Max, lambda a, b: a <= b[2]),
('sum_amount', Sum, lambda a, b: a >= b[1] + b[2]),
('avg_amount', Avg, lambda a, b: b[1] <= a <= b[2]),
('cnt_amount', Cnt, lambda a, b: a == TEST_EXP_MAX/nbins),
('cnt_d_amount', CntD, lambda a, b: a == TEST_EXP_MAX/nbins),
('array_amount', Array, lambda a, b: all(b[1] <= a_i <= b[2] for a_i in a)),
('array_d_amount', ArrayD, lambda a, b: all(b[1] <= a_i <= b[2] for a_i in a))
]
for name, Fn, compare in tests:
with self.subTest(name=name):
results = self.experiment.groupby(*group_key).attributes(
Fn(self.experiment.column_definitions['Amount']).alias(name)).fetch()
self.assertTrue(all(key._name in results[0] for key in group_key))
self.assertIn(name, results[0])
for result in results:
bin = result[bin_name]
if not maxval and (bin[0] >= nbins):
# skip the last 2 bins when maxval was resolved; those bins are not aligned like the others
continue
self.assertTrue(compare(result[name], bin))
    @unittest.skipUnless(HAS_SUBTESTS, "This test is not available unless running python 3.4+")
def test_attributegroup_w_bin_sort(self):
bin_name = 'bin'
nbins = int(TEST_EXP_MAX/20)
bin = Bin(self.experiment.column_definitions['Amount'], nbins, 0, TEST_EXP_MAX).alias(bin_name)
bin_desc = bin.desc
asc_fn = lambda n, a, b: a[n] <= b[n]
desc_fn = lambda n, a, b: a[n] >= b[n]
tests = [
('min_amount', Min, bin, asc_fn),
('max_amount', Max, bin, asc_fn),
('sum_amount', Sum, bin, asc_fn),
('avg_amount', Avg, bin, asc_fn),
('min_amount', Min, bin_desc, desc_fn),
('max_amount', Max, bin_desc, desc_fn),
('sum_amount', Sum, bin_desc, desc_fn),
('avg_amount', Avg, bin_desc, desc_fn)
]
for name, Fn, sort_key, compfn in tests:
with self.subTest(name=name):
results = self.experiment.groupby(bin).attributes(
Fn(self.experiment.column_definitions['Amount']).alias(name)).sort(sort_key).fetch()
self.assertIn(bin._name, results[0])
self.assertIn(name, results[0])
self.assertTrue(compfn(name, results[0], results[1]))
@unittest.skipUnless(HAS_SUBTESTS, "This test is not available unless running python 3.4+")
def test_attributegroup_w_bin_resolution(self):
binkey = self.experiment.column_definitions['Empty']
binname = 'bin'
tests = [
('min_max_valid', 0, 0, True),
('max_invalid', 0, None, False),
('min_invalid', None, 0, False),
('both_invalid', None, None, False)
]
for name, minval, maxval, valid in tests:
def _do_query():
bin = Bin(binkey, 10, minval, maxval).alias(binname)
return self.experiment.groupby(bin).attributes(Avg(binkey).alias(name)).fetch()
with self.subTest(name=name):
if valid:
results = _do_query()
self.assertIn(binname, results[0])
self.assertIn(name, results[0])
else:
with self.assertRaises(ValueError):
_do_query()
def test_link_implicit(self):
results = self.experiment.link(self.experiment_type).entities()
self.assertEqual(TEST_EXPTYPE_MAX, len(results))
def test_link_explicit_simple_key(self):
results = self.experiment.link(
self.experiment_type,
on=(self.experiment.Type == self.experiment_type.ID)
).entities()
self.assertEqual(TEST_EXPTYPE_MAX, len(results))
def test_link_explicit_composite_key(self):
path = self.experiment.link(
self.project,
on=(
(self.experiment.Project_Investigator == self.project.Investigator) &
(self.experiment.Project_Num == self.project.Num)
)
)
results = path.entities()
self.assertEqual(TEST_PROJ_MAX, len(results))
def test_filter_equality(self):
results = self.experiment.filter(
self.experiment.column_definitions['Name'] == TEST_EXP_NAME_FORMAT.format(1)
).entities()
self.assertEqual(len(results), 1)
def test_filter_inequality(self):
results = self.experiment.filter(
self.experiment.column_definitions['Amount'] < 10
).entities()
self.assertEqual(len(results), 10)
def test_filter_ciregexp(self):
results = self.experiment.filter(
self.experiment.column_definitions['Name'].ciregexp(TEST_EXP_NAME_FORMAT.format(0)[10:])
).entities()
self.assertEqual(len(results), 1)
def test_filter_negation(self):
results = self.experiment.filter(
~ (self.experiment.column_definitions['Name'].ciregexp(TEST_EXP_NAME_FORMAT.format(0)[10:]))
).entities()
self.assertEqual(len(results), TEST_EXP_MAX - 1)
def test_filter_conjunction(self):
results = self.experiment.filter(
self.experiment.column_definitions['Name'].ciregexp(TEST_EXP_NAME_FORMAT.format(0)[10:])
& (self.experiment.column_definitions['Amount'] == 0)
).entities()
self.assertEqual(len(results), 1)
def test_attribute_deprecated_rename(self):
with self.assertRaises(TypeError):
self.experiment.attributes(
self.experiment.column_definitions['Name'],
howmuch=self.experiment.column_definitions['Amount']
)
def test_attribute_rename(self):
results = self.experiment.attributes(
self.experiment.column_definitions['Name'],
self.experiment.column_definitions['Amount'].alias('How many of them ?'),
self.experiment.column_definitions['Project_Num'].alias('Project #')
)
result = results.fetch(limit=1)[0]
self.assertIn('Name', result)
self.assertIn('How many of them ?', result)
self.assertIn('Project #', result)
def test_attribute_rename_special_chars(self):
# first test with only the `:` character present which would trigger a lexical error from ermrest
special_character_out_alias = self.experiment._name + ':' + self.experiment.column_definitions['Name']._name
results = self.experiment.attributes(self.experiment.column_definitions['Name'].alias(special_character_out_alias))
result = results.fetch(limit=1)[0]
self.assertIn(special_character_out_alias, result)
# second test with url unsafe characters present which would trigger a bad request from the web server
special_character_out_alias = SPECIAL_CHARACTERS
results = self.experiment.attributes(self.experiment.column_definitions['Name'].alias(special_character_out_alias))
result = results.fetch(limit=1)[0]
self.assertIn(special_character_out_alias, result)
def test_context(self):
path = self.experiment.link(self.experiment_type)
results = path.Experiment.entities()
self.assertEqual(len(results), TEST_EXP_MAX)
def test_path_table_instances(self):
path = self.experiment.link(self.experiment_type)
results = path.table_instances[TNAME_EXPERIMENT].entities()
self.assertEqual(len(results), TEST_EXP_MAX)
def test_path_project(self):
path = self.experiment.link(self.experiment_type)
results = path.Experiment.attributes(
path.Experiment,
path.Experiment_Type.column_definitions['URI'],
path.Experiment_Type.column_definitions['Name'].alias('exptype')
)
result = results.fetch(limit=1)[0]
self.assertIn('Experiment:Name', result)
self.assertIn('Experiment:Time', result)
self.assertIn('URI', result)
self.assertIn('exptype', result)
@unittest.skipUnless(HAS_PANDAS, "pandas library not available")
def test_dataframe(self):
results = self.experiment.entities()
df = DataFrame(results)
self.assertEqual(len(df), TEST_EXP_MAX)
def test_insert_empty_entities(self):
results = self.experiment_copy.insert(None)
self.assertEqual(len(results), 0)
results = self.experiment_copy.insert([])
self.assertEqual(len(results), 0)
def test_insert_entities_not_iterable(self):
with self.assertRaises(TypeError):
self.experiment_type.insert(1)
def test_insert_entities0_not_dict(self):
with self.assertRaises(TypeError):
self.experiment_type.insert([1])
with self.assertRaises(TypeError):
self.experiment_type.insert('this is not a dict')
def test_insert(self):
results = self.experiment_copy.insert(_generate_experiment_entities(self.types, 10))
self.assertEqual(len(results), 10)
def test_update(self):
inserted = self.experiment_copy.insert(_generate_experiment_entities(self.types, 10))
self.assertEqual(len(inserted), 10)
# now change something in the first result
updates = [dict(**inserted[0])]
updates[0]['Name'] = '**CHANGED**'
updated = self.experiment_copy.update(updates)
self.assertEqual(len(updated), 1)
self.assertEqual(inserted[0]['RID'], updated[0]['RID'])
self.assertNotEqual(inserted[0]['Name'], updated[0]['Name'])
def test_update_empty_entities(self):
results = self.experiment_copy.update(None)
self.assertEqual(len(results), 0)
results = self.experiment_copy.update([])
self.assertEqual(len(results), 0)
def test_update_entities_not_iterable(self):
with self.assertRaises(TypeError):
self.experiment_type.update(1)
def test_update_entities0_not_dict(self):
with self.assertRaises(TypeError):
self.experiment_type.update([1])
with self.assertRaises(TypeError):
self.experiment_type.update('this is not a dict')
def test_nondefaults(self):
nondefaults = {'RID', 'RCB', 'RCT'}
results = self.experiment.entities()
self.assertEqual(len(results), TEST_EXP_MAX)
entities_copy = self.experiment_copy.insert(results, nondefaults=nondefaults, add_system_defaults=False)
self.assertEqual(len(results), len(entities_copy), 'entities not copied completely')
ig = itemgetter(*nondefaults)
for i in range(TEST_EXP_MAX):
self.assertEqual(ig(results[i]), ig(entities_copy[i]), 'copied values do not match')
def test_nondefaults_w_add_sys_defaults(self):
nondefaults = {'RID', 'RCB', 'RCT'}
results = self.experiment.entities()
self.assertEqual(len(results), TEST_EXP_MAX)
entities_copy = self.experiment_copy.insert(results, nondefaults=nondefaults)
self.assertEqual(len(results), len(entities_copy), 'entities not copied completely')
ig = itemgetter(*nondefaults)
for i in range(TEST_EXP_MAX):
self.assertEqual(ig(results[i]), ig(entities_copy[i]), 'copied values do not match')
@unittest.skipUnless(HAS_SUBTESTS, "This test is not available unless running python 3.4+")
def test_deepcopy_of_paths(self):
paths = [
self.experiment.path,
self.experiment.link(self.experiment_type),
self.experiment.link(self.experiment_type, on=(self.experiment.Type == self.experiment_type.ID)),
self.experiment.link(
self.project,
on=(
(self.experiment.Project_Investigator == self.project.Investigator) &
(self.experiment.Project_Num == self.project.Num)
)
),
self.project.filter(self.project.Num < 1000).link(self.experiment).link(self.experiment_type),
self.experiment.alias('Exp').link(self.experiment_type.alias('ExpType')),
self.experiment.filter(self.experiment.column_definitions['Name'] == TEST_EXP_NAME_FORMAT.format(1)),
self.experiment.filter(self.experiment.column_definitions['Amount'] < 10),
self.experiment.filter(
self.experiment.column_definitions['Name'].ciregexp(TEST_EXP_NAME_FORMAT.format(0)[10:])
),
self.experiment.filter(
~ (self.experiment.column_definitions['Name'].ciregexp(TEST_EXP_NAME_FORMAT.format(0)[10:]))
),
self.experiment.filter(
self.experiment.column_definitions['Name'].ciregexp(TEST_EXP_NAME_FORMAT.format(0)[10:])
& (self.experiment.column_definitions['Amount'] == 0)
)
]
for path in paths:
with self.subTest(name=path.uri):
cp = deepcopy(path)
self.assertNotEqual(path, cp)
self.assertEqual(path.uri, cp.uri)
def test_merge_paths(self):
path1 = self.experiment.filter(self.experiment.Amount >= 0)
path2 = self.experiment.link(self.experiment_type).filter(self.experiment_type.ID >= '0')
path3 = self.experiment.link(self.project).filter(self.project.Num >= 0)
original_uri = path1.uri
# merge paths 1..3
path1.merge(path2).merge(path3)
self.assertNotEqual(path1.uri, original_uri, "Merged path's URI should have changed from its original URI")
self.assertEqual(path1.context._name, path3.context._name, "Context of merged paths should equal far right-hand path's context")
self.assertGreater(len(path1.Experiment.entities()), 0, "Should have returned results")
def test_compose_paths(self):
path1 = self.experiment.filter(self.experiment.Amount >= 0)
path2 = self.experiment.link(self.experiment_type).filter(self.experiment_type.ID >= '0')
path3 = self.experiment.link(self.project).filter(self.project.Num >= 0)
original_uri = path1.uri
# compose paths 1..3
path = self.paths.compose(path1, path2, path3)
self.assertNotEqual(path, path1, "Compose should have copied the first path rather than mutate it")
self.assertNotEqual(path.uri, path1.uri, "Composed path URI should not match the first path URI")
self.assertEqual(path1.uri, original_uri, "First path was changed")
self.assertNotEqual(path.uri, original_uri, "Merged path's URI should have changed from its original URI")
self.assertEqual(path.context._name, path3.context._name, "Context of composed paths should equal far right-hand path's context")
self.assertGreater(len(path.Experiment.entities()), 0, "Should have returned results")
if __name__ == '__main__':
sys.exit(unittest.main())
| apache-2.0 |
trankmichael/scikit-learn | sklearn/metrics/cluster/unsupervised.py | 230 | 8281 | """ Unsupervised evaluation metrics. """
# Authors: Robert Layton <[email protected]>
#
# License: BSD 3 clause
import numpy as np
from ...utils import check_random_state
from ..pairwise import pairwise_distances
def silhouette_score(X, labels, metric='euclidean', sample_size=None,
random_state=None, **kwds):
"""Compute the mean Silhouette Coefficient of all samples.
The Silhouette Coefficient is calculated using the mean intra-cluster
distance (``a``) and the mean nearest-cluster distance (``b``) for each
sample. The Silhouette Coefficient for a sample is ``(b - a) / max(a,
b)``. To clarify, ``b`` is the distance between a sample and the nearest
cluster that the sample is not a part of.
    Note that the Silhouette Coefficient is only defined if the number of labels
is 2 <= n_labels <= n_samples - 1.
This function returns the mean Silhouette Coefficient over all samples.
To obtain the values for each sample, use :func:`silhouette_samples`.
The best value is 1 and the worst value is -1. Values near 0 indicate
overlapping clusters. Negative values generally indicate that a sample has
been assigned to the wrong cluster, as a different cluster is more similar.
Read more in the :ref:`User Guide <silhouette_coefficient>`.
Parameters
----------
X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \
[n_samples_a, n_features] otherwise
Array of pairwise distances between samples, or a feature array.
labels : array, shape = [n_samples]
Predicted labels for each sample.
metric : string, or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
allowed by :func:`metrics.pairwise.pairwise_distances
<sklearn.metrics.pairwise.pairwise_distances>`. If X is the distance
array itself, use ``metric="precomputed"``.
sample_size : int or None
The size of the sample to use when computing the Silhouette Coefficient
on a random subset of the data.
If ``sample_size is None``, no sampling is used.
random_state : integer or numpy.RandomState, optional
The generator used to randomly select a subset of samples if
``sample_size is not None``. If an integer is given, it fixes the seed.
Defaults to the global numpy random number generator.
`**kwds` : optional keyword parameters
Any further parameters are passed directly to the distance function.
If using a scipy.spatial.distance metric, the parameters are still
metric dependent. See the scipy docs for usage examples.
Returns
-------
silhouette : float
Mean Silhouette Coefficient for all samples.
References
----------
.. [1] `Peter J. Rousseeuw (1987). "Silhouettes: a Graphical Aid to the
Interpretation and Validation of Cluster Analysis". Computational
and Applied Mathematics 20: 53-65.
<http://www.sciencedirect.com/science/article/pii/0377042787901257>`_
.. [2] `Wikipedia entry on the Silhouette Coefficient
<http://en.wikipedia.org/wiki/Silhouette_(clustering)>`_
"""
n_labels = len(np.unique(labels))
n_samples = X.shape[0]
if not 1 < n_labels < n_samples:
raise ValueError("Number of labels is %d. Valid values are 2 "
"to n_samples - 1 (inclusive)" % n_labels)
if sample_size is not None:
random_state = check_random_state(random_state)
indices = random_state.permutation(X.shape[0])[:sample_size]
if metric == "precomputed":
X, labels = X[indices].T[indices].T, labels[indices]
else:
X, labels = X[indices], labels[indices]
return np.mean(silhouette_samples(X, labels, metric=metric, **kwds))
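# Minimal usage sketch (illustrative, not part of the library source):
#     from sklearn.cluster import KMeans
#     from sklearn.datasets import make_blobs
#     X, _ = make_blobs(n_samples=300, centers=3, random_state=0)
#     labels = KMeans(n_clusters=3, random_state=0).fit_predict(X)
#     silhouette_score(X, labels)  # well above 0 for compact, separated blobs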
def silhouette_samples(X, labels, metric='euclidean', **kwds):
"""Compute the Silhouette Coefficient for each sample.
The Silhouette Coefficient is a measure of how well samples are clustered
with samples that are similar to themselves. Clustering models with a high
Silhouette Coefficient are said to be dense, where samples in the same
cluster are similar to each other, and well separated, where samples in
different clusters are not very similar to each other.
The Silhouette Coefficient is calculated using the mean intra-cluster
distance (``a``) and the mean nearest-cluster distance (``b``) for each
sample. The Silhouette Coefficient for a sample is ``(b - a) / max(a,
b)``.
    Note that the Silhouette Coefficient is only defined if the number of labels
is 2 <= n_labels <= n_samples - 1.
This function returns the Silhouette Coefficient for each sample.
The best value is 1 and the worst value is -1. Values near 0 indicate
overlapping clusters.
Read more in the :ref:`User Guide <silhouette_coefficient>`.
Parameters
----------
X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \
[n_samples_a, n_features] otherwise
Array of pairwise distances between samples, or a feature array.
labels : array, shape = [n_samples]
label values for each sample
metric : string, or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
allowed by :func:`sklearn.metrics.pairwise.pairwise_distances`. If X is
the distance array itself, use "precomputed" as the metric.
`**kwds` : optional keyword parameters
Any further parameters are passed directly to the distance function.
If using a ``scipy.spatial.distance`` metric, the parameters are still
metric dependent. See the scipy docs for usage examples.
Returns
-------
silhouette : array, shape = [n_samples]
        Silhouette Coefficient for each sample.
References
----------
.. [1] `Peter J. Rousseeuw (1987). "Silhouettes: a Graphical Aid to the
Interpretation and Validation of Cluster Analysis". Computational
and Applied Mathematics 20: 53-65.
<http://www.sciencedirect.com/science/article/pii/0377042787901257>`_
.. [2] `Wikipedia entry on the Silhouette Coefficient
<http://en.wikipedia.org/wiki/Silhouette_(clustering)>`_
"""
distances = pairwise_distances(X, metric=metric, **kwds)
n = labels.shape[0]
A = np.array([_intra_cluster_distance(distances[i], labels, i)
for i in range(n)])
B = np.array([_nearest_cluster_distance(distances[i], labels, i)
for i in range(n)])
sil_samples = (B - A) / np.maximum(A, B)
return sil_samples
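# Note: for the full data set (no sampling), silhouette_score above is simply
# np.mean(silhouette_samples(X, labels)), so the two functions can be
# cross-checked against each other.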
def _intra_cluster_distance(distances_row, labels, i):
"""Calculate the mean intra-cluster distance for sample i.
Parameters
----------
distances_row : array, shape = [n_samples]
Pairwise distance matrix between sample i and each sample.
labels : array, shape = [n_samples]
label values for each sample
i : int
Sample index being calculated. It is excluded from calculation and
used to determine the current label
Returns
-------
a : float
Mean intra-cluster distance for sample i
"""
mask = labels == labels[i]
mask[i] = False
if not np.any(mask):
# cluster of size 1
return 0
a = np.mean(distances_row[mask])
return a
def _nearest_cluster_distance(distances_row, labels, i):
"""Calculate the mean nearest-cluster distance for sample i.
Parameters
----------
distances_row : array, shape = [n_samples]
Pairwise distance matrix between sample i and each sample.
labels : array, shape = [n_samples]
label values for each sample
i : int
Sample index being calculated. It is used to determine the current
label.
Returns
-------
b : float
Mean nearest-cluster distance for sample i
"""
label = labels[i]
b = np.min([np.mean(distances_row[labels == cur_label])
for cur_label in set(labels) if not cur_label == label])
return b
| bsd-3-clause |
qifeigit/scikit-learn | examples/cluster/plot_kmeans_assumptions.py | 270 | 2040 | """
====================================
Demonstration of k-means assumptions
====================================
This example is meant to illustrate situations where k-means will produce
unintuitive and possibly unexpected clusters. In the first three plots, the
input data does not conform to some implicit assumption that k-means makes and
undesirable clusters are produced as a result. In the last plot, k-means
returns intuitive clusters despite unevenly sized blobs.
"""
print(__doc__)
# Author: Phil Roth <[email protected]>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
from sklearn.datasets import make_blobs
plt.figure(figsize=(12, 12))
n_samples = 1500
random_state = 170
X, y = make_blobs(n_samples=n_samples, random_state=random_state)
# Incorrect number of clusters
y_pred = KMeans(n_clusters=2, random_state=random_state).fit_predict(X)
plt.subplot(221)
plt.scatter(X[:, 0], X[:, 1], c=y_pred)
plt.title("Incorrect Number of Blobs")
# Anisotropically distributed data
transformation = [[ 0.60834549, -0.63667341], [-0.40887718, 0.85253229]]
X_aniso = np.dot(X, transformation)
y_pred = KMeans(n_clusters=3, random_state=random_state).fit_predict(X_aniso)
plt.subplot(222)
plt.scatter(X_aniso[:, 0], X_aniso[:, 1], c=y_pred)
plt.title("Anisotropically Distributed Blobs")
# Different variance
X_varied, y_varied = make_blobs(n_samples=n_samples,
cluster_std=[1.0, 2.5, 0.5],
random_state=random_state)
y_pred = KMeans(n_clusters=3, random_state=random_state).fit_predict(X_varied)
plt.subplot(223)
plt.scatter(X_varied[:, 0], X_varied[:, 1], c=y_pred)
plt.title("Unequal Variance")
# Unevenly sized blobs
X_filtered = np.vstack((X[y == 0][:500], X[y == 1][:100], X[y == 2][:10]))
y_pred = KMeans(n_clusters=3, random_state=random_state).fit_predict(X_filtered)
plt.subplot(224)
plt.scatter(X_filtered[:, 0], X_filtered[:, 1], c=y_pred)
plt.title("Unevenly Sized Blobs")
plt.show()
| bsd-3-clause |
xho95/BuildingMachineLearningSystemsWithPython | ch05/classify.py | 4 | 8098 | # This code is supporting material for the book
# Building Machine Learning Systems with Python
# by Willi Richert and Luis Pedro Coelho
# published by PACKT Publishing
#
# It is made available under the MIT License
import time
start_time = time.time()
import numpy as np
from sklearn.metrics import classification_report
from sklearn.metrics import precision_recall_curve, roc_curve, auc
from sklearn.cross_validation import KFold
from sklearn import neighbors
from data import chosen, chosen_meta
from utils import plot_pr
from utils import plot_feat_importance
from utils import load_meta
from utils import fetch_posts
from utils import plot_feat_hist
from utils import plot_bias_variance
from utils import plot_k_complexity
# question Id -> {'features'->feature vector, 'answers'->[answer Ids], 'scores'->[scores]}
# scores will be added on-the-fly as they are not in meta
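# e.g. (illustrative values only):
#   meta[42] = {'ParentId': -1, 'Score': 3, 'NumTextTokens': 120, 'NumCodeLines': 4, ...}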
meta, id_to_idx, idx_to_id = load_meta(chosen_meta)
import nltk
# The sorting below is only to ensure reproducable numbers. Further down
# we will occasionally skip a fold when it contains instances of only
# one label. The two lines below ensure that the behavior is exactly the
# same for different runs.
all_questions = sorted([q for q, v in meta.items() if v['ParentId'] == -1])
all_answers = sorted([q for q, v in meta.items() if v['ParentId'] != -1])
feature_names = np.array((
'NumTextTokens',
'NumCodeLines',
'LinkCount',
'AvgSentLen',
'AvgWordLen',
'NumAllCaps',
'NumExclams',
'NumImages'
))
def prepare_sent_features():
for pid, text in fetch_posts(chosen, with_index=True):
if not text:
meta[pid]['AvgSentLen'] = meta[pid]['AvgWordLen'] = 0
else:
sent_lens = [len(nltk.word_tokenize(
sent)) for sent in nltk.sent_tokenize(text)]
meta[pid]['AvgSentLen'] = np.mean(sent_lens)
meta[pid]['AvgWordLen'] = np.mean(
[len(w) for w in nltk.word_tokenize(text)])
meta[pid]['NumAllCaps'] = np.sum(
[word.isupper() for word in nltk.word_tokenize(text)])
meta[pid]['NumExclams'] = text.count('!')
prepare_sent_features()
def get_features(aid):
return tuple(meta[aid][fn] for fn in feature_names)
qa_X = np.asarray([get_features(aid) for aid in all_answers])
classifying_answer = "good"
#classifying_answer = "poor"
if classifying_answer == "good":
# Score > 0 tests => positive class is good answer
qa_Y = np.asarray([meta[aid]['Score'] > 0 for aid in all_answers])
elif classifying_answer == "poor":
# Score <= 0 tests => positive class is poor answer
qa_Y = np.asarray([meta[aid]['Score'] <= 0 for aid in all_answers])
else:
raise Exception("classifying_answer='%s' is not supported" %
classifying_answer)
for idx, feat in enumerate(feature_names):
plot_feat_hist([(qa_X[:, idx], feat)])
#plot_feat_hist([(qa_X[:, idx], feature_names[idx]) for idx in [1,0]], 'feat_hist_two.png')
#plot_feat_hist([(qa_X[:, idx], feature_names[idx]) for idx in [3,4,5,6]], 'feat_hist_four.png')
avg_scores_summary = []
def measure(clf_class, parameters, name, data_size=None, plot=False):
start_time_clf = time.time()
if data_size is None:
X = qa_X
Y = qa_Y
else:
X = qa_X[:data_size]
Y = qa_Y[:data_size]
cv = KFold(n=len(X), n_folds=10, indices=True)
train_errors = []
test_errors = []
scores = []
roc_scores = []
fprs, tprs = [], []
pr_scores = []
precisions, recalls, thresholds = [], [], []
for fold_idx, (train, test) in enumerate(cv):
X_train, y_train = X[train], Y[train]
X_test, y_test = X[test], Y[test]
only_one_class_in_train = len(set(y_train)) == 1
only_one_class_in_test = len(set(y_test)) == 1
if only_one_class_in_train or only_one_class_in_test:
# this would pose problems later on
continue
clf = clf_class(**parameters)
clf.fit(X_train, y_train)
train_score = clf.score(X_train, y_train)
test_score = clf.score(X_test, y_test)
train_errors.append(1 - train_score)
test_errors.append(1 - test_score)
scores.append(test_score)
proba = clf.predict_proba(X_test)
label_idx = 1
fpr, tpr, roc_thresholds = roc_curve(y_test, proba[:, label_idx])
precision, recall, pr_thresholds = precision_recall_curve(
y_test, proba[:, label_idx])
roc_scores.append(auc(fpr, tpr))
fprs.append(fpr)
tprs.append(tpr)
pr_scores.append(auc(recall, precision))
precisions.append(precision)
recalls.append(recall)
thresholds.append(pr_thresholds)
        # This threshold is determined at the end of chapter 5,
# where we find conditions such that precision is in the area of
# about 80%. With it we trade off recall for precision.
threshold_for_detecting_good_answers = 0.59
print("Clone #%i" % fold_idx)
print(classification_report(y_test, proba[:, label_idx] >
threshold_for_detecting_good_answers, target_names=['not accepted', 'accepted']))
# get medium clone
scores_to_sort = pr_scores # roc_scores
    medium = np.argsort(scores_to_sort)[len(scores_to_sort) // 2]
print("Medium clone is #%i" % medium)
if plot:
#plot_roc(roc_scores[medium], name, fprs[medium], tprs[medium])
plot_pr(pr_scores[medium], name, precisions[medium],
recalls[medium], classifying_answer + " answers")
if hasattr(clf, 'coef_'):
plot_feat_importance(feature_names, clf, name)
summary = (name,
np.mean(scores), np.std(scores),
np.mean(roc_scores), np.std(roc_scores),
np.mean(pr_scores), np.std(pr_scores),
time.time() - start_time_clf)
print(summary)
avg_scores_summary.append(summary)
precisions = precisions[medium]
recalls = recalls[medium]
thresholds = np.hstack(([0], thresholds[medium]))
idx80 = precisions >= 0.8
print("P=%.2f R=%.2f thresh=%.2f" % (precisions[idx80][0], recalls[
idx80][0], thresholds[idx80][0]))
return np.mean(train_errors), np.mean(test_errors)
def bias_variance_analysis(clf_class, parameters, name):
#import ipdb;ipdb.set_trace()
data_sizes = np.arange(60, 2000, 4)
train_errors = []
test_errors = []
for data_size in data_sizes:
train_error, test_error = measure(
clf_class, parameters, name, data_size=data_size)
train_errors.append(train_error)
test_errors.append(test_error)
plot_bias_variance(data_sizes, train_errors,
test_errors, name, "Bias-Variance for '%s'" % name)
def k_complexity_analysis(clf_class, parameters):
ks = np.hstack((np.arange(1, 20), np.arange(21, 100, 5)))
train_errors = []
test_errors = []
for k in ks:
parameters['n_neighbors'] = k
train_error, test_error = measure(
clf_class, parameters, "%dNN" % k, data_size=2000)
train_errors.append(train_error)
test_errors.append(test_error)
plot_k_complexity(ks, train_errors, test_errors)
for k in [5]:
# for k in [5, 10, 40]:
#measure(neighbors.KNeighborsClassifier, {'n_neighbors': k}, "%iNN" % k)
bias_variance_analysis(neighbors.KNeighborsClassifier, {
'n_neighbors': k}, "%iNN" % k)
k_complexity_analysis(neighbors.KNeighborsClassifier, {'n_neighbors': k})
from sklearn.linear_model import LogisticRegression
for C in [0.1]:
# for C in [0.01, 0.1, 1.0, 10.0]:
name = "LogReg C=%.2f" % C
bias_variance_analysis(LogisticRegression, {'penalty': 'l2', 'C': C}, name)
measure(LogisticRegression, {'penalty': 'l2', 'C': C}, name, plot=True)
print("=" * 50)
from operator import itemgetter
for s in reversed(sorted(avg_scores_summary, key=itemgetter(1))):
print("%-20s\t%.5f\t%.5f\t%.5f\t%.5f\t%.5f\t%.5f\t%.5f" % s)
print("time spent:", time.time() - start_time)
| mit |
0-wiz-0/audacity | lib-src/portaudio-v19/test/patest_suggested_vs_streaminfo_latency.py | 30 | 5504 | #!/usr/bin/env python
"""
Run and graph the results of patest_suggested_vs_streaminfo_latency.c
Requires matplotlib for plotting: http://matplotlib.sourceforge.net/
"""
import os
from pylab import *
import numpy
from matplotlib.backends.backend_pdf import PdfPages
testExeName = "PATest.exe" # rename to whatever the compiled patest_suggested_vs_streaminfo_latency.c binary is
dataFileName = "patest_suggested_vs_streaminfo_latency.csv" # code below calls the exe to generate this file
inputDeviceIndex = -1 # -1 means default
outputDeviceIndex = -1 # -1 means default
sampleRate = 44100
pdfFilenameSuffix = "_wmme"
pdfFile = PdfPages("patest_suggested_vs_streaminfo_latency_" + str(sampleRate) + pdfFilenameSuffix +".pdf") #output this pdf file
def loadCsvData( dataFileName ):
params= ""
inputDevice = ""
outputDevice = ""
startLines = file(dataFileName).readlines(1024)
for line in startLines:
if "output device" in line:
outputDevice = line.strip(" \t\n\r#")
if "input device" in line:
inputDevice = line.strip(" \t\n\r#")
params = startLines[0].strip(" \t\n\r#")
data = numpy.loadtxt(dataFileName, delimiter=",", skiprows=4).transpose()
class R(object): pass
result = R()
result.params = params
for s in params.split(','):
if "sample rate" in s:
result.sampleRate = s
result.inputDevice = inputDevice
result.outputDevice = outputDevice
result.suggestedLatency = data[0]
result.halfDuplexOutputLatency = data[1]
result.halfDuplexInputLatency = data[2]
result.fullDuplexOutputLatency = data[3]
result.fullDuplexInputLatency = data[4]
return result;
def setFigureTitleAndAxisLabels( framesPerBufferString ):
title("PortAudio suggested (requested) vs. resulting (reported) stream latency\n" + framesPerBufferString)
ylabel("PaStreamInfo::{input,output}Latency (s)")
xlabel("Pa_OpenStream suggestedLatency (s)")
grid(True)
legend(loc="upper left")
def setDisplayRangeSeconds( maxSeconds ):
xlim(0, maxSeconds)
ylim(0, maxSeconds)
# run the test with different frames per buffer values:
compositeTestFramesPerBufferValues = [0]
# powers of two
for i in range (1,11):
compositeTestFramesPerBufferValues.append( pow(2,i) )
# multiples of 50
for i in range (1,20):
compositeTestFramesPerBufferValues.append( i * 50 )
# 10 ms and 20 ms buffer sizes at 44.1 kHz (441 and 882 frames)
compositeTestFramesPerBufferValues.append( 441 )
compositeTestFramesPerBufferValues.append( 882 )
# large primes
#compositeTestFramesPerBufferValues.append( 39209 )
#compositeTestFramesPerBufferValues.append( 37537 )
#compositeTestFramesPerBufferValues.append( 26437 )
individualPlotFramesPerBufferValues = [0,64,128,256,512] #output separate plots for these
isFirst = True
for framesPerBuffer in compositeTestFramesPerBufferValues:
commandString = testExeName + " " + str(inputDeviceIndex) + " " + str(outputDeviceIndex) + " " + str(sampleRate) + " " + str(framesPerBuffer) + ' > ' + dataFileName
print commandString
os.system(commandString)
d = loadCsvData(dataFileName)
if isFirst:
figure(1) # title sheet
gcf().text(0.1, 0.0,
"patest_suggested_vs_streaminfo_latency\n%s\n%s\n%s\n"%(d.inputDevice,d.outputDevice,d.sampleRate))
pdfFile.savefig()
figure(2) # composite plot, includes all compositeTestFramesPerBufferValues
if isFirst:
plot( d.suggestedLatency, d.suggestedLatency, label="Suggested latency" )
plot( d.suggestedLatency, d.halfDuplexOutputLatency )
plot( d.suggestedLatency, d.halfDuplexInputLatency )
plot( d.suggestedLatency, d.fullDuplexOutputLatency )
plot( d.suggestedLatency, d.fullDuplexInputLatency )
if framesPerBuffer in individualPlotFramesPerBufferValues: # individual plots
figure( 3 + individualPlotFramesPerBufferValues.index(framesPerBuffer) )
plot( d.suggestedLatency, d.suggestedLatency, label="Suggested latency" )
plot( d.suggestedLatency, d.halfDuplexOutputLatency, label="Half-duplex output latency" )
plot( d.suggestedLatency, d.halfDuplexInputLatency, label="Half-duplex input latency" )
plot( d.suggestedLatency, d.fullDuplexOutputLatency, label="Full-duplex output latency" )
plot( d.suggestedLatency, d.fullDuplexInputLatency, label="Full-duplex input latency" )
if framesPerBuffer == 0:
framesPerBufferText = "paFramesPerBufferUnspecified"
else:
framesPerBufferText = str(framesPerBuffer)
setFigureTitleAndAxisLabels( "user frames per buffer: "+str(framesPerBufferText) )
setDisplayRangeSeconds(2.2)
pdfFile.savefig()
setDisplayRangeSeconds(0.1)
setFigureTitleAndAxisLabels( "user frames per buffer: "+str(framesPerBufferText)+" (detail)" )
pdfFile.savefig()
isFirst = False
figure(2)
setFigureTitleAndAxisLabels( "composite of frames per buffer values:\n"+str(compositeTestFramesPerBufferValues) )
setDisplayRangeSeconds(2.2)
pdfFile.savefig()
setDisplayRangeSeconds(0.1)
setFigureTitleAndAxisLabels( "composite of frames per buffer values:\n"+str(compositeTestFramesPerBufferValues)+" (detail)" )
pdfFile.savefig()
pdfFile.close()
#uncomment this to display interactively, otherwise we just output a pdf
#show()
| gpl-2.0 |
lseman/pylspm | pylspm/qpLRlib4.py | 1 | 4837 | import numpy as np
from gurobipy import *
import matplotlib
from matplotlib import pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
def plota(x, y, ac, awL, awR, xname, yname, size):
plt.plot(x, y, 'o', markersize=2, label='Dados')
xvalues = np.arange(min(x), max(x), 0.1)
ylow = -awL[0].x + [(ac[i] - awL[i + 1].x) *
xvalues for i in range(size)][0]
ymid = [ac[i] * xvalues for i in range(size)][0]
yhigh = awR[0].x + [(ac[i] + awR[i + 1].x) *
xvalues for i in range(size)][0]
superior = plt.plot(xvalues, ylow, 'b--', label='Limite Inferior')
centroide = plt.plot(xvalues, ymid, 'k--', label='Centroide')
inferior = plt.plot(xvalues, yhigh, 'r--', label='Limite Superior')
plt.legend()
plt.xlabel(xname[0], fontsize=12)
plt.ylabel(yname, fontsize=12)
plt.savefig('imgs/fuzzy' + yname, bbox_inches='tight')
plt.clf()
plt.cla()
def otimiza(y, x, size, h, method='fuzzy', plotaIC='false'):
n = len(y)
model = Model("qTSQ-PLS-PM with fuzzy scheme")
model.setParam("OutputFlag", 0)
awL = {}
awR = {}
# h = 0
for i in range(size + 1):
awL[i] = model.addVar(lb=-0.0, name="awL%d" % i)
awR[i] = model.addVar(lb=-0.0, name="awR%d" % i)
ac, resid = np.linalg.lstsq(x, y)[:2]
yname = y.name
xname = x.columns.values
# print(['y: ' + yname])
# print('x: ' + xname)
y = y.values
x = x.values
model.setObjective(quicksum((float(np.dot(x[:, j], x[:, j].transpose()))
* (awL[j + 1] + awR[j + 1]) * (awL[j + 1] + awR[j + 1]))
for j in range(size))
+ (awL[0] + awR[0]) * (awL[0] + awR[0]),
GRB.MINIMIZE)
    # Remember that N does not go in the for loop
for i in range(n):
model.addConstr(
quicksum((ac[j] * x[i, j])
for j in range(size))
- (1 - h) * (awL[0] + quicksum((abs(x[i, j]) * awL[j + 1])
for j in range(size))) <= y[i])
for i in range(n):
model.addConstr(
quicksum((ac[j] * x[i, j])
for j in range(size))
+ (1 - h) * (awR[0] + quicksum((abs(x[i, j]) * awR[j + 1])
for j in range(size))) >= y[i])
model.optimize()
# print(awL)
# print(awR)
# plota(x, y, ac, awL, awR, xname, yname, size)
# ic = IC(x, y, ac, awL, awR, size)
# print(yname)
# print(xname)
# print(ic)
if plotaIC == 'false':
return [ac[i] for i in range(size)], [(ac[i] - awL[i + 1].x) for i in range(size)], [(ac[i] + awR[i + 1].x) for i in range(size)]
if plotaIC == 'true':
return model, ac, awL, awR
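# ---------------------------------------------------------------------------
# Minimal usage sketch (added for illustration, not part of the original
# module). It assumes a licensed Gurobi installation and a hypothetical
# pandas DataFrame `df` with a dependent column 'y' and regressors 'x1', 'x2'.
#
#   import pandas as pd
#   df = pd.read_csv('data.csv')      # hypothetical input file
#   y = df['y']                       # pandas Series; otimiza reads y.name
#   x = df[['x1', 'x2']]              # DataFrame; otimiza reads x.columns
#   center, lower, upper = otimiza(y, x, size=2, h=0.5)
#   print(center, lower, upper)       # central and fuzzy (lower/upper) coefficients
# ---------------------------------------------------------------------------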
def IC(x, y, ac, awL, awR, size):
ylow = []
yhigh = []
ymid = []
for i in range(len(y)):
ylow.append((0 - awL[0].x) + [(ac[j] - awL[j + 1].x) *
x[i, j] for j in range(size)][0])
for i in range(len(y)):
yhigh.append((0 + awR[0].x) + [(ac[j] + awR[j + 1].x) *
x[i, j] for j in range(size)][0])
for i in range(len(y)):
ymid.append((ylow[i] + yhigh[i]) / 2)
SST = 0
for i in range(len(y)):
SST += (y[i] - ylow[i]) * (y[i] - ylow[i]) + \
(yhigh[i] - y[i]) * (yhigh[i] - y[i])
SSR = 0
for i in range(len(y)):
SSR += (ymid[i] - ylow[i]) * (ymid[i] - ylow[i]) + \
(yhigh[i] - ymid[i]) * (yhigh[i] - ymid[i])
IC = SSR / SST
return IC
def plotaIC(y, x, size):
h = 0
IClist = []
hlist = []
awRlist = []
awLlist = []
nomeia = y.name
for i in range(0, 19):
h += 0.05
print(h)
hlist.append(h)
model, ac, awL, awR = otimiza(y, x, size, h, plotaIC='true')
awRlist.append(awR[1].x)
awLlist.append(awL[1].x)
IClist.append(IC(x.values, y.values, ac, awL, awR, size))
x = hlist
y = IClist
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.set_xlabel('índice h')
ax.set_ylabel('IC')
ax.set_zlabel('awR')
ax.set_axis_bgcolor('white')
ax.plot(x, y, awRlist)
plt.savefig('imgs/IC_awR_' + nomeia, bbox_inches='tight')
plt.clf()
plt.cla()
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.set_xlabel('índice h')
ax.set_ylabel('IC')
ax.set_zlabel('awL')
ax.set_axis_bgcolor('white')
ax.plot(x, y, awLlist)
plt.savefig('imgs/IC_awL_' + nomeia, bbox_inches='tight')
plt.clf()
plt.cla() | mit |
tomvand/paparazzi-gazebo | sw/ground_segment/python/gvf/gvfframe.py | 4 | 14949 | import wx
import time
from scipy import linalg as la
from matplotlib.path import Path
from matplotlib.backends.backend_wxagg import FigureCanvasWxAgg as FigureCanvas
import matplotlib.pyplot as pl
import matplotlib.patches as patches
import numpy as np
import sys
from os import path, getenv
PPRZ_SRC = getenv("PAPARAZZI_SRC", path.normpath(path.join(path.dirname(path.abspath(__file__)), '../../../../')))
sys.path.append(PPRZ_SRC + "/sw/lib/python")
sys.path.append(PPRZ_SRC + "/sw/ext/pprzlink/lib/v1.0/python")
from pprzlink.ivy import IvyMessagesInterface
from pprzlink.message import PprzMessage
from settings_xml_parse import PaparazziACSettings
WIDTH = 800
HEIGHT = 800
class GVFFrame(wx.Frame):
def __init__(self, ac_id):
wx.Frame.__init__(self, id=-1, parent=None, \
name=u'GVF', size=wx.Size(WIDTH, HEIGHT), \
style=wx.DEFAULT_FRAME_STYLE, title=u'Guidance Vector Field')
# Vehicle variables
self.ac_id = ac_id
self.course = 0
self.yaw = 0
self.XY = np.array([0, 0])
# Desired trajectory
self.timer_traj = 0 # We do not update the traj every time we receive a msg
self.timer_traj_lim = 7 # (7+1) * 0.25secs
self.s = 0
self.kn = 0
self.ke = 0
self.map_gvf = map2d(np.array([0, 0]), 150000)
self.traj = None
# Frame
self.canvas = FigureCanvas(self, -1, self.map_gvf.fig)
self.Bind(wx.EVT_CLOSE, self.OnClose)
self.redraw_timer = wx.Timer(self)
self.Bind(wx.EVT_TIMER, self.OnRedrawTimer, self.redraw_timer)
self.redraw_timer.Start(100)
# Ivy
self.interface = IvyMessagesInterface("GVF")
self.interface.subscribe(self.message_recv)
settings = PaparazziACSettings(ac_id)
self.ke_index = None
self.kn_index = None
self.indexes_are_good = 0
self.list_of_indexes = ['gvf_ke', 'gvf_kn']
for setting_ in self.list_of_indexes:
try:
index = settings.name_lookup[setting_].index
if setting_ == 'gvf_ke':
self.ke_index = index
if setting_ == 'gvf_kn':
self.kn_index = index
self.indexes_are_good = self.indexes_are_good + 1
except Exception as e:
print(e)
print(setting_ + " setting not found, \
have you forgotten gvf.xml in your settings?")
def message_recv(self, ac_id, msg):
if int(ac_id) == self.ac_id:
if msg.name == 'GPS':
self.course = int(msg.get_field(3))*np.pi/1800
if msg.name == 'NAVIGATION':
self.XY[0] = float(msg.get_field(2))
self.XY[1] = float(msg.get_field(3))
if msg.name == 'ATTITUDE':
self.yaw = float(msg.get_field(1))
if msg.name == 'DL_VALUE' and \
self.indexes_are_good == len(self.list_of_indexes):
if int(msg.get_field(0)) == int(self.ke_index):
self.ke = float(msg.get_field(1))
if self.traj is not None:
self.traj.vector_field(self.traj.XYoff, \
self.map_gvf.area, self.s, self.kn, self.ke)
if int(msg.get_field(0)) == int(self.kn_index):
self.kn = float(msg.get_field(1))
if self.traj is not None:
self.traj.vector_field(self.traj.XYoff, \
self.map_gvf.area, self.s, self.kn, self.ke)
if msg.name == 'GVF':
self.gvf_error = float(msg.get_field(0))
# Straight line
if int(msg.get_field(1)) == 0 \
and self.timer_traj == self.timer_traj_lim:
self.s = int(msg.get_field(2))
param = [float(x) for x in msg.get_field(3).split(',')]
a = param[0]
b = param[1]
c = param[2]
self.traj = traj_line(np.array([-100,100]), a, b, c)
self.traj.vector_field(self.traj.XYoff, self.map_gvf.area, \
self.s, self.kn, self.ke)
# Ellipse
if int(msg.get_field(1)) == 1 \
and self.timer_traj == self.timer_traj_lim:
self.s = int(msg.get_field(2))
param = [float(x) for x in msg.get_field(3).split(',')]
ex = param[0]
ey = param[1]
ea = param[2]
eb = param[3]
ealpha = param[4]
self.traj = traj_ellipse(np.array([ex, ey]), ealpha, ea, eb)
self.traj.vector_field(self.traj.XYoff, \
self.map_gvf.area, self.s, self.kn, self.ke)
# Sin
if int(msg.get_field(1)) == 2 \
and self.timer_traj == self.timer_traj_lim:
self.s = int(msg.get_field(2))
param = [float(x) for x in msg.get_field(3).split(',')]
a = param[0]
b = param[1]
alpha = param[2]
w = param[3]
off = param[4]
A = param[5]
self.traj = traj_sin(np.array([-100, 100]), a, b, alpha, \
w, off, A)
self.traj.vector_field(self.traj.XYoff, \
self.map_gvf.area, self.s, self.kn, self.ke)
self.timer_traj = self.timer_traj + 1
if self.timer_traj > self.timer_traj_lim:
self.timer_traj = 0
def draw_gvf(self, XY, yaw, course):
if self.traj is not None:
self.map_gvf.draw(XY, yaw, course, self.traj)
def OnClose(self, event):
self.interface.shutdown()
self.Destroy()
def OnRedrawTimer(self, event):
self.draw_gvf(self.XY, self.yaw, self.course)
self.canvas.draw()
class map2d:
def __init__(self, XYoff, area):
self.XYoff = XYoff
self.area = area
self.fig, self.ax = pl.subplots()
self.ax.set_xlabel('South [m]')
self.ax.set_ylabel('West [m]')
self.ax.set_title('2D Map')
self.ax.annotate('HOME', xy = (0, 0))
self.ax.set_xlim(XYoff[0]-0.5*np.sqrt(area), XYoff[0]+0.5*np.sqrt(area))
self.ax.set_ylim(XYoff[1]-0.5*np.sqrt(area), XYoff[1]+0.5*np.sqrt(area))
self.ax.axis('equal')
def vehicle_patch(self, XY, yaw):
Rot = np.array([[np.cos(yaw), np.sin(yaw)],[-np.sin(yaw), np.cos(yaw)]])
apex = 45*np.pi/180 # 45 degree apex angle
b = np.sqrt(2*(self.area/2000) / np.sin(apex))
a = b*np.sin(apex/2)
h = b*np.cos(apex/2)
z1 = np.array([a/2, -h*0.3])
z2 = np.array([-a/2, -h*0.3])
z3 = np.array([0, h*0.6])
z1 = Rot.dot(z1)
z2 = Rot.dot(z2)
z3 = Rot.dot(z3)
verts = [(XY[0]+z1[0], XY[1]+z1[1]), \
(XY[0]+z2[0], XY[1]+z2[1]), \
(XY[0]+z3[0], XY[1]+z3[1]), \
(0, 0)]
codes = [Path.MOVETO, Path.LINETO, Path.LINETO, Path.CLOSEPOLY]
path = Path(verts, codes)
return patches.PathPatch(path, facecolor='red', lw=2)
def draw(self, XY, yaw, course, traj):
self.ax.clear()
self.ax.plot(traj.traj_points[0, :], traj.traj_points[1, :])
self.ax.quiver(traj.mapgrad_X, traj.mapgrad_Y, \
traj.mapgrad_U, traj.mapgrad_V, color='Teal', \
pivot='mid', width=0.002)
self.ax.add_patch(self.vehicle_patch(XY, yaw)) # In radians
apex = 45*np.pi/180 # 45 degree apex angle
b = np.sqrt(2*(self.area/2000) / np.sin(apex))
h = b*np.cos(apex/2)
self.ax.arrow(XY[0], XY[1], \
h*np.sin(course), h*np.cos(course),\
head_width=5, head_length=10, fc='k', ec='k')
self.ax.annotate('HOME', xy = (0, 0))
if isinstance(traj, traj_ellipse):
self.ax.annotate('ELLIPSE', xy = (traj.XYoff[0], traj.XYoff[1]))
self.ax.plot(0, 0, 'kx', ms=10, mew=2)
self.ax.plot(traj.XYoff[0], traj.XYoff[1], 'kx', ms=10, mew=2)
elif isinstance(traj, traj_sin):
self.ax.annotate('SIN', xy = (traj.XYoff[0], traj.XYoff[1]))
self.ax.plot(0, 0, 'kx', ms=10, mew=2)
self.ax.plot(traj.XYoff[0], traj.XYoff[1], 'kx', ms=10, mew=2)
elif isinstance(traj, traj_line):
self.ax.annotate('LINE', xy = (traj.XYoff[0], traj.XYoff[1]))
self.ax.plot(0, 0, 'kx', ms=10, mew=2)
self.ax.plot(traj.XYoff[0], traj.XYoff[1], 'kx', ms=10, mew=2)
self.ax.set_xlabel('South [m]')
self.ax.set_ylabel('West [m]')
self.ax.set_title('2D Map')
self.ax.set_xlim(self.XYoff[0]-0.5*np.sqrt(self.area), \
self.XYoff[0]+0.5*np.sqrt(self.area))
self.ax.set_ylim(self.XYoff[1]-0.5*np.sqrt(self.area), \
self.XYoff[1]+0.5*np.sqrt(self.area))
self.ax.axis('equal')
self.ax.grid()
class traj_line:
def float_range(self, start, end, step):
while start <= end:
yield start
start += step
def __init__(self, Xminmax, a, b, alpha):
self.XYoff = np.array([a, b])
self.Xminmax = Xminmax
self.a, self.b, self.alpha = a, b, alpha
self.traj_points = np.zeros((2, 200))
self.mapgrad_X = []
self.mapgrad_Y = []
self.mapgrad_U = []
self.mapgrad_V = []
i = 0
for t in self.float_range(0, 1, 0.005):
x = (self.Xminmax[1]-self.Xminmax[0])*t + self.Xminmax[0]
i = i + 1
xtr = np.linspace(-200, 200, 400)
xl = xtr*np.sin(self.alpha) + a
yl = xtr*np.cos(self.alpha) + b
self.traj_points = np.vstack((xl, yl))
def param_point(self, t):
i = 0
def vector_field(self, XYoff, area, s, kn, ke):
self.mapgrad_X, self.mapgrad_Y = np.mgrid[XYoff[0]-0.5*np.sqrt(area):\
XYoff[0]+0.5*np.sqrt(area):30j, \
XYoff[1]-0.5*np.sqrt(area):\
XYoff[1]+0.5*np.sqrt(area):30j]
nx = -np.cos(self.alpha)
ny = np.sin(self.alpha)
tx = s*ny
ty = -s*nx
ke = 1e-2*ke
e = (self.mapgrad_X-self.a)*nx + (self.mapgrad_Y-self.b)*ny
self.mapgrad_U = tx -ke*e*nx
self.mapgrad_V = ty -ke*e*ny
norm = np.sqrt(self.mapgrad_U**2 + self.mapgrad_V**2)
self.mapgrad_U = self.mapgrad_U/norm
self.mapgrad_V = self.mapgrad_V/norm
class traj_ellipse:
def float_range(self, start, end, step):
while start <= end:
yield start
start += step
def __init__(self, XYoff, rot, a, b):
self.XYoff = XYoff
self.a, self.b = a, b
self.rot = rot
self.traj_points = np.zeros((2, 200))
self.mapgrad_X = []
self.mapgrad_Y = []
self.mapgrad_U = []
self.mapgrad_V = []
i = 0
for t in self.float_range(0, 1, 0.005):
self.traj_points[:, i] = self.param_point(t)
i = i + 1
def param_point(self, t):
angle = 2*np.pi*t
return self.XYoff \
+ np.array([self.a*np.cos(angle)*np.cos(-self.rot) - \
self.b*np.sin(angle)*np.sin(-self.rot), \
self.a*np.cos(angle)*np.sin(-self.rot) + \
self.b*np.sin(angle)*np.cos(-self.rot)])
def vector_field(self, XYoff, area, s, kn, ke):
self.mapgrad_X, self.mapgrad_Y = np.mgrid[XYoff[0]-0.5*np.sqrt(area):\
XYoff[0]+0.5*np.sqrt(area):30j, \
XYoff[1]-0.5*np.sqrt(area):\
XYoff[1]+0.5*np.sqrt(area):30j]
Xel = (self.mapgrad_X-self.XYoff[0])*np.cos(self.rot) \
- (self.mapgrad_Y-self.XYoff[1])*np.sin(self.rot)
Yel = (self.mapgrad_X-self.XYoff[0])*np.sin(self.rot) \
+ (self.mapgrad_Y-self.XYoff[1])*np.cos(self.rot)
nx = 2*Xel*np.cos(self.rot)/self.a**2 \
+ 2*Yel*np.sin(self.rot)/self.b**2
ny = -2*Xel*np.sin(self.rot)/self.a**2 \
+ 2*Yel*np.cos(self.rot)/self.b**2
tx = s*ny
ty = -s*nx
e = (Xel/self.a)**2 + (Yel/self.b)**2 - 1
self.mapgrad_U = tx -ke*e*nx
self.mapgrad_V = ty -ke*e*ny
norm = np.sqrt(self.mapgrad_U**2 + self.mapgrad_V**2)
self.mapgrad_U = self.mapgrad_U/norm
self.mapgrad_V = self.mapgrad_V/norm
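# ---------------------------------------------------------------------------
# Standalone sketch (added for illustration, not part of the original file):
# the trajectory classes above can be used without wx/Ivy to inspect a
# guidance vector field. The parameter values below are arbitrary examples.
#
#   import numpy as np
#   import matplotlib.pyplot as plt
#   ell = traj_ellipse(np.array([0.0, 0.0]), rot=0.0, a=60.0, b=40.0)
#   ell.vector_field(ell.XYoff, area=40000, s=1, kn=1.0, ke=1.0)
#   plt.plot(ell.traj_points[0, :], ell.traj_points[1, :])
#   plt.quiver(ell.mapgrad_X, ell.mapgrad_Y, ell.mapgrad_U, ell.mapgrad_V)
#   plt.axis('equal')
#   plt.show()
# ---------------------------------------------------------------------------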
class traj_sin:
def float_range(self, start, end, step):
while start <= end:
yield start
start += step
def __init__(self, Xminmax, a, b, alpha, w, off, A):
self.XYoff = np.array([a, b])
self.Xminmax = Xminmax
self.a, self.b, self.alpha, self.w, self.off, self.A = \
a, b, alpha, w, off, A
self.traj_points = np.zeros((2, 200))
self.mapgrad_X = []
self.mapgrad_Y = []
self.mapgrad_U = []
self.mapgrad_V = []
i = 0
for t in self.float_range(0, 1, 0.005):
x = (self.Xminmax[1]-self.Xminmax[0])*t + self.Xminmax[0]
i = i + 1
xtr = np.linspace(-200, 200, 400)
ytr = self.A*np.sin(self.w*xtr + self.off)
xsin = -xtr*np.sin(self.alpha) + ytr*np.cos(self.alpha) + a
ysin = xtr*np.cos(self.alpha) + ytr*np.sin(self.alpha) + b
self.traj_points = np.vstack((xsin, ysin))
def param_point(self, t):
i = 0
def vector_field(self, XYoff, area, s, kn, ke):
self.mapgrad_X, self.mapgrad_Y = np.mgrid[XYoff[0]-0.5*np.sqrt(area):\
XYoff[0]+0.5*np.sqrt(area):30j, \
XYoff[1]-0.5*np.sqrt(area):\
XYoff[1]+0.5*np.sqrt(area):30j]
xs = (self.mapgrad_X-self.XYoff[0])*np.sin(self.alpha) \
- (self.mapgrad_Y-self.XYoff[1])*np.cos(self.alpha)
ys = -(self.mapgrad_X-self.XYoff[0])*np.cos(self.alpha) \
- (self.mapgrad_Y-self.XYoff[1])*np.sin(self.alpha)
ang = self.w*xs + self.off
nx = -np.cos(self.alpha) - \
self.A*self.w*np.cos(ang)*np.sin(self.alpha)
ny = -np.sin(self.alpha) + \
self.A*self.w*np.cos(ang)*np.cos(self.alpha)
tx = s*ny
ty = -s*nx
ke = 1e-2*ke
e = ys - self.A*np.sin(ang)
self.mapgrad_U = tx -ke*e*nx
self.mapgrad_V = ty -ke*e*ny
norm = np.sqrt(self.mapgrad_U**2 + self.mapgrad_V**2)
self.mapgrad_U = self.mapgrad_U/norm
self.mapgrad_V = self.mapgrad_V/norm
| gpl-2.0 |
qPCR4vir/orange | Orange/orng/orngProjectionPursuit.py | 6 | 7987 | import orange
import numpy
import scipy.special
import scipy.optimize
import scipy.stats
from pylab import *
def sqrtm(mat):
""" Retruns the square root of the matrix mat """
U, S, V = numpy.linalg.svd(mat)
D = numpy.diag(numpy.sqrt(S))
return numpy.dot(numpy.dot(U,D),V)
def standardize(mat):
""" Subtracts means and multiplies by diagonal elements of inverse
square root of covariance matrix.
"""
av = numpy.average(mat, axis=0)
sigma = numpy.corrcoef(mat, rowvar=0)
srSigma = sqrtm(sigma)
isrSigma = numpy.linalg.inv(srSigma)
return (mat-av) * numpy.diag(isrSigma)
def friedman_tmp_func(alpha, Z=numpy.zeros((1,1)), J=5, n=1):
alpha = numpy.array(alpha)
pols = [scipy.special.legendre(j) for j in range(0,J+1)]
vals0 = [numpy.dot(alpha.T, Z[i,:]) for i in range(n)]
def f_tmp(x): return 2*x-1
vals = map(f_tmp, map(scipy.stats.zprob, vals0))
val = [1./n*sum(map(p, vals))**2 for p in pols]
return vals, pols, - 0.5 * sum([(2*j+1)*v for j, v in enumerate(val)])
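# Illustration (added comment, not part of the original module): the function
# returns minus the Friedman projection index of the direction `alpha`, so it
# can be minimized directly; the more structured (less Gaussian) the projected
# data, the more negative the returned value. A quick check on random data:
#
#   Z = standardize(numpy.random.randn(100, 3))
#   alpha = numpy.array([1.0, 0.0, 0.0])
#   vals, pols, score = friedman_tmp_func(alpha, Z=Z, J=5, n=100)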
class ProjectionPursuit:
FRIEDMAN = 0
MOMENT = 1
SILHUETTE = 2
HARTINGAN = 3
def __init__(self, data, index = FRIEDMAN, dim=2, maxiter=10):
self.dim = dim
if type(data) == orange.ExampleTable:
self.dataNP = data.toNumpy()[0] # TODO: check if conversion of discrete values works ok
else:
self.dataNP = data
self.Z = standardize(self.dataNP)
self.totalSize, self.nVars = numpy.shape(self.Z)
self.maxiter = maxiter
self.currentOptimum = None
self.index = index
def optimize(self, maxiter = 5, opt_method=scipy.optimize.fmin):
func = self.getIndex()
if self.currentOptimum != None:
x = self.currentOptimum
else:
x = numpy.random.rand(self.dim * self.nVars)
alpha = opt_method(func, x, maxiter=maxiter).reshape(self.dim * self.nVars,1)
self.currentOptimum = alpha
print alpha, len(alpha)
optValue = func(alpha)
if self.dim == 2:
alpha1 = alpha[:self.nVars]
alpha2 = alpha[self.nVars:]
alpha = numpy.append(alpha1, alpha2, axis=1)
projectedData = numpy.dot(self.Z, alpha)
return alpha, optValue, projectedData
def find_optimum(self, opt_method=scipy.optimize.fmin):
func = self.getIndex()
alpha = opt_method(func, \
numpy.random.rand(self.dim * self.nVars),\
maxiter=self.maxiter).reshape(self.dim * self.nVars,1)
print alpha, len(alpha)
optValue = func(alpha)
if self.dim == 2:
alpha1 = alpha[:self.nVars]
alpha2 = alpha[self.nVars:]
alpha = numpy.append(alpha1, alpha2, axis=1)
projectedData = numpy.dot(self.Z, alpha)
return alpha, optValue, projectedData
def getIndex(self):
if self.index == self.FRIEDMAN:
return self.getFriedmanIndex()
elif self.index == self.MOMENT:
return self.getMomentIndex()
elif self.index == self.SILHUETTE:
return self.getSilhouetteBasedIndex()
elif self.index == self.HARTINGAN:
return self.getHartinganBasedIndex()
def getFriedmanIndex(self, J=5):
if self.dim == 1:
def func(alpha, Z=self.Z, J=J, n=self.totalSize):
vals, pols, val = friedman_tmp_func(alpha, Z=Z, J=J, n=n)
return val
elif self.dim == 2:
def func(alpha, Z=self.Z, J=J, n=self.totalSize):
alpha1, alpha2 = alpha[:self.nVars], alpha[self.nVars:]
vals1, pols, val1 = friedman_tmp_func(alpha1, Z=Z, J=J, n=n)
vals2, pols, val2 = friedman_tmp_func(alpha2, Z=Z, J=J, n=n)
val12 = - 0.5 * sum([sum([(2*j+1)*(2*k+1)*vals1[j]*vals2[k] for k in range(0, J+1-j)]) \
for j in range(0,J+1)])
## print val1, val2
return 0.5 * (val1 + val2 + val12)
return func
def getMomentIndex(self): # a factor of 1./12 could be added here
if self.dim == 1:
def func(alpha):
smpl = numpy.dot(self.Z, alpha)
return scipy.stats.kstat(smpl, n=3) ** 2 + 0.25 * scipy.stats.kstat(smpl, n=4)
else:
print "To do."
return func
def getSilhouetteBasedIndex(self, nClusters=5):
import orngClustering
def func(alpha, nClusters=nClusters):
alpha1, alpha2 = alpha[:self.nVars], alpha[self.nVars:]
alpha1 = alpha1.reshape((self.nVars,1))
alpha2 = alpha2.reshape(self.nVars,1)
alpha = numpy.append(alpha1, alpha2, axis=1)
smpl = numpy.dot(self.Z, alpha)
smpl = orange.ExampleTable(smpl)
km = orngClustering.KMeans(smpl, centroids=nClusters)
score = orngClustering.score_silhouette(km)
return -score
import functools
silhIndex = functools.partial(func, nClusters=nClusters)
return silhIndex
def getHartinganBasedIndex(self, nClusters=5):
import orngClustering
def func(alpha, nClusters=nClusters):
alpha1, alpha2 = alpha[:self.nVars], alpha[self.nVars:]
alpha1 = alpha1.reshape((self.nVars,1))
alpha2 = alpha2.reshape(self.nVars,1)
alpha = numpy.append(alpha1, alpha2, axis=1)
smpl = numpy.dot(self.Z, alpha)
smpl = orange.ExampleTable(smpl)
km1 = orngClustering.KMeans(smpl, centroids=nClusters)
km2 = orngClustering.KMeans(smpl, centroids=nClusters)
score = (self.totalSize - nClusters - 1) * (km1.score-km2.score) / (km2.score)
return -score
import functools
hartinganIndex = functools.partial(func, nClusters=nClusters)
return hartinganIndex
def draw_scatter_hist(x,y, fileName="lala.png"):
from matplotlib.ticker import NullFormatter
nullfmt = NullFormatter() # no labels
clf()
# definitions for the axes
left, width = 0.1, 0.65
bottom, height = 0.1, 0.65
bottom_h = left_h = left+width+0.02
rect_scatter = [left, bottom, width, height]
rect_histx = [left, bottom_h, width, 0.2]
rect_histy = [left_h, bottom, 0.2, height]
# start with a rectangular Figure
figure(1, figsize=(8,8))
axScatter = axes(rect_scatter)
axHistx = axes(rect_histx)
axHisty = axes(rect_histy)
# no labels
axHistx.xaxis.set_major_formatter(nullfmt)
axHisty.yaxis.set_major_formatter(nullfmt)
# the scatter plot:
axScatter.scatter(x, y)
# now determine nice limits by hand:
binwidth = 0.25
xymax = numpy.max([numpy.max(np.fabs(x)), numpy.max(np.fabs(y))])
lim = (int(xymax/binwidth) + 1) * binwidth
axScatter.set_xlim( (-lim, lim) )
axScatter.set_ylim( (-lim, lim) )
bins = numpy.arange(-lim, lim + binwidth, binwidth)
axHistx.hist(x, bins=bins)
axHisty.hist(y, bins=bins, orientation='horizontal')
axHistx.set_xlim(axScatter.get_xlim())
axHisty.set_ylim(axScatter.get_ylim())
savefig(fileName)
if __name__=="__main__":
## data = orange.ExampleTable("c:\\Work\\Subgroup discovery\\iris.tab")
data = orange.ExampleTable(r"E:\Development\Orange Datasets\UCI\iris.tab")
data = data.select(data.domain.attributes)
impmin = orange.ImputerConstructor_minimal(data)
data = impmin(data)
ppy = ProjectionPursuit(data, dim=2, maxiter=100)
#ppy.friedman_index(J=5)
#ppy.silhouette_based_index(nClusters=2)
## import os
## os.chdir("C:\\Work\\Subgroup discovery")
#draw_scatter_hist(ppy.friedmanProjData[:,0], ppy.friedmanProjData[:,1])
#draw_scatter_hist(ppy.silhouetteProjData[:,0], ppy.silhouetteProjData[:,1])
print ppy.optimize()
| gpl-3.0 |
Reagankm/KnockKnock | venv/lib/python3.4/site-packages/mpl_toolkits/axes_grid1/inset_locator.py | 8 | 10138 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from matplotlib.offsetbox import AnchoredOffsetbox
#from matplotlib.transforms import IdentityTransform
import matplotlib.transforms as mtrans
from .parasite_axes import HostAxes # subclasses mpl_axes
from matplotlib.transforms import Bbox, TransformedBbox, IdentityTransform
from matplotlib.patches import Patch
from matplotlib.path import Path
from matplotlib.patches import Rectangle
class InsetPosition(object):
def __init__(self, parent, lbwh):
self.parent = parent
self.lbwh = lbwh # position of the inset axes in
# the normalized coordinate of the parent axes
def __call__(self, ax, renderer):
bbox_parent = self.parent.get_position(original=False)
trans = mtrans.BboxTransformTo(bbox_parent)
bbox_inset = mtrans.Bbox.from_bounds(*self.lbwh)
bb = mtrans.TransformedBbox(bbox_inset, trans)
return bb
class AnchoredLocatorBase(AnchoredOffsetbox):
def __init__(self, bbox_to_anchor, offsetbox, loc,
borderpad=0.5, bbox_transform=None):
super(AnchoredLocatorBase, self).__init__(loc,
pad=0., child=None,
borderpad=borderpad,
bbox_to_anchor=bbox_to_anchor,
bbox_transform=bbox_transform)
def draw(self, renderer):
raise RuntimeError("No draw method should be called")
def __call__(self, ax, renderer):
fontsize = renderer.points_to_pixels(self.prop.get_size_in_points())
self._update_offset_func(renderer, fontsize)
width, height, xdescent, ydescent = self.get_extent(renderer)
px, py = self.get_offset(width, height, 0, 0, renderer)
bbox_canvas = mtrans.Bbox.from_bounds(px, py, width, height)
tr = ax.figure.transFigure.inverted()
bb = mtrans.TransformedBbox(bbox_canvas, tr)
return bb
from . import axes_size as Size
class AnchoredSizeLocator(AnchoredLocatorBase):
def __init__(self, bbox_to_anchor, x_size, y_size, loc,
borderpad=0.5, bbox_transform=None):
self.axes = None
self.x_size = Size.from_any(x_size)
self.y_size = Size.from_any(y_size)
super(AnchoredSizeLocator, self).__init__(bbox_to_anchor, None, loc,
borderpad=borderpad,
bbox_transform=bbox_transform)
def get_extent(self, renderer):
x, y, w, h = self.get_bbox_to_anchor().bounds
dpi = renderer.points_to_pixels(72.)
r, a = self.x_size.get_size(renderer)
width = w*r + a*dpi
r, a = self.y_size.get_size(renderer)
height = h*r + a*dpi
xd, yd = 0, 0
fontsize = renderer.points_to_pixels(self.prop.get_size_in_points())
pad = self.pad * fontsize
return width+2*pad, height+2*pad, xd+pad, yd+pad
def __call__(self, ax, renderer):
self.axes = ax
return super(AnchoredSizeLocator, self).__call__(ax, renderer)
class AnchoredZoomLocator(AnchoredLocatorBase):
def __init__(self, parent_axes, zoom, loc,
borderpad=0.5,
bbox_to_anchor=None,
bbox_transform=None):
self.parent_axes = parent_axes
self.zoom = zoom
if bbox_to_anchor is None:
bbox_to_anchor = parent_axes.bbox
super(AnchoredZoomLocator, self).__init__(bbox_to_anchor, None, loc,
borderpad=borderpad,
bbox_transform=bbox_transform)
self.axes = None
def get_extent(self, renderer):
bb = mtrans.TransformedBbox(self.axes.viewLim,
self.parent_axes.transData)
x, y, w, h = bb.bounds
xd, yd = 0, 0
fontsize = renderer.points_to_pixels(self.prop.get_size_in_points())
pad = self.pad * fontsize
return w*self.zoom+2*pad, h*self.zoom+2*pad, xd+pad, yd+pad
def __call__(self, ax, renderer):
self.axes = ax
return super(AnchoredZoomLocator, self).__call__(ax, renderer)
class BboxPatch(Patch):
def __init__(self, bbox, **kwargs):
if "transform" in kwargs:
raise ValueError("transform should not be set")
kwargs["transform"] = IdentityTransform()
Patch.__init__(self, **kwargs)
self.bbox = bbox
def get_path(self):
x0, y0, x1, y1 = self.bbox.extents
verts = [(x0, y0),
(x1, y0),
(x1, y1),
(x0, y1),
(x0, y0),
(0, 0)]
codes = [Path.MOVETO,
Path.LINETO,
Path.LINETO,
Path.LINETO,
Path.LINETO,
Path.CLOSEPOLY]
return Path(verts, codes)
class BboxConnector(Patch):
@staticmethod
def get_bbox_edge_pos(bbox, loc):
x0, y0, x1, y1 = bbox.extents
if loc == 1:
return x1, y1
elif loc == 2:
return x0, y1
elif loc == 3:
return x0, y0
elif loc == 4:
return x1, y0
@staticmethod
def connect_bbox(bbox1, bbox2, loc1, loc2=None):
if isinstance(bbox1, Rectangle):
transform = bbox1.get_transform()
bbox1 = Bbox.from_bounds(0, 0, 1, 1)
bbox1 = TransformedBbox(bbox1, transform)
if isinstance(bbox2, Rectangle):
transform = bbox2.get_transform()
bbox2 = Bbox.from_bounds(0, 0, 1, 1)
bbox2 = TransformedBbox(bbox2, transform)
if loc2 is None:
loc2 = loc1
x1, y1 = BboxConnector.get_bbox_edge_pos(bbox1, loc1)
x2, y2 = BboxConnector.get_bbox_edge_pos(bbox2, loc2)
verts = [[x1, y1], [x2, y2]]
#Path()
codes = [Path.MOVETO, Path.LINETO]
return Path(verts, codes)
def __init__(self, bbox1, bbox2, loc1, loc2=None, **kwargs):
"""
*path* is a :class:`matplotlib.path.Path` object.
Valid kwargs are:
%(Patch)s
.. seealso::
:class:`Patch`
For additional kwargs
"""
if "transform" in kwargs:
raise ValueError("transform should not be set")
kwargs["transform"] = IdentityTransform()
Patch.__init__(self, **kwargs)
self.bbox1 = bbox1
self.bbox2 = bbox2
self.loc1 = loc1
self.loc2 = loc2
def get_path(self):
return self.connect_bbox(self.bbox1, self.bbox2,
self.loc1, self.loc2)
class BboxConnectorPatch(BboxConnector):
def __init__(self, bbox1, bbox2, loc1a, loc2a, loc1b, loc2b, **kwargs):
if "transform" in kwargs:
raise ValueError("transform should not be set")
BboxConnector.__init__(self, bbox1, bbox2, loc1a, loc2a, **kwargs)
self.loc1b = loc1b
self.loc2b = loc2b
def get_path(self):
path1 = self.connect_bbox(self.bbox1, self.bbox2, self.loc1, self.loc2)
path2 = self.connect_bbox(self.bbox2, self.bbox1,
self.loc2b, self.loc1b)
path_merged = (list(path1.vertices) +
list(path2.vertices) +
[path1.vertices[0]])
return Path(path_merged)
def _add_inset_axes(parent_axes, inset_axes):
parent_axes.figure.add_axes(inset_axes)
inset_axes.set_navigate(False)
def inset_axes(parent_axes, width, height, loc=1,
bbox_to_anchor=None, bbox_transform=None,
axes_class=None,
axes_kwargs=None,
**kwargs):
if axes_class is None:
axes_class = HostAxes
if axes_kwargs is None:
inset_axes = axes_class(parent_axes.figure, parent_axes.get_position())
else:
inset_axes = axes_class(parent_axes.figure, parent_axes.get_position(),
**axes_kwargs)
if bbox_to_anchor is None:
bbox_to_anchor = parent_axes.bbox
axes_locator = AnchoredSizeLocator(bbox_to_anchor,
width, height,
loc=loc,
bbox_transform=bbox_transform,
**kwargs)
inset_axes.set_axes_locator(axes_locator)
_add_inset_axes(parent_axes, inset_axes)
return inset_axes
def zoomed_inset_axes(parent_axes, zoom, loc=1,
bbox_to_anchor=None, bbox_transform=None,
axes_class=None,
axes_kwargs=None,
**kwargs):
if axes_class is None:
axes_class = HostAxes
if axes_kwargs is None:
inset_axes = axes_class(parent_axes.figure, parent_axes.get_position())
else:
inset_axes = axes_class(parent_axes.figure, parent_axes.get_position(),
**axes_kwargs)
axes_locator = AnchoredZoomLocator(parent_axes, zoom=zoom, loc=loc,
bbox_to_anchor=bbox_to_anchor,
bbox_transform=bbox_transform,
**kwargs)
inset_axes.set_axes_locator(axes_locator)
_add_inset_axes(parent_axes, inset_axes)
return inset_axes
def mark_inset(parent_axes, inset_axes, loc1, loc2, **kwargs):
rect = TransformedBbox(inset_axes.viewLim, parent_axes.transData)
pp = BboxPatch(rect, **kwargs)
parent_axes.add_patch(pp)
p1 = BboxConnector(inset_axes.bbox, rect, loc1=loc1, **kwargs)
inset_axes.add_patch(p1)
p1.set_clip_on(False)
p2 = BboxConnector(inset_axes.bbox, rect, loc1=loc2, **kwargs)
inset_axes.add_patch(p2)
p2.set_clip_on(False)
return pp, p1, p2
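# ---------------------------------------------------------------------------
# Usage sketch (added for illustration, not part of the original module):
#
#   import numpy as np
#   import matplotlib.pyplot as plt
#   from mpl_toolkits.axes_grid1.inset_locator import zoomed_inset_axes, mark_inset
#
#   fig, ax = plt.subplots()
#   x = np.linspace(0, 10, 500)
#   ax.plot(x, np.sin(x))
#   axins = zoomed_inset_axes(ax, zoom=3, loc=4)   # inset in the lower right
#   axins.plot(x, np.sin(x))
#   axins.set_xlim(2.5, 3.5)
#   axins.set_ylim(-0.4, 0.7)
#   mark_inset(ax, axins, loc1=2, loc2=1, fc="none", ec="0.5")
#   plt.show()
# ---------------------------------------------------------------------------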
| gpl-2.0 |
timestocome/Test-stock-prediction-algorithms | Gold/GoldPhaseSpace.py | 1 | 2777 |
# http://github.com/timestocome
# look at phase space plots for gold prices
# see if any strange attractors appear
# The US stopped using the gold standard in 1971
# During the gold standard there is increased volatility during wars
# some volatility during wars after, but it's buried in speculation
# interesting gaps in the gold prices - attractor / repeller prices?
# more digging is needed.
# exploring some of the things in ...
# http://www.chaos.gb.net/ClydeOsler1997.pdf
# I was hoping to find chaos, but other than some unexpected
# price point gaps after the US left the gold standard
# things look pretty linear
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
######################################################################
# load data
########################################################################
# read in gold file
data = pd.read_csv('data/Gold_all.csv', parse_dates=True, index_col=0)
data = data[['Open']]
data['Open'] = pd.to_numeric(data['Open'], errors='coerce')
data['Volatility'] = data['Open'] - data['Open'].shift(1)
data = data.dropna()
gold_standard = data.loc[data.index < '01-01-1971']
gold = data.loc[data.index > '01-01-1971']
print(len(gold_standard), len(gold))
########################################################################
plt.figure(figsize=(18, 15))
plt.title('Gold while US on gold standard')
plt.subplot(4,1,1)
plt.plot(gold_standard['Open'], label='Value')
plt.plot(gold_standard['Volatility'] , label='Volatility')
plt.ylabel('Value, Volatility')
plt.xlabel('Time')
plt.legend(loc='best')
plt.subplot(4,1,2)
plt.scatter(gold_standard['Open'], gold_standard['Volatility'])
plt.xlabel('Value')
plt.ylabel('Volatility')
plt.subplot(4,1,3)
n_bins = 100
plt.hist(gold_standard['Open'], n_bins, normed=1, histtype='bar')
plt.xlabel('Histogram of Value')
plt.subplot(4,1,4)
n_bins = 100
plt.hist(gold_standard['Volatility'], n_bins, normed=1, histtype='bar')
plt.xlabel('Histogram of Volatility')
plt.savefig("Gold_duringGoldStandard.png")
#----------------------------------------------------
plt.figure(figsize=(18, 15))
plt.title("Gold after US ditches gold standard")
plt.subplot(4,1,1)
plt.plot(gold['Open'], label='Value')
plt.plot(gold['Volatility'] , label='Volatility')
plt.ylabel('Value, Volatility')
plt.xlabel('Time')
plt.legend(loc='best')
plt.subplot(4,1,2)
plt.scatter(gold['Open'], gold['Volatility'])
plt.xlabel('Value')
plt.ylabel('Volatility')
plt.subplot(4,1,3)
n_bins = 100
plt.hist(gold['Open'], n_bins, normed=1, histtype='bar')
plt.xlabel('Histogram of Value')
plt.subplot(4,1,4)
n_bins = 100
plt.hist(gold['Volatility'], n_bins, normed=1, histtype='bar')
plt.xlabel('Histogram of Volatility')
plt.savefig("Gold_offGoldStandard.png")
plt.show()
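# ---------------------------------------------------------------------------
# Added sketch (not part of the original analysis): a simple delay-embedding
# phase-space plot, Open(t) against Open(t - lag), for the post-1971 series
# loaded above. The lag of 5 trading days is an arbitrary choice.
lag = 5
delayed = gold['Open'].shift(lag)
plt.figure(figsize=(8, 8))
plt.scatter(gold['Open'], delayed, s=2)
plt.xlabel('Open(t)')
plt.ylabel('Open(t - %d)' % lag)
plt.title('Delay embedding of gold price (lag = %d days)' % lag)
plt.savefig('Gold_delayEmbedding.png')
plt.show()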
| mit |
charanpald/tyre-hug | tyrehug/exp/regressionexp.py | 1 | 2344 | from sklearn.datasets import load_boston, load_diabetes, load_breast_cancer
from sklearn.datasets import make_regression
from sklearn.linear_model import Ridge, LassoCV, ElasticNetCV
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import cross_val_score, train_test_split
from sklearn.preprocessing import StandardScaler
import numpy
numpy.random.seed(21)
numpy.set_printoptions(suppress=True, precision=3)
sigma = 0.3
X, y, v = make_regression(100, 200, n_informative=150, noise=sigma, effective_rank=100, coef=True)
# X, y, v = make_regression(100, 50, n_informative=30, noise=sigma, effective_rank=20, coef=True)
# print(v)
# X, y = load_boston(return_X_y=True)
# X, y = load_diabetes(return_X_y=True)
# X, y = load_breast_cancer(return_X_y=True)
X = StandardScaler().fit_transform(X)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=42)
folds = 5
alphas = numpy.linspace(10.0**-3, 10, 200)
l1_ratios = 1 - numpy.logspace(-10, 0, 100)
# Ridge Regression
mses = numpy.zeros_like(alphas)
for i, alpha in enumerate(alphas):
learner = Ridge(alpha=alpha, fit_intercept=True)
scores = cross_val_score(learner, X, y, cv=folds, scoring="neg_mean_squared_error")
mses[i] = numpy.abs(scores.mean())
learner = Ridge(alpha=alphas[numpy.argmin(mses)], fit_intercept=False)
learner.fit(X_train, y_train)
y_pred = learner.predict(X_test)
mse = mean_squared_error(y_test, y_pred)
print("Ridge Regression: alpha={:.4f} mse={:.3f} nnz={}".format(alphas[numpy.argmin(mses)], mse, numpy.count_nonzero(learner.coef_)))
# LASSO
mses = numpy.zeros_like(alphas)
learner = LassoCV(alphas=alphas, fit_intercept=False, cv=folds, n_jobs=-1)
learner.fit(X_train, y_train)
y_pred = learner.predict(X_test)
mse = mean_squared_error(y_test, y_pred)
print("LASSO: alpha={:.4f} mse={:.3f} nnz={}".format(learner.alpha_, mse, numpy.count_nonzero(learner.coef_)))
print(y_pred)
print(learner.coef_)
# Elastic Net
learner = ElasticNetCV(l1_ratio=l1_ratios, alphas=alphas, fit_intercept=False, cv=folds, n_jobs=-1, max_iter=5000)
learner.fit(X_train, y_train)
y_pred = learner.predict(X_test)
mse = mean_squared_error(y_test, y_pred)
print("Elastic Net: alpha={:.4f} l1_ratio={:.4f} mse={:.3f} nnz={}".format(learner.alpha_, learner.l1_ratio_, mse, numpy.count_nonzero(learner.coef_)))
| mit |
mpatacchiola/dissecting-reinforcement-learning | environments/inverted_pendulum.py | 2 | 7824 | #!/usr/bin/env python
# MIT License
# Copyright (c) 2017 Massimiliano Patacchiola
# https://mpatacchiola.github.io/blog/
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# Implentation of the Inverted Pendulum problem using the notation of the book:
# 'Statistical Reinforcement Learning' by Masashi Sugiyama
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
class InvertedPendulum:
def __init__(self, pole_mass=2.0, cart_mass=8.0, pole_lenght=0.5, delta_t=0.1):
""" Create a new pendulum object.
It is possible to pass the parameters of the simulation.
@param pole_mass: the mass of the pole (default 2.0 Kg)
@param cart_mass: the mass of the cart (default 8.0 Kg)
@param pole_lenght: the lenght of the pole (default 0.5 m)
@param delta_t: the time step in seconds (default 0.1 s)
"""
self.angle_list = list()
self.gravity = 9.8
self.delta_t = delta_t
self.pole_mass = pole_mass
self.cart_mass = cart_mass
self.pole_lenght = pole_lenght
self.angle_t = np.random.normal(0, 0.05) # radians (vertical position)
self.angular_velocity_t = 0.0
self.alpha = 1.0 / (self.pole_mass + self.cart_mass)
def reset(self, exploring_starts=True, initial_angle=0.0):
""" It reset the pendulum to an initial position [0, 2*pi]
@param exploring_starts: if True a random position is taken
@param initial_angle: the initial position of the pendulum (requires exploring_starts=False)
@return: it returns the initial position of the pendulum and the velocity
"""
if exploring_starts:
initial_angle = np.random.uniform(0, np.pi/2.0)
#if initial_angle < -np.pi:
# initial_angle = -np.pi
#elif initial_angle > np.pi:
# initial_angle = np.pi
#else:
self.angle_t = initial_angle # radians (0 is the vertical position)
self.angle_list = [] # clear the list
self.angular_velocity_t = 0.0
self.angle_list.append(initial_angle)
return [self.angle_t, self.angular_velocity_t]
def step(self, action):
"""Perform one step in the environment following the action.
@param action: an integer representing one of three actions [0, 1, 2]
where 0=move_left, 1=do_not_move, 2=move_right
@return: (angle_t1, angular_velocity_t1), reward, done
where reward is 0.0 when the pole is horizontal and 1.0 if vertical
done is True when the goal is reached
"""
if(action >= 3):
raise ValueError("[INVERTED PENDULUM][ERROR] The action value "
+ str(action) + " is out of range.")
done = False
reward = -0.01
action_list = [-50, 0, +50]
action_t = action_list[action]
angular_velocity_t1 = self.angular_velocity_t + \
(self.gravity * np.sin(self.angle_t) - \
self.alpha * self.pole_mass * self.pole_lenght * np.power(self.angular_velocity_t, 2) * (np.sin(2*self.angle_t)/2.0) + \
self.alpha * np.cos(self.angle_t) * action_t) / \
((4/3) * self.pole_lenght - self.alpha * self.pole_mass * self.pole_lenght * np.power(np.cos(self.angle_t), 2)) * self.delta_t
angle_t1 = self.angle_t + (angular_velocity_t1 * self.delta_t)
# Check the limit condition (horizontal pole)
if angle_t1 < -(np.pi/2.0):
angle_t1 = -(np.pi/2.0)
angular_velocity_t1 = 0
if angle_t1 > (np.pi/2.0):
angle_t1 = (np.pi/2.0)
angular_velocity_t1 = 0
# Assign the new position and velocity
self.angle_t = angle_t1
self.angular_velocity_t= angular_velocity_t1
self.angle_list.append(angle_t1)
# Reward and done
if angle_t1 >= (np.pi/2.0) or angle_t1 <= -(np.pi/2.0):
reward = 0.0
done = True
else:
reward = np.cos(angle_t1)
done = False
# Return state_t1, reward, done
return [angle_t1, angular_velocity_t1], reward, done
def render(self, file_path='./inverted_pendulum.mp4', mode='mp4'):
""" When the method is called it saves an animation
of the steps happened until that point in the episode.
Ideally it should be called at the end of the episode,
or every k episodes.
ATTENTION: It requires avconv and/or imagemagick installed.
@param file_path: the name and path of the video file
@param mode: the file can be saved as 'gif' or 'mp4'
"""
# Plot init
fig = plt.figure()
axis_limit = self.pole_lenght + (self.pole_lenght * 0.5)
ax = fig.add_subplot(111, autoscale_on=False, xlim=(-axis_limit, axis_limit), ylim=(0.0, 1.5*axis_limit))
ax.grid(False) # disable the grid
ax.set_aspect('equal')
ax.set_yticklabels([])
# x_line = np.linspace(start=-axis_limit, stop=axis_limit, num=100)
# y_line = np.zeros(100)
# ax.plot(x_line, y_line) # plot the base-line
# line, _ = ax.plot(x, y, 'o-', lw=2)
line, = ax.plot([], [],color='black', linestyle='solid', linewidth=1.5, marker='o', markerfacecolor='#aa0000', markersize=10, zorder=1)
# Adding the brown circle pad
circle = plt.Circle((0.0,-0.01), radius=0.05, color='#2b2200', fill=True, zorder=2)
ax.add_patch(circle)
# Adding the text
time_text = ax.text(0.05, 0.85, '', transform=ax.transAxes)
_angle_list = self.angle_list
_delta_t = self.delta_t
def _init():
line.set_data([], [])
time_text.set_text('')
return line, time_text
def _animate(i):
angle_cos = np.cos(_angle_list[i]) * self.pole_lenght
angle_sin = np.sin(_angle_list[i]) * self.pole_lenght
x1, y1 = [0, angle_sin], [0, angle_cos]
#y1 = (angle_cos, angle_sin)
line.set_data(x1, y1)
time_text.set_text("Time: " + str(np.round(i*_delta_t, 1)) + "s" + '\n' + "Frame: " + str(i))
return line, time_text
ani = animation.FuncAnimation(fig, _animate, np.arange(1, len(self.angle_list)),
blit=True, init_func=_init, repeat=False)
if mode == 'gif':
ani.save(file_path, writer='imagemagick', fps=int(1/self.delta_t))
elif mode == 'mp4':
ani.save(file_path, fps=int(1/self.delta_t), writer='avconv', codec='libx264')
# Clear the figure
fig.clear()
plt.close(fig)
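if __name__ == "__main__":
    # Short demo (added for illustration, not part of the original module):
    # run a single episode with a uniformly random policy and print the return.
    my_pole = InvertedPendulum(pole_mass=2.0, cart_mass=8.0, pole_lenght=0.5, delta_t=0.1)
    observation = my_pole.reset(exploring_starts=True)
    cumulated_reward = 0.0
    for step in range(100):
        action = np.random.randint(0, 3)  # 0=move left, 1=do not move, 2=move right
        observation, reward, done = my_pole.step(action)
        cumulated_reward += reward
        if done:
            break
    print("Episode finished after " + str(step + 1) + " steps, return = " + str(cumulated_reward))
    # my_pole.render(file_path='./inverted_pendulum.mp4', mode='mp4')  # requires avconv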
| mit |
Midafi/scikit-image | doc/examples/plot_shapes.py | 22 | 1913 | """
======
Shapes
======
This example shows how to draw several different shapes:
- line
- Bezier curve
- polygon
- circle
- ellipse
Anti-aliased drawing for:
- line
- circle
"""
import math
import numpy as np
import matplotlib.pyplot as plt
from skimage.draw import (line, polygon, circle,
circle_perimeter,
ellipse, ellipse_perimeter,
bezier_curve)
fig, (ax1, ax2) = plt.subplots(ncols=2, nrows=1, figsize=(10, 6))
img = np.zeros((500, 500, 3), dtype=np.double)
# draw line
rr, cc = line(120, 123, 20, 400)
img[rr, cc, 0] = 255
# fill polygon
poly = np.array((
(300, 300),
(480, 320),
(380, 430),
(220, 590),
(300, 300),
))
rr, cc = polygon(poly[:, 0], poly[:, 1], img.shape)
img[rr, cc, 1] = 1
# fill circle
rr, cc = circle(200, 200, 100, img.shape)
img[rr, cc, :] = (1, 1, 0)
# fill ellipse
rr, cc = ellipse(300, 300, 100, 200, img.shape)
img[rr, cc, 2] = 1
# circle
rr, cc = circle_perimeter(120, 400, 15)
img[rr, cc, :] = (1, 0, 0)
# Bezier curve
rr, cc = bezier_curve(70, 100, 10, 10, 150, 100, 1)
img[rr, cc, :] = (1, 0, 0)
# ellipses
rr, cc = ellipse_perimeter(120, 400, 60, 20, orientation=math.pi / 4.)
img[rr, cc, :] = (1, 0, 1)
rr, cc = ellipse_perimeter(120, 400, 60, 20, orientation=-math.pi / 4.)
img[rr, cc, :] = (0, 0, 1)
rr, cc = ellipse_perimeter(120, 400, 60, 20, orientation=math.pi / 2.)
img[rr, cc, :] = (1, 1, 1)
ax1.imshow(img)
ax1.set_title('No anti-aliasing')
ax1.axis('off')
from skimage.draw import line_aa, circle_perimeter_aa
img = np.zeros((100, 100), dtype=np.double)
# anti-aliased line
rr, cc, val = line_aa(12, 12, 20, 50)
img[rr, cc] = val
# anti-aliased circle
rr, cc, val = circle_perimeter_aa(60, 40, 30)
img[rr, cc] = val
ax2.imshow(img, cmap=plt.cm.gray, interpolation='nearest')
ax2.set_title('Anti-aliasing')
ax2.axis('off')
plt.show()
| bsd-3-clause |
hdmetor/scikit-learn | sklearn/neighbors/approximate.py | 128 | 22351 | """Approximate nearest neighbor search"""
# Author: Maheshakya Wijewardena <[email protected]>
# Joel Nothman <[email protected]>
import numpy as np
import warnings
from scipy import sparse
from .base import KNeighborsMixin, RadiusNeighborsMixin
from ..base import BaseEstimator
from ..utils.validation import check_array
from ..utils import check_random_state
from ..metrics.pairwise import pairwise_distances
from ..random_projection import GaussianRandomProjection
__all__ = ["LSHForest"]
HASH_DTYPE = '>u4'
MAX_HASH_SIZE = np.dtype(HASH_DTYPE).itemsize * 8
def _find_matching_indices(tree, bin_X, left_mask, right_mask):
"""Finds indices in sorted array of integers.
Most significant h bits in the binary representations of the
integers are matched with the items' most significant h bits.
"""
left_index = np.searchsorted(tree, bin_X & left_mask)
right_index = np.searchsorted(tree, bin_X | right_mask,
side='right')
return left_index, right_index
def _find_longest_prefix_match(tree, bin_X, hash_size,
left_masks, right_masks):
"""Find the longest prefix match in tree for each query in bin_X
Most significant bits are considered as the prefix.
"""
hi = np.empty_like(bin_X, dtype=np.intp)
hi.fill(hash_size)
lo = np.zeros_like(bin_X, dtype=np.intp)
res = np.empty_like(bin_X, dtype=np.intp)
left_idx, right_idx = _find_matching_indices(tree, bin_X,
left_masks[hi],
right_masks[hi])
found = right_idx > left_idx
res[found] = lo[found] = hash_size
r = np.arange(bin_X.shape[0])
kept = r[lo < hi] # indices remaining in bin_X mask
while kept.shape[0]:
mid = (lo.take(kept) + hi.take(kept)) // 2
left_idx, right_idx = _find_matching_indices(tree,
bin_X.take(kept),
left_masks[mid],
right_masks[mid])
found = right_idx > left_idx
mid_found = mid[found]
lo[kept[found]] = mid_found + 1
res[kept[found]] = mid_found
hi[kept[~found]] = mid[~found]
kept = r[lo < hi]
return res
class ProjectionToHashMixin(object):
"""Turn a transformed real-valued array into a hash"""
@staticmethod
def _to_hash(projected):
if projected.shape[1] % 8 != 0:
raise ValueError('Require reduced dimensionality to be a multiple '
'of 8 for hashing')
# XXX: perhaps non-copying operation better
out = np.packbits((projected > 0).astype(int)).view(dtype=HASH_DTYPE)
return out.reshape(projected.shape[0], -1)
def fit_transform(self, X, y=None):
self.fit(X)
return self.transform(X)
def transform(self, X, y=None):
return self._to_hash(super(ProjectionToHashMixin, self).transform(X))
class GaussianRandomProjectionHash(ProjectionToHashMixin,
GaussianRandomProjection):
"""Use GaussianRandomProjection to produce a cosine LSH fingerprint"""
def __init__(self,
n_components=8,
random_state=None):
super(GaussianRandomProjectionHash, self).__init__(
n_components=n_components,
random_state=random_state)
def _array_of_arrays(list_of_arrays):
"""Creates an array of array from list of arrays."""
out = np.empty(len(list_of_arrays), dtype=object)
out[:] = list_of_arrays
return out
class LSHForest(BaseEstimator, KNeighborsMixin, RadiusNeighborsMixin):
"""Performs approximate nearest neighbor search using LSH forest.
LSH Forest: Locality Sensitive Hashing forest [1] is an alternative
method for vanilla approximate nearest neighbor search methods.
LSH forest data structure has been implemented using sorted
arrays and binary search and 32 bit fixed-length hashes.
Random projection is used as the hash family which approximates
cosine distance.
The cosine distance is defined as ``1 - cosine_similarity``: the lowest
value is 0 (identical point) but it is bounded above by 2 for the farthest
points. Its value does not depend on the norm of the vector points but
only on their relative angles.
Read more in the :ref:`User Guide <approximate_nearest_neighbors>`.
Parameters
----------
n_estimators : int (default = 10)
Number of trees in the LSH Forest.
min_hash_match : int (default = 4)
lowest hash length to be searched when candidate selection is
performed for nearest neighbors.
n_candidates : int (default = 10)
Minimum number of candidates evaluated per estimator, assuming enough
items meet the `min_hash_match` constraint.
n_neighbors : int (default = 5)
Number of neighbors to be returned from query function when
it is not provided to the :meth:`kneighbors` method.
radius : float, optional (default = 1.0)
Radius from the data point to its neighbors. This is the parameter
space to use by default for the :meth:`radius_neighbors` queries.
radius_cutoff_ratio : float, optional (default = 0.9)
A value ranges from 0 to 1. Radius neighbors will be searched until
the ratio between total neighbors within the radius and the total
candidates becomes less than this value unless it is terminated by
hash length reaching `min_hash_match`.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Attributes
----------
hash_functions_ : list of GaussianRandomProjectionHash objects
Hash function g(p,x) for a tree is an array of 32 randomly generated
float arrays with the same dimension as the data set. This array is
stored in GaussianRandomProjectionHash object and can be obtained
from ``components_`` attribute.
trees_ : array, shape (n_estimators, n_samples)
Each tree (corresponding to a hash function) contains an array of
sorted hashed values. The array representation may change in future
versions.
original_indices_ : array, shape (n_estimators, n_samples)
Original indices of sorted hashed values in the fitted index.
References
----------
.. [1] M. Bawa, T. Condie and P. Ganesan, "LSH Forest: Self-Tuning
Indexes for Similarity Search", WWW '05 Proceedings of the
14th international conference on World Wide Web, 651-660,
2005.
Examples
--------
>>> from sklearn.neighbors import LSHForest
>>> X_train = [[5, 5, 2], [21, 5, 5], [1, 1, 1], [8, 9, 1], [6, 10, 2]]
>>> X_test = [[9, 1, 6], [3, 1, 10], [7, 10, 3]]
>>> lshf = LSHForest()
>>> lshf.fit(X_train) # doctest: +NORMALIZE_WHITESPACE
LSHForest(min_hash_match=4, n_candidates=50, n_estimators=10,
n_neighbors=5, radius=1.0, radius_cutoff_ratio=0.9,
random_state=None)
>>> distances, indices = lshf.kneighbors(X_test, n_neighbors=2)
>>> distances # doctest: +ELLIPSIS
array([[ 0.069..., 0.149...],
[ 0.229..., 0.481...],
[ 0.004..., 0.014...]])
>>> indices
array([[1, 2],
[2, 0],
[4, 0]])
"""
def __init__(self, n_estimators=10, radius=1.0, n_candidates=50,
n_neighbors=5, min_hash_match=4, radius_cutoff_ratio=.9,
random_state=None):
self.n_estimators = n_estimators
self.radius = radius
self.random_state = random_state
self.n_candidates = n_candidates
self.n_neighbors = n_neighbors
self.min_hash_match = min_hash_match
self.radius_cutoff_ratio = radius_cutoff_ratio
def _compute_distances(self, query, candidates):
"""Computes the cosine distance.
Distance is from the query to points in the candidates array.
Returns argsort of distances in the candidates
array and sorted distances.
"""
if candidates.shape == (0,):
# needed since _fit_X[np.array([])] doesn't work if _fit_X sparse
return np.empty(0, dtype=np.int), np.empty(0, dtype=float)
if sparse.issparse(self._fit_X):
candidate_X = self._fit_X[candidates]
else:
candidate_X = self._fit_X.take(candidates, axis=0, mode='clip')
distances = pairwise_distances(query, candidate_X,
metric='cosine')[0]
distance_positions = np.argsort(distances)
distances = distances.take(distance_positions, mode='clip', axis=0)
return distance_positions, distances
def _generate_masks(self):
"""Creates left and right masks for all hash lengths."""
tri_size = MAX_HASH_SIZE + 1
# Called once on fitting, output is independent of hashes
left_mask = np.tril(np.ones((tri_size, tri_size), dtype=int))[:, 1:]
right_mask = left_mask[::-1, ::-1]
self._left_mask = np.packbits(left_mask).view(dtype=HASH_DTYPE)
self._right_mask = np.packbits(right_mask).view(dtype=HASH_DTYPE)
def _get_candidates(self, query, max_depth, bin_queries, n_neighbors):
"""Performs the Synchronous ascending phase.
Returns an array of candidates, their distance ranks and
distances.
"""
index_size = self._fit_X.shape[0]
# Number of candidates considered including duplicates
# XXX: not sure whether this is being calculated correctly wrt
# duplicates from different iterations through a single tree
n_candidates = 0
candidate_set = set()
min_candidates = self.n_candidates * self.n_estimators
while (max_depth > self.min_hash_match and
(n_candidates < min_candidates or
len(candidate_set) < n_neighbors)):
left_mask = self._left_mask[max_depth]
right_mask = self._right_mask[max_depth]
for i in range(self.n_estimators):
start, stop = _find_matching_indices(self.trees_[i],
bin_queries[i],
left_mask, right_mask)
n_candidates += stop - start
candidate_set.update(
self.original_indices_[i][start:stop].tolist())
max_depth -= 1
candidates = np.fromiter(candidate_set, count=len(candidate_set),
dtype=np.intp)
# For insufficient candidates, candidates are filled.
# Candidates are filled from unselected indices uniformly.
if candidates.shape[0] < n_neighbors:
warnings.warn(
"Number of candidates is not sufficient to retrieve"
" %i neighbors with"
" min_hash_match = %i. Candidates are filled up"
" uniformly from unselected"
" indices." % (n_neighbors, self.min_hash_match))
remaining = np.setdiff1d(np.arange(0, index_size), candidates)
to_fill = n_neighbors - candidates.shape[0]
candidates = np.concatenate((candidates, remaining[:to_fill]))
ranks, distances = self._compute_distances(query,
candidates.astype(int))
return (candidates[ranks[:n_neighbors]],
distances[:n_neighbors])
def _get_radius_neighbors(self, query, max_depth, bin_queries, radius):
"""Finds radius neighbors from the candidates obtained.
Their distances from query are smaller than radius.
Returns radius neighbors and distances.
"""
ratio_within_radius = 1
threshold = 1 - self.radius_cutoff_ratio
total_candidates = np.array([], dtype=int)
total_neighbors = np.array([], dtype=int)
total_distances = np.array([], dtype=float)
while (max_depth > self.min_hash_match and
ratio_within_radius > threshold):
left_mask = self._left_mask[max_depth]
right_mask = self._right_mask[max_depth]
candidates = []
for i in range(self.n_estimators):
start, stop = _find_matching_indices(self.trees_[i],
bin_queries[i],
left_mask, right_mask)
candidates.extend(
self.original_indices_[i][start:stop].tolist())
candidates = np.setdiff1d(candidates, total_candidates)
total_candidates = np.append(total_candidates, candidates)
ranks, distances = self._compute_distances(query, candidates)
m = np.searchsorted(distances, radius, side='right')
positions = np.searchsorted(total_distances, distances[:m])
total_neighbors = np.insert(total_neighbors, positions,
candidates[ranks[:m]])
total_distances = np.insert(total_distances, positions,
distances[:m])
ratio_within_radius = (total_neighbors.shape[0] /
float(total_candidates.shape[0]))
max_depth = max_depth - 1
return total_neighbors, total_distances
def fit(self, X, y=None):
"""Fit the LSH forest on the data.
This creates binary hashes of input data points by getting the
dot product of input points and hash_function then
transforming the projection into a binary string array based
on the sign (positive/negative) of the projection.
A sorted array of binary hashes is created.
Parameters
----------
X : array_like or sparse (CSR) matrix, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
self : object
Returns self.
"""
self._fit_X = check_array(X, accept_sparse='csr')
# Creates a g(p,x) for each tree
self.hash_functions_ = []
self.trees_ = []
self.original_indices_ = []
rng = check_random_state(self.random_state)
int_max = np.iinfo(np.int32).max
for i in range(self.n_estimators):
# This is g(p,x) for a particular tree.
# Builds a single tree. Hashing is done on an array of data points.
# `GaussianRandomProjection` is used for hashing.
# `n_components=hash size and n_features=n_dim.
hasher = GaussianRandomProjectionHash(MAX_HASH_SIZE,
rng.randint(0, int_max))
hashes = hasher.fit_transform(self._fit_X)[:, 0]
original_index = np.argsort(hashes)
bin_hashes = hashes[original_index]
self.original_indices_.append(original_index)
self.trees_.append(bin_hashes)
self.hash_functions_.append(hasher)
self._generate_masks()
return self
def _query(self, X):
"""Performs descending phase to find maximum depth."""
# Calculate hashes of shape (n_samples, n_estimators, [hash_size])
bin_queries = np.asarray([hasher.transform(X)[:, 0]
for hasher in self.hash_functions_])
bin_queries = np.rollaxis(bin_queries, 1)
# descend phase
depths = [_find_longest_prefix_match(tree, tree_queries, MAX_HASH_SIZE,
self._left_mask, self._right_mask)
for tree, tree_queries in zip(self.trees_,
np.rollaxis(bin_queries, 1))]
return bin_queries, np.max(depths, axis=0)
def kneighbors(self, X, n_neighbors=None, return_distance=True):
"""Returns n_neighbors of approximate nearest neighbors.
Parameters
----------
X : array_like or sparse (CSR) matrix, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single query.
n_neighbors : int, optional (default = None)
Number of neighbors required. If not provided, this will
return the number specified at the initialization.
return_distance : boolean, optional (default = False)
Returns the distances of neighbors if set to True.
Returns
-------
dist : array, shape (n_samples, n_neighbors)
Array representing the cosine distances to each point,
only present if return_distance=True.
ind : array, shape (n_samples, n_neighbors)
Indices of the approximate nearest points in the population
matrix.
"""
if not hasattr(self, 'hash_functions_'):
raise ValueError("estimator should be fitted.")
if n_neighbors is None:
n_neighbors = self.n_neighbors
X = check_array(X, accept_sparse='csr')
neighbors, distances = [], []
bin_queries, max_depth = self._query(X)
for i in range(X.shape[0]):
neighs, dists = self._get_candidates(X[i], max_depth[i],
bin_queries[i],
n_neighbors)
neighbors.append(neighs)
distances.append(dists)
if return_distance:
return np.array(distances), np.array(neighbors)
else:
return np.array(neighbors)
def radius_neighbors(self, X, radius=None, return_distance=True):
"""Finds the neighbors within a given radius of a point or points.
Return the indices and distances of some points from the dataset
lying in a ball with size ``radius`` around the points of the query
array. Points lying on the boundary are included in the results.
The result points are *not* necessarily sorted by distance to their
query point.
LSH Forest being an approximate method, some true neighbors from the
indexed dataset might be missing from the results.
Parameters
----------
X : array_like or sparse (CSR) matrix, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single query.
radius : float
Limiting distance of neighbors to return.
(default is the value passed to the constructor).
return_distance : boolean, optional (default = False)
Returns the distances of neighbors if set to True.
Returns
-------
dist : array, shape (n_samples,) of arrays
Each element is an array representing the cosine distances
to some points found within ``radius`` of the respective query.
Only present if ``return_distance=True``.
ind : array, shape (n_samples,) of arrays
Each element is an array of indices for neighbors within ``radius``
of the respective query.
"""
if not hasattr(self, 'hash_functions_'):
raise ValueError("estimator should be fitted.")
if radius is None:
radius = self.radius
X = check_array(X, accept_sparse='csr')
neighbors, distances = [], []
bin_queries, max_depth = self._query(X)
for i in range(X.shape[0]):
neighs, dists = self._get_radius_neighbors(X[i], max_depth[i],
bin_queries[i], radius)
neighbors.append(neighs)
distances.append(dists)
if return_distance:
return _array_of_arrays(distances), _array_of_arrays(neighbors)
else:
return _array_of_arrays(neighbors)
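    # --- Illustrative usage (annotation added for clarity, not part of the original source) ---
    # A minimal sketch, assuming `lshf` is a fitted LSHForest and `X_query` a
    # query array defined elsewhere:
    #
    #     distances, indices = lshf.radius_neighbors(X_query, radius=0.4)
    #
    # Each element of `distances` / `indices` is a variable-length array, since
    # different queries can have different numbers of neighbors within the radius.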
def partial_fit(self, X, y=None):
"""
Inserts new data into the already fitted LSH Forest.
Cost is proportional to new total size, so additions
should be batched.
Parameters
----------
X : array_like or sparse (CSR) matrix, shape (n_samples, n_features)
New data point to be inserted into the LSH Forest.
"""
X = check_array(X, accept_sparse='csr')
if not hasattr(self, 'hash_functions_'):
return self.fit(X)
if X.shape[1] != self._fit_X.shape[1]:
raise ValueError("Number of features in X and"
" fitted array does not match.")
n_samples = X.shape[0]
n_indexed = self._fit_X.shape[0]
for i in range(self.n_estimators):
bin_X = self.hash_functions_[i].transform(X)[:, 0]
# gets the position to be added in the tree.
positions = self.trees_[i].searchsorted(bin_X)
# adds the hashed value into the tree.
self.trees_[i] = np.insert(self.trees_[i],
positions, bin_X)
# add the entry into the original_indices_.
self.original_indices_[i] = np.insert(self.original_indices_[i],
positions,
np.arange(n_indexed,
n_indexed +
n_samples))
# adds the entry into the input_array.
if sparse.issparse(X) or sparse.issparse(self._fit_X):
self._fit_X = sparse.vstack((self._fit_X, X))
else:
self._fit_X = np.row_stack((self._fit_X, X))
return self
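    # --- Illustrative usage (annotation added for clarity, not part of the original source) ---
    # A minimal sketch of batched insertion, assuming `X_initial` and `X_new`
    # are arrays defined elsewhere:
    #
    #     lshf = LSHForest().fit(X_initial)
    #     lshf.partial_fit(X_new)   # cost grows with the new total size
    #
    # As the docstring notes, additions should be batched rather than made one
    # sample at a time.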
| bsd-3-clause |
louisLouL/pair_trading | capstone_env/lib/python3.6/site-packages/matplotlib/tests/test_dates.py | 2 | 17479 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
from six.moves import map
import datetime
import warnings
import tempfile
import pytest
import dateutil
import pytz
try:
# mock in python 3.3+
from unittest import mock
except ImportError:
import mock
from matplotlib.testing.decorators import image_comparison
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
@image_comparison(baseline_images=['date_empty'], extensions=['png'])
def test_date_empty():
# make sure mpl does the right thing when told to plot dates even
# if no date data has been presented, cf
# http://sourceforge.net/tracker/?func=detail&aid=2850075&group_id=80706&atid=560720
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.xaxis_date()
@image_comparison(baseline_images=['date_axhspan'], extensions=['png'])
def test_date_axhspan():
# test ax hspan with date inputs
t0 = datetime.datetime(2009, 1, 20)
tf = datetime.datetime(2009, 1, 21)
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.axhspan(t0, tf, facecolor="blue", alpha=0.25)
ax.set_ylim(t0 - datetime.timedelta(days=5),
tf + datetime.timedelta(days=5))
fig.subplots_adjust(left=0.25)
@image_comparison(baseline_images=['date_axvspan'], extensions=['png'])
def test_date_axvspan():
# test ax hspan with date inputs
t0 = datetime.datetime(2000, 1, 20)
tf = datetime.datetime(2010, 1, 21)
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.axvspan(t0, tf, facecolor="blue", alpha=0.25)
ax.set_xlim(t0 - datetime.timedelta(days=720),
tf + datetime.timedelta(days=720))
fig.autofmt_xdate()
@image_comparison(baseline_images=['date_axhline'],
extensions=['png'])
def test_date_axhline():
# test ax hline with date inputs
t0 = datetime.datetime(2009, 1, 20)
tf = datetime.datetime(2009, 1, 31)
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.axhline(t0, color="blue", lw=3)
ax.set_ylim(t0 - datetime.timedelta(days=5),
tf + datetime.timedelta(days=5))
fig.subplots_adjust(left=0.25)
@image_comparison(baseline_images=['date_axvline'],
extensions=['png'])
def test_date_axvline():
# test ax hline with date inputs
t0 = datetime.datetime(2000, 1, 20)
tf = datetime.datetime(2000, 1, 21)
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.axvline(t0, color="red", lw=3)
ax.set_xlim(t0 - datetime.timedelta(days=5),
tf + datetime.timedelta(days=5))
fig.autofmt_xdate()
def test_too_many_date_ticks():
# Attempt to test SF 2715172, see
# https://sourceforge.net/tracker/?func=detail&aid=2715172&group_id=80706&atid=560720
    # setting equal datetimes triggers an expander call in
# transforms.nonsingular which results in too many ticks in the
# DayLocator. This should trigger a Locator.MAXTICKS RuntimeError
t0 = datetime.datetime(2000, 1, 20)
tf = datetime.datetime(2000, 1, 20)
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.set_xlim((t0, tf), auto=True)
ax.plot([], [])
ax.xaxis.set_major_locator(mdates.DayLocator())
with pytest.raises(RuntimeError):
fig.savefig('junk.png')
@image_comparison(baseline_images=['RRuleLocator_bounds'], extensions=['png'])
def test_RRuleLocator():
import matplotlib.testing.jpl_units as units
units.register()
# This will cause the RRuleLocator to go out of bounds when it tries
# to add padding to the limits, so we make sure it caps at the correct
# boundary values.
t0 = datetime.datetime(1000, 1, 1)
tf = datetime.datetime(6000, 1, 1)
fig = plt.figure()
ax = plt.subplot(111)
ax.set_autoscale_on(True)
ax.plot([t0, tf], [0.0, 1.0], marker='o')
rrule = mdates.rrulewrapper(dateutil.rrule.YEARLY, interval=500)
locator = mdates.RRuleLocator(rrule)
ax.xaxis.set_major_locator(locator)
ax.xaxis.set_major_formatter(mdates.AutoDateFormatter(locator))
ax.autoscale_view()
fig.autofmt_xdate()
def test_RRuleLocator_dayrange():
loc = mdates.DayLocator()
x1 = datetime.datetime(year=1, month=1, day=1, tzinfo=pytz.UTC)
y1 = datetime.datetime(year=1, month=1, day=16, tzinfo=pytz.UTC)
loc.tick_values(x1, y1)
# On success, no overflow error shall be thrown
@image_comparison(baseline_images=['DateFormatter_fractionalSeconds'],
extensions=['png'])
def test_DateFormatter():
import matplotlib.testing.jpl_units as units
units.register()
    # Let's make sure that DateFormatter will allow us to have tick marks
# at intervals of fractional seconds.
t0 = datetime.datetime(2001, 1, 1, 0, 0, 0)
tf = datetime.datetime(2001, 1, 1, 0, 0, 1)
fig = plt.figure()
ax = plt.subplot(111)
ax.set_autoscale_on(True)
ax.plot([t0, tf], [0.0, 1.0], marker='o')
# rrule = mpldates.rrulewrapper( dateutil.rrule.YEARLY, interval=500 )
# locator = mpldates.RRuleLocator( rrule )
# ax.xaxis.set_major_locator( locator )
# ax.xaxis.set_major_formatter( mpldates.AutoDateFormatter(locator) )
ax.autoscale_view()
fig.autofmt_xdate()
def test_date_formatter_strftime():
"""
Tests that DateFormatter matches datetime.strftime,
check microseconds for years before 1900 for bug #3179
as well as a few related issues for years before 1900.
"""
def test_strftime_fields(dt):
"""For datetime object dt, check DateFormatter fields"""
# Note: the last couple of %%s are to check multiple %s are handled
# properly; %% should get replaced by %.
formatter = mdates.DateFormatter("%w %d %m %y %Y %H %I %M %S %%%f %%x")
# Compute date fields without using datetime.strftime,
# since datetime.strftime does not work before year 1900
formatted_date_str = (
"{weekday} {day:02d} {month:02d} {year:02d} {full_year:04d} "
"{hour24:02d} {hour12:02d} {minute:02d} {second:02d} "
"%{microsecond:06d} %x"
.format(
weekday=str((dt.weekday() + 1) % 7),
day=dt.day,
month=dt.month,
year=dt.year % 100,
full_year=dt.year,
hour24=dt.hour,
hour12=((dt.hour-1) % 12) + 1,
minute=dt.minute,
second=dt.second,
microsecond=dt.microsecond))
assert formatter.strftime(dt) == formatted_date_str
try:
# Test strftime("%x") with the current locale.
import locale # Might not exist on some platforms, such as Windows
locale_formatter = mdates.DateFormatter("%x")
locale_d_fmt = locale.nl_langinfo(locale.D_FMT)
expanded_formatter = mdates.DateFormatter(locale_d_fmt)
assert locale_formatter.strftime(dt) == \
expanded_formatter.strftime(dt)
except (ImportError, AttributeError):
pass
for year in range(1, 3000, 71):
# Iterate through random set of years
test_strftime_fields(datetime.datetime(year, 1, 1))
test_strftime_fields(datetime.datetime(year, 2, 3, 4, 5, 6, 12345))
def test_date_formatter_callable():
scale = -11
locator = mock.Mock(_get_unit=mock.Mock(return_value=scale))
callable_formatting_function = (lambda dates, _:
[dt.strftime('%d-%m//%Y') for dt in dates])
formatter = mdates.AutoDateFormatter(locator)
formatter.scaled[-10] = callable_formatting_function
assert formatter([datetime.datetime(2014, 12, 25)]) == ['25-12//2014']
def test_drange():
"""
This test should check if drange works as expected, and if all the
rounding errors are fixed
"""
start = datetime.datetime(2011, 1, 1, tzinfo=mdates.UTC)
end = datetime.datetime(2011, 1, 2, tzinfo=mdates.UTC)
delta = datetime.timedelta(hours=1)
# We expect 24 values in drange(start, end, delta), because drange returns
    # dates from a half-open interval [start, end)
assert len(mdates.drange(start, end, delta)) == 24
# if end is a little bit later, we expect the range to contain one element
# more
end = end + datetime.timedelta(microseconds=1)
assert len(mdates.drange(start, end, delta)) == 25
# reset end
end = datetime.datetime(2011, 1, 2, tzinfo=mdates.UTC)
    # and test drange with "complicated" floats:
    # 4 hours = 1/6 day, this is a "dangerous" float
delta = datetime.timedelta(hours=4)
daterange = mdates.drange(start, end, delta)
assert len(daterange) == 6
assert mdates.num2date(daterange[-1]) == (end - delta)
def test_empty_date_with_year_formatter():
# exposes sf bug 2861426:
# https://sourceforge.net/tracker/?func=detail&aid=2861426&group_id=80706&atid=560720
    # update: I no longer believe this is a bug, as I commented on
# the tracker. The question is now: what to do with this test
import matplotlib.dates as dates
fig = plt.figure()
ax = fig.add_subplot(111)
yearFmt = dates.DateFormatter('%Y')
ax.xaxis.set_major_formatter(yearFmt)
with tempfile.TemporaryFile() as fh:
with pytest.raises(ValueError):
fig.savefig(fh)
def test_auto_date_locator():
def _create_auto_date_locator(date1, date2):
locator = mdates.AutoDateLocator()
locator.create_dummy_axis()
locator.set_view_interval(mdates.date2num(date1),
mdates.date2num(date2))
return locator
d1 = datetime.datetime(1990, 1, 1)
results = ([datetime.timedelta(weeks=52 * 200),
['1990-01-01 00:00:00+00:00', '2010-01-01 00:00:00+00:00',
'2030-01-01 00:00:00+00:00', '2050-01-01 00:00:00+00:00',
'2070-01-01 00:00:00+00:00', '2090-01-01 00:00:00+00:00',
'2110-01-01 00:00:00+00:00', '2130-01-01 00:00:00+00:00',
'2150-01-01 00:00:00+00:00', '2170-01-01 00:00:00+00:00']
],
[datetime.timedelta(weeks=52),
['1990-01-01 00:00:00+00:00', '1990-02-01 00:00:00+00:00',
'1990-03-01 00:00:00+00:00', '1990-04-01 00:00:00+00:00',
'1990-05-01 00:00:00+00:00', '1990-06-01 00:00:00+00:00',
'1990-07-01 00:00:00+00:00', '1990-08-01 00:00:00+00:00',
'1990-09-01 00:00:00+00:00', '1990-10-01 00:00:00+00:00',
'1990-11-01 00:00:00+00:00', '1990-12-01 00:00:00+00:00']
],
[datetime.timedelta(days=141),
['1990-01-05 00:00:00+00:00', '1990-01-26 00:00:00+00:00',
'1990-02-16 00:00:00+00:00', '1990-03-09 00:00:00+00:00',
'1990-03-30 00:00:00+00:00', '1990-04-20 00:00:00+00:00',
'1990-05-11 00:00:00+00:00']
],
[datetime.timedelta(days=40),
['1990-01-03 00:00:00+00:00', '1990-01-10 00:00:00+00:00',
'1990-01-17 00:00:00+00:00', '1990-01-24 00:00:00+00:00',
'1990-01-31 00:00:00+00:00', '1990-02-07 00:00:00+00:00']
],
[datetime.timedelta(hours=40),
['1990-01-01 00:00:00+00:00', '1990-01-01 04:00:00+00:00',
'1990-01-01 08:00:00+00:00', '1990-01-01 12:00:00+00:00',
'1990-01-01 16:00:00+00:00', '1990-01-01 20:00:00+00:00',
'1990-01-02 00:00:00+00:00', '1990-01-02 04:00:00+00:00',
'1990-01-02 08:00:00+00:00', '1990-01-02 12:00:00+00:00',
'1990-01-02 16:00:00+00:00']
],
[datetime.timedelta(minutes=20),
['1990-01-01 00:00:00+00:00', '1990-01-01 00:05:00+00:00',
'1990-01-01 00:10:00+00:00', '1990-01-01 00:15:00+00:00',
'1990-01-01 00:20:00+00:00']
],
[datetime.timedelta(seconds=40),
['1990-01-01 00:00:00+00:00', '1990-01-01 00:00:05+00:00',
'1990-01-01 00:00:10+00:00', '1990-01-01 00:00:15+00:00',
'1990-01-01 00:00:20+00:00', '1990-01-01 00:00:25+00:00',
'1990-01-01 00:00:30+00:00', '1990-01-01 00:00:35+00:00',
'1990-01-01 00:00:40+00:00']
],
[datetime.timedelta(microseconds=1500),
['1989-12-31 23:59:59.999507+00:00',
'1990-01-01 00:00:00+00:00',
'1990-01-01 00:00:00.000502+00:00',
'1990-01-01 00:00:00.001005+00:00',
'1990-01-01 00:00:00.001508+00:00']
],
)
for t_delta, expected in results:
d2 = d1 + t_delta
locator = _create_auto_date_locator(d1, d2)
assert list(map(str, mdates.num2date(locator()))) == expected
@image_comparison(baseline_images=['date_inverted_limit'],
extensions=['png'])
def test_date_inverted_limit():
# test ax hline with date inputs
t0 = datetime.datetime(2009, 1, 20)
tf = datetime.datetime(2009, 1, 31)
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.axhline(t0, color="blue", lw=3)
ax.set_ylim(t0 - datetime.timedelta(days=5),
tf + datetime.timedelta(days=5))
ax.invert_yaxis()
fig.subplots_adjust(left=0.25)
def _test_date2num_dst(date_range, tz_convert):
# Timezones
BRUSSELS = pytz.timezone('Europe/Brussels')
UTC = pytz.UTC
# Create a list of timezone-aware datetime objects in UTC
# Interval is 0b0.0000011 days, to prevent float rounding issues
dtstart = datetime.datetime(2014, 3, 30, 0, 0, tzinfo=UTC)
interval = datetime.timedelta(minutes=33, seconds=45)
interval_days = 0.0234375 # 2025 / 86400 seconds
N = 8
dt_utc = date_range(start=dtstart, freq=interval, periods=N)
dt_bxl = tz_convert(dt_utc, BRUSSELS)
expected_ordinalf = [735322.0 + (i * interval_days) for i in range(N)]
actual_ordinalf = list(mdates.date2num(dt_bxl))
assert actual_ordinalf == expected_ordinalf
def test_date2num_dst():
# Test for github issue #3896, but in date2num around DST transitions
# with a timezone-aware pandas date_range object.
class dt_tzaware(datetime.datetime):
"""
This bug specifically occurs because of the normalization behavior of
pandas Timestamp objects, so in order to replicate it, we need a
datetime-like object that applies timezone normalization after
subtraction.
"""
def __sub__(self, other):
r = super(dt_tzaware, self).__sub__(other)
tzinfo = getattr(r, 'tzinfo', None)
if tzinfo is not None:
localizer = getattr(tzinfo, 'normalize', None)
if localizer is not None:
r = tzinfo.normalize(r)
if isinstance(r, datetime.datetime):
r = self.mk_tzaware(r)
return r
def __add__(self, other):
return self.mk_tzaware(super(dt_tzaware, self).__add__(other))
def astimezone(self, tzinfo):
dt = super(dt_tzaware, self).astimezone(tzinfo)
return self.mk_tzaware(dt)
@classmethod
def mk_tzaware(cls, datetime_obj):
kwargs = {}
attrs = ('year',
'month',
'day',
'hour',
'minute',
'second',
'microsecond',
'tzinfo')
for attr in attrs:
val = getattr(datetime_obj, attr, None)
if val is not None:
kwargs[attr] = val
return cls(**kwargs)
# Define a date_range function similar to pandas.date_range
def date_range(start, freq, periods):
dtstart = dt_tzaware.mk_tzaware(start)
return [dtstart + (i * freq) for i in range(periods)]
# Define a tz_convert function that converts a list to a new time zone.
def tz_convert(dt_list, tzinfo):
return [d.astimezone(tzinfo) for d in dt_list]
_test_date2num_dst(date_range, tz_convert)
def test_date2num_dst_pandas():
# Test for github issue #3896, but in date2num around DST transitions
# with a timezone-aware pandas date_range object.
pd = pytest.importorskip('pandas')
def tz_convert(*args):
return pd.DatetimeIndex.tz_convert(*args).astype(object)
_test_date2num_dst(pd.date_range, tz_convert)
def test_DayLocator():
with pytest.raises(ValueError):
mdates.DayLocator(interval=-1)
with pytest.raises(ValueError):
mdates.DayLocator(interval=-1.5)
with pytest.raises(ValueError):
mdates.DayLocator(interval=0)
with pytest.raises(ValueError):
mdates.DayLocator(interval=1.3)
mdates.DayLocator(interval=1.0)
def test_tz_utc():
dt = datetime.datetime(1970, 1, 1, tzinfo=mdates.UTC)
dt.tzname()
@pytest.mark.parametrize("x, tdelta",
[(1, datetime.timedelta(days=1)),
([1, 1.5], [datetime.timedelta(days=1),
datetime.timedelta(days=1.5)])])
def test_num2timedelta(x, tdelta):
dt = mdates.num2timedelta(x)
assert dt == tdelta
| mit |
vortex-ape/scikit-learn | sklearn/preprocessing/tests/test_common.py | 21 | 5426 | import warnings
import pytest
import numpy as np
from scipy import sparse
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn.base import clone
from sklearn.preprocessing import maxabs_scale
from sklearn.preprocessing import minmax_scale
from sklearn.preprocessing import scale
from sklearn.preprocessing import power_transform
from sklearn.preprocessing import quantile_transform
from sklearn.preprocessing import robust_scale
from sklearn.preprocessing import MaxAbsScaler
from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import PowerTransformer
from sklearn.preprocessing import QuantileTransformer
from sklearn.preprocessing import RobustScaler
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_allclose
iris = load_iris()
def _get_valid_samples_by_column(X, col):
"""Get non NaN samples in column of X"""
return X[:, [col]][~np.isnan(X[:, col])]
@pytest.mark.parametrize(
"est, func, support_sparse, strictly_positive",
[(MaxAbsScaler(), maxabs_scale, True, False),
(MinMaxScaler(), minmax_scale, False, False),
(StandardScaler(), scale, False, False),
(StandardScaler(with_mean=False), scale, True, False),
(PowerTransformer('yeo-johnson'), power_transform, False, False),
(PowerTransformer('box-cox'), power_transform, False, True),
(QuantileTransformer(n_quantiles=10), quantile_transform, True, False),
(RobustScaler(), robust_scale, False, False),
(RobustScaler(with_centering=False), robust_scale, True, False)]
)
def test_missing_value_handling(est, func, support_sparse, strictly_positive):
    # check that the preprocessing method lets NaN pass through
rng = np.random.RandomState(42)
X = iris.data.copy()
n_missing = 50
X[rng.randint(X.shape[0], size=n_missing),
rng.randint(X.shape[1], size=n_missing)] = np.nan
if strictly_positive:
X += np.nanmin(X) + 0.1
X_train, X_test = train_test_split(X, random_state=1)
# sanity check
assert not np.all(np.isnan(X_train), axis=0).any()
assert np.any(np.isnan(X_train), axis=0).all()
assert np.any(np.isnan(X_test), axis=0).all()
X_test[:, 0] = np.nan # make sure this boundary case is tested
with pytest.warns(None) as records:
Xt = est.fit(X_train).transform(X_test)
# ensure no warnings are raised
assert len(records) == 0
# missing values should still be missing, and only them
assert_array_equal(np.isnan(Xt), np.isnan(X_test))
# check that the function leads to the same results as the class
with pytest.warns(None) as records:
Xt_class = est.transform(X_train)
assert len(records) == 0
Xt_func = func(X_train, **est.get_params())
assert_array_equal(np.isnan(Xt_func), np.isnan(Xt_class))
assert_allclose(Xt_func[~np.isnan(Xt_func)], Xt_class[~np.isnan(Xt_class)])
    # check that the inverse transform keeps NaN
Xt_inv = est.inverse_transform(Xt)
assert_array_equal(np.isnan(Xt_inv), np.isnan(X_test))
    # FIXME: we can introduce equal_nan=True in recent versions of numpy.
    # For the moment we just check that non-NaN values are almost equal.
assert_allclose(Xt_inv[~np.isnan(Xt_inv)], X_test[~np.isnan(X_test)])
for i in range(X.shape[1]):
# train only on non-NaN
est.fit(_get_valid_samples_by_column(X_train, i))
# check transforming with NaN works even when training without NaN
with pytest.warns(None) as records:
Xt_col = est.transform(X_test[:, [i]])
assert len(records) == 0
assert_allclose(Xt_col, Xt[:, [i]])
# check non-NaN is handled as before - the 1st column is all nan
if not np.isnan(X_test[:, i]).all():
Xt_col_nonan = est.transform(
_get_valid_samples_by_column(X_test, i))
assert_array_equal(Xt_col_nonan,
Xt_col[~np.isnan(Xt_col.squeeze())])
if support_sparse:
est_dense = clone(est)
est_sparse = clone(est)
with pytest.warns(None) as records:
Xt_dense = est_dense.fit(X_train).transform(X_test)
Xt_inv_dense = est_dense.inverse_transform(Xt_dense)
assert len(records) == 0
for sparse_constructor in (sparse.csr_matrix, sparse.csc_matrix,
sparse.bsr_matrix, sparse.coo_matrix,
sparse.dia_matrix, sparse.dok_matrix,
sparse.lil_matrix):
# check that the dense and sparse inputs lead to the same results
# precompute the matrix to avoid catching side warnings
X_train_sp = sparse_constructor(X_train)
X_test_sp = sparse_constructor(X_test)
with pytest.warns(None) as records:
warnings.simplefilter('ignore', PendingDeprecationWarning)
Xt_sp = est_sparse.fit(X_train_sp).transform(X_test_sp)
assert len(records) == 0
assert_allclose(Xt_sp.A, Xt_dense)
with pytest.warns(None) as records:
warnings.simplefilter('ignore', PendingDeprecationWarning)
Xt_inv_sp = est_sparse.inverse_transform(Xt_sp)
assert len(records) == 0
assert_allclose(Xt_inv_sp.A, Xt_inv_dense)
| bsd-3-clause |
toolforger/sympy | doc/ext/docscrape_sphinx.py | 52 | 7983 | import re
import inspect
import textwrap
import pydoc
import sphinx
from docscrape import NumpyDocString, FunctionDoc, ClassDoc
class SphinxDocString(NumpyDocString):
def __init__(self, docstring, config={}):
self.use_plots = config.get('use_plots', False)
NumpyDocString.__init__(self, docstring, config=config)
# string conversion routines
def _str_header(self, name, symbol='`'):
return ['.. rubric:: ' + name, '']
def _str_field_list(self, name):
return [':' + name + ':']
def _str_indent(self, doc, indent=4):
out = []
for line in doc:
out += [' '*indent + line]
return out
    def _str_signature(self):
        # Signature output is disabled by this early return; the branch below
        # is never reached.
        return ['']
if self['Signature']:
return ['``%s``' % self['Signature']] + ['']
else:
return ['']
def _str_summary(self):
return self['Summary'] + ['']
def _str_extended_summary(self):
return self['Extended Summary'] + ['']
def _str_param_list(self, name):
out = []
if self[name]:
out += self._str_field_list(name)
out += ['']
for param, param_type, desc in self[name]:
out += self._str_indent(['**%s** : %s' % (param.strip(),
param_type)])
out += ['']
out += self._str_indent(desc, 8)
out += ['']
return out
@property
def _obj(self):
if hasattr(self, '_cls'):
return self._cls
elif hasattr(self, '_f'):
return self._f
return None
def _str_member_list(self, name):
"""
Generate a member listing, autosummary:: table where possible,
and a table where not.
"""
out = []
if self[name]:
out += ['.. rubric:: %s' % name, '']
prefix = getattr(self, '_name', '')
if prefix:
prefix = '~%s.' % prefix
## Lines that are commented out are used to make the
## autosummary:: table. Since SymPy does not use the
## autosummary:: functionality, it is easiest to just comment it
## out.
#autosum = []
others = []
for param, param_type, desc in self[name]:
param = param.strip()
#if not self._obj or hasattr(self._obj, param):
# autosum += [" %s%s" % (prefix, param)]
#else:
others.append((param, param_type, desc))
#if autosum:
# out += ['.. autosummary::', ' :toctree:', '']
# out += autosum
if others:
maxlen_0 = max([len(x[0]) for x in others])
maxlen_1 = max([len(x[1]) for x in others])
hdr = "="*maxlen_0 + " " + "="*maxlen_1 + " " + "="*10
fmt = '%%%ds %%%ds ' % (maxlen_0, maxlen_1)
n_indent = maxlen_0 + maxlen_1 + 4
out += [hdr]
for param, param_type, desc in others:
out += [fmt % (param.strip(), param_type)]
out += self._str_indent(desc, n_indent)
out += [hdr]
out += ['']
return out
def _str_section(self, name):
out = []
if self[name]:
out += self._str_header(name)
out += ['']
content = textwrap.dedent("\n".join(self[name])).split("\n")
out += content
out += ['']
return out
def _str_see_also(self, func_role):
out = []
if self['See Also']:
see_also = super(SphinxDocString, self)._str_see_also(func_role)
out = ['.. seealso::', '']
out += self._str_indent(see_also[2:])
return out
def _str_warnings(self):
out = []
if self['Warnings']:
out = ['.. warning::', '']
out += self._str_indent(self['Warnings'])
return out
def _str_index(self):
idx = self['index']
out = []
if len(idx) == 0:
return out
out += ['.. index:: %s' % idx.get('default', '')]
for section, references in idx.items():
if section == 'default':
continue
elif section == 'refguide':
out += [' single: %s' % (', '.join(references))]
else:
out += [' %s: %s' % (section, ','.join(references))]
return out
def _str_references(self):
out = []
if self['References']:
out += self._str_header('References')
if isinstance(self['References'], str):
self['References'] = [self['References']]
out.extend(self['References'])
out += ['']
# Latex collects all references to a separate bibliography,
# so we need to insert links to it
if sphinx.__version__ >= "0.6":
out += ['.. only:: latex', '']
else:
out += ['.. latexonly::', '']
items = []
for line in self['References']:
m = re.match(r'.. \[([a-z0-9._-]+)\]', line, re.I)
if m:
items.append(m.group(1))
out += [' ' + ", ".join(["[%s]_" % item for item in items]), '']
return out
def _str_examples(self):
examples_str = "\n".join(self['Examples'])
if (self.use_plots and 'import matplotlib' in examples_str
and 'plot::' not in examples_str):
out = []
out += self._str_header('Examples')
out += ['.. plot::', '']
out += self._str_indent(self['Examples'])
out += ['']
return out
else:
return self._str_section('Examples')
def __str__(self, indent=0, func_role="obj"):
out = []
out += self._str_signature()
out += self._str_index() + ['']
out += self._str_summary()
out += self._str_extended_summary()
for param_list in ('Parameters', 'Returns', 'Other Parameters',
'Raises', 'Warns'):
out += self._str_param_list(param_list)
out += self._str_warnings()
out += self._str_see_also(func_role)
out += self._str_section('Notes')
out += self._str_references()
out += self._str_examples()
for s in self._other_keys:
out += self._str_section(s)
out += self._str_member_list('Attributes')
out = self._str_indent(out, indent)
return '\n'.join(out)
class SphinxFunctionDoc(SphinxDocString, FunctionDoc):
def __init__(self, obj, doc=None, config={}):
self.use_plots = config.get('use_plots', False)
FunctionDoc.__init__(self, obj, doc=doc, config=config)
class SphinxClassDoc(SphinxDocString, ClassDoc):
def __init__(self, obj, doc=None, func_doc=None, config={}):
self.use_plots = config.get('use_plots', False)
ClassDoc.__init__(self, obj, doc=doc, func_doc=None, config=config)
class SphinxObjDoc(SphinxDocString):
def __init__(self, obj, doc=None, config={}):
self._f = obj
SphinxDocString.__init__(self, doc, config=config)
def get_doc_object(obj, what=None, doc=None, config={}):
if inspect.isclass(obj):
what = 'class'
elif inspect.ismodule(obj):
what = 'module'
elif callable(obj):
what = 'function'
else:
what = 'object'
if what == 'class':
return SphinxClassDoc(obj, func_doc=SphinxFunctionDoc, doc=doc,
config=config)
elif what in ('function', 'method'):
return SphinxFunctionDoc(obj, doc=doc, config=config)
else:
if doc is None:
doc = pydoc.getdoc(obj)
return SphinxObjDoc(obj, doc, config=config)
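# --- Illustrative usage (annotation added for clarity, not part of the original source) ---
# A minimal sketch, assuming numpy is importable in the documentation build
# environment:
#
#     import numpy as np
#     doc = get_doc_object(np.mean, config={'use_plots': False})
#     print(str(doc))    # reStructuredText rendering of the numpydoc docstring
#
# get_doc_object dispatches to SphinxClassDoc, SphinxFunctionDoc or SphinxObjDoc
# depending on whether the object is a class, a callable or anything else.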
| bsd-3-clause |
thekingofkings/chicago-crime | python/tract.py | 2 | 4648 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Sun Dec 18 16:33:22 2016
@author: hj
"""
import matplotlib.pyplot as plt
import numpy as np
from shapely.geometry import Polygon, box
import shapefile
import os
here = os.path.dirname(os.path.abspath(__file__))
import matplotlib
matplotlib.rc('pdf', fonttype=42)
class Tract:
def __init__( self, shp, rec=None ):
"""
Build one Tract object from the shapefile._Shape object
"""
self.bbox = box(*shp.bbox)
self.polygon = Polygon(shp.points)
self.count = {'total': 0} # type: value
self.timeHist = {'total': np.zeros(24)}
        if rec is not None:
self.CA = rec[7]
def containCrime( self, cr ):
"""
return true if the cr record happened within current tract
"""
if self.bbox.contains(cr.point):
if self.polygon.contains(cr.point):
return True
return False
def plotTimeHist(self, keys=None):
"""
Plot the crime time histogram
"""
if len(self.timeHist) == 1:
return
else:
if keys is None:
keys = self.timeHist.keys()
values = [self.timeHist[key] for key in keys]
plt.figure()
for val in values:
plt.plot(val)
plt.legend(keys)
plt.show()
@classmethod
def createAllTractObjects( cls ):
cls.sf = shapefile.Reader(here + '/../data/Census-Tracts-2010/chicago-tract')
cls.tracts = {}
shps = cls.sf.shapes()
for idx, shp in enumerate(shps):
rec = cls.sf.record(idx)
tid = int(rec[2])
trt = Tract(shp, rec)
cls.tracts[tid] = trt
return cls.tracts
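    # --- Illustrative usage (annotation added for clarity, not part of the original source) ---
    # A minimal sketch, assuming the shapefiles under ../data are present:
    #
    #     tracts = Tract.createAllTractObjects()
    #     trt = tracts[280100]          # census tract keyed by its numeric ID
    #     print(trt.CA)                 # community area recorded for that tract
    #
    # createAllCAObjects() below builds the analogous dict keyed by community area.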
@classmethod
def createAllCAObjects( cls ):
cls.casf = shapefile.Reader(here + '/../data/ChiCA_gps/ChiCaGPS')
cls.cas = {}
shps = cls.casf.shapes()
for idx, shp in enumerate(shps):
tid = cls.casf.record(idx)[4]
trt = Tract(shp)
cls.cas[int(tid)] = trt
return cls.cas
@classmethod
def visualizeRegions(cls, residence=[], nightlife=[], professional=[], airport=[76]):
if hasattr(cls, "cas"):
r = cls.cas
elif hasattr(cls, "tracts"):
r = cls.tracts
from descartes import PolygonPatch
f = plt.figure(figsize=(6,6))
ax = f.gca()
for k, s in r.items():
if k in residence:
clr = "blue"
p = s.polygon.centroid
ax.annotate(s = str(k), xy=(p.x, p.y), xytext=(-10, -5), textcoords="offset points", fontsize=12)
elif k in nightlife:
clr = "red"
p = s.polygon.centroid
ax.annotate(s = str(k), xy=(p.x, p.y), xytext=(-10, -5), textcoords="offset points", fontsize=12)
elif k in professional:
clr = "green"
p = s.polygon.centroid
if k != 47:
ax.annotate(s = str(k), xy=(p.x, p.y), xytext=(-10, -5), textcoords="offset points", fontsize=12)
else:
ax.annotate(s = str(k), xy=(p.x, p.y), xytext=(-10, -22), textcoords="offset points", fontsize=12,
arrowprops=dict(width=2, headwidth=5, headlength=5, shrink=0.05, facecolor="black"))
elif k in airport:
clr = "cyan"
p = s.polygon.centroid
ax.annotate(s = str(k), xy=(p.x, p.y), xytext=(-10, -5), textcoords="offset points", fontsize=12)
else:
clr = "white"
ax.add_patch(PolygonPatch(s.polygon, alpha=0.5, fc=clr))
ax.axis("scaled")
ax.axis("off")
plt.tight_layout()
plt.savefig("case-region-on-map.pdf")
# plt.show()
def plotCA_cases():
Tract.createAllCAObjects()
Tract.visualizeRegions(residence=[13,14,15,16], nightlife=[8,32,33], professional=[44,45,47,48])
if __name__ == "__main__":
import sys
Tract.createAllTractObjects()
if len(sys.argv) > 1 and sys.argv[1] == "tractProfile":
rsd = [280100, 320100, 81500, 81403]
nl = []
pf = []
Tract.visualizeRegions(residence=rsd, nightlife=nl, professional=pf)
else:
plotCA_cases()
| mit |
ATNF/askapsdp | Code/Components/Analysis/evaluation/current/scripts/fluxEvalOld.py | 1 | 12707 | #!/usr/bin/env python
"""
"""
import askap.analysis.evaluation
#from matplotlib import *
from pylab import *
from numpy import *
import os
from askap.analysis.evaluation.readData import *
from askap.analysis.evaluation.readDataOLD import *
from askap.analysis.evaluation.distributionPlots import *
from askap.analysis.evaluation.utils import *
from optparse import OptionParser
import askap.parset as parset
#############
#global plotcount
def nextplot (plotcount):
""" """
subplot(4,5,plotcount)
plotcount=plotcount+1
return plotcount
#############
def bigBoxPlot (xvals, yvals, isGood, isLog=True):
""" """
minval = log10(min(xvals))
maxval = log10(max(xvals))
delta = (maxval-minval)/10.
for i in range(10):
xpt = (minval+delta/2.)+i*delta
t = yvals[isGood * (abs(log10(xvals) - xpt)<delta/2.)]
if len(t)>0:
boxplot(t,positions=[10**xpt],widths=0.9*(10**(minval+(i+1)*delta)-10**(minval+i*delta)),sym='')
semilogx(basex=10.)
# axis([min(xvals)*0.9,max(xvals)*1.1,axisrange[2],axisrange[3]])
xlim(min(xvals)*0.9,max(xvals)*1.1)
#############
if __name__ == '__main__':
parser = OptionParser()
parser.add_option("-i","--inputs", dest="inputfile", default="", help="Input parameter file [default: %default]")
(options, args) = parser.parse_args()
if(options.inputfile==''):
inputPars = parset.ParameterSet()
elif(not os.path.exists(options.inputfile)):
print "Input file %s does not exist!\nUsing default parameter values."%options.inputfile
inputPars = parset.ParameterSet()
else:
inputPars = parset.ParameterSet(options.inputfile).fluxEval
matchfile = inputPars.get_value('matchfile',"matches.txt")
missfile = inputPars.get_value('missfile',"misses.txt")
if(not os.access(matchfile,os.F_OK)):
print "Match file %s does not exist. Doing no evaluation"%matchfile
exit(0)
if(not os.access(missfile,os.F_OK)):
print "Miss file %s does not exist. Doing no evaluation"%missfile
exit(0)
matchType,idS,xS,yS,fS,aS,bS,pS,chisq,imagerms,rms,nfree,ndof,npf,npo,idR,xR,yR,fR,aR,bR,pR = read_match_data(matchfile)
missType,id,x,y,f,chisq2,imagerms2,rms2,nfree2,ndof2,npf2,npo2 = read_miss_data(missfile)
# fluxScaling = 1.e6
fluxScaling = 1.
fS = fS * fluxScaling
fR = fR * fluxScaling
imagerms = imagerms * fluxScaling
if(size(x)>0):
print "Match list size = %d, Miss list size = %d (%d source and %d reference)"%(size(xS),size(x),size(missType[missType=='S']),size(missType[missType=='R']))
else:
print "Match list size = %d, Miss list size = %d"%(size(xS),size(x))
dF = fS - fR
rdF = 100.*dF/fR
snr = fS / imagerms
xSav=mean(xS)
ySav=mean(yS)
radius = sqrt((xS-xSav)*(xS-xSav)+(yS-ySav)*(yS-ySav))
azimuth = arctan(abs(yS-ySav)/abs(xS-xSav)) * 180. / math.pi
for i in range(len(azimuth)):
if(yS[i]>ySav):
if(xS[i]<xSav):
azimuth[i] = 180. - azimuth[i]
else:
if(xS[i]<xSav):
azimuth[i] = 180. + azimuth[i]
else:
azimuth[i] = 360. - azimuth[i]
azimuth = azimuth % 360.
area = math.pi * aS * bS / 4.
numComp = (npf-ndof+1)/nfree
numNeighbours = zeros(len(xS))
for i in range(len(xS)):
for j in range(len(x)):
if(missType[j]=='R'):
dist = sqrt((x[j]-xS[i])*(x[j]-xS[i]) + (y[j]-yS[i])*(y[j]-yS[i]))
if(dist<30.):
numNeighbours[i]+=1
#################################################
print "Fraction with |dS/S|<10%% = %5.2f%%"%(100.*size(rdF[abs(rdF)<10])/float(size(rdF)))
print "Fraction with |dS/S|<20%% = %5.2f%%"%(100.*size(rdF[abs(rdF)<20])/float(size(rdF)))
print "Fraction with |dS/S|<30%% = %5.2f%%"%(100.*size(rdF[abs(rdF)<30])/float(size(rdF)))
print "Fraction with dS/S>30%% = %5.2f%%"%(100.*size(rdF[rdF>30])/float(size(rdF)))
print ""
dFgood = dF[npf>0]
print "Mean of dS = %10.6f"%(mean(dFgood))
print "Median of dS = %10.6f"%(median(dFgood))
print "RMS of dS = %10.6f"%(std(dFgood))
print "MADFM of dS = %10.6f = %10.6f as RMS"%(madfm(dFgood),madfmToRMS(madfm(dFgood)))
print "Average of the ImageRMS values = %10.6f"%(mean(imagerms[npf>0]))
print "Weighted average of the ImageRMS values = %10.6f"%(sum(imagerms[npf>0]*npf[npf>0])/(sum(npf[npf>0])*1.))
goodfit = npf>0
# ind = argsort(rdF)[goodfit[argsort(rdF)]]
ind = array(range(len(rdF)))[goodfit]
#################################################
print "\nDoing plot of flux errors"
figure(1, figsize=(16.5,11.7), dpi=72)
font = {'fontsize' : '8'}
legfont = {'fontsize' : '4'}
rc('xtick', labelsize=8)
rc('ytick', labelsize=8)
subplots_adjust(wspace=0.3,hspace=0.3)
plotcount=1
for loop in range(2):
if(loop==0):
arr = dF
lab = r'$\Delta S$'
else:
arr = rdF
percent='%'
lab = r'$\Delta S/S_{\rm cat} [\%s]$'%percent
plotcount = nextplot(plotcount)
n, bins, patches = hist(arr[ind], 20)
xlabel(lab,font)
ylabel('Number',font)
plotcount = nextplot(plotcount)
mu=median(arr)
sigma=madfmToRMS(madfm(arr))
upper=mu+3.*sigma
lower=mu-3.*sigma
upper = int(ceil(upper/10)*10.)
lower = int(floor(lower/10)*10.)
n, bins, patches = hist(arr, 20, range=[lower,upper], normed=1)
axisrange = axis()
ytemp1 = normpdf(bins,mu,sigma)
l1 = plot(bins, ytemp1, 'r-',label=r"$\Delta S$ mean&rms")
if(loop==0):
ytemp2 = normpdf(bins,mu,mean(imagerms[npf>0]))
l2 = plot(bins, ytemp2*max(ytemp1)/max(ytemp2), 'g-', label="image rms")
axisrange = axis()
axis([lower,upper,axisrange[2],axisrange[3]])
setp(l1, 'linewidth', 2)
if(loop==0):
setp(l2, 'linewidth', 2)
xlabel(lab,font)
ylabel('Number',font)
legend()
plotcount = nextplot(plotcount)
temparr = arr[goodfit * (nfree==3)]
n, bins, patches = hist(temparr, bins=20, range=(min(arr[goodfit]),max(arr[goodfit])), fill=False, ec='red')
temparr = arr[goodfit * (nfree==6)]
n, bins, patches = hist(temparr, bins=20, range=(min(arr[goodfit]),max(arr[goodfit])), fill=False, ec='green')
xlabel(lab,font)
ylabel('Number',font)
plotcount = nextplot(plotcount)
for i in ind:
plot([radius[i]],[arr[i]],'o')
xlabel(r'Distance from field centre [arcsec]',font)
ylabel(lab,font)
plotcount = nextplot(plotcount)
for i in ind:
plot([azimuth[i]],[arr[i]],'o')
axisrange=axis()
axis([0.,360.,axisrange[2],axisrange[3]])
xlabel(r'Azimuth around field centre [deg]',font)
ylabel(lab,font)
plotcount = nextplot(plotcount)
for i in ind:
plot([fS[i]],[arr[i]],'o')
xlabel(r'$S_{\rm Fit}$',font)
ylabel(lab,font)
plotcount = nextplot(plotcount)
for i in ind:
plot([fS[i]],[arr[i]],'o')
semilogx(basex=10.)
axisrange = axis()
axis([min(fS)*0.9,max(fS)*1.1,axisrange[2],axisrange[3]])
xlabel(r'$\log_{10}(S_{\rm Fit})$',font)
ylabel(lab,font)
plotcount = nextplot(plotcount)
bigBoxPlot(fS,arr,goodfit)
xlabel(r'$\log_{10}(S_{\rm Fit})$',font)
ylabel(lab,font)
plotcount = nextplot(plotcount)
for i in ind:
plot([snr[i]],[arr[i]],'o')
semilogx(basex=10.)
axisrange = axis()
axis([min(snr)*0.9,max(snr)*1.1,axisrange[2],axisrange[3]])
xlabel(r'$\log_{10}(S/N (Fit))$',font)
ylabel(lab,font)
plotcount = nextplot(plotcount)
bigBoxPlot(snr,arr,goodfit)
xlabel(r'$\log_{10}(S/N (Fit))$',font)
ylabel(lab,font)
# show()
print "Saving to fluxEval.png"
savefig('fluxEval')
close()
#################################################
for loop in range(2):
if(loop==0):
loopname="Absolute"
arr = dF
lab = r'$\Delta S$'
figname = "fitEval_AbsErr"
else:
loopname="Relative"
arr = rdF
percent='%'
lab = r'$\Delta S/S_{\rm cat} [\%s]$'%percent
figname = "fitEval_RelErr"
print "Doing plot of fit parameters for %s Flux Errors"%loopname
figure(2, figsize=(16.5,11.7), dpi=72)
font = {'fontsize' : '8'}
rc('xtick', labelsize=8)
rc('ytick', labelsize=8)
subplots_adjust(wspace=0.3,hspace=0.3)
plotcount=1
plotcount = nextplot(plotcount)
n, bins, patches = hist(aS[ind], 20)
xlabel(r'Major axis of fit [arcsec]',font)
ylabel('Number',font)
plotcount = nextplot(plotcount)
n, bins, patches = hist(bS[ind], 20)
xlabel(r'Minor axis of fit [arcsec]',font)
ylabel('Number',font)
plotcount = nextplot(plotcount)
n, bins, patches = hist(pS[ind], 20)
xlabel(r'Position angle of fit [deg]',font)
ylabel('Number',font)
plotcount = nextplot(plotcount)
n, bins, patches = hist(aS[ind]/bS[ind], 20)
xlabel(r'Axial ratio of fit',font)
ylabel('Number',font)
plotcount = nextplot(plotcount)
n, bins, patches = hist(rms[ind], 20)
xlabel(r'RMS of fit',font)
ylabel('Number',font)
plotcount = nextplot(plotcount)
for i in ind:
plot([aS[i]],[arr[i]],'o')
xlabel(r'Major axis of fit [arcsec]',font)
ylabel(lab,font)
plotcount = nextplot(plotcount)
for i in ind:
plot([bS[i]],[arr[i]],'o')
xlabel(r'Minor axis of fit [arcsec]',font)
ylabel(lab,font)
plotcount = nextplot(plotcount)
for i in ind:
plot([pS[i]],[arr[i]],'o')
xlabel(r'Position angle of fit [deg]',font)
ylabel(lab,font)
plotcount = nextplot(plotcount)
for i in ind:
plot([aS[i]/bS[i]],[arr[i]],'o')
xlabel(r'Axial ratio of fit',font)
ylabel(lab,font)
plotcount = nextplot(plotcount)
for i in ind:
plot([rms[i]],[arr[i]],'o')
xlabel(r'RMS of fit',font)
ylabel(lab,font)
plotcount = nextplot(plotcount)
n, bins, patches = hist(chisq[ind], 20)
xlabel(r'$\chi^2$ of fit',font)
ylabel('Number',font)
plotcount = nextplot(plotcount)
n, bins, patches = hist(chisq[ind]/ndof[ind], 20)
xlabel(r'$\chi^2/\nu$ of fit',font)
ylabel('Number',font)
plotcount = nextplot(plotcount)
n, bins, patches = hist(area[ind], 20)
xlabel(r'Area of fitted Gaussian',font)
ylabel('Number',font)
plotcount = nextplot(plotcount)
for i in ind:
plot([npf[i]],[arr[i]],'o')
xlabel(r'Number of pixels in fit',font)
ylabel(lab,font)
plotcount = nextplot(plotcount)
for i in ind:
plot([npo[i]],[arr[i]],'o')
xlabel(r'Number of pixels in object',font)
ylabel(lab,font)
plotcount = nextplot(plotcount)
for i in ind:
plot([chisq[i]],[arr[i]],'o')
xlabel(r'$\chi^2$ of fit',font)
ylabel(lab,font)
plotcount = nextplot(plotcount)
for i in ind:
plot([chisq[i]/ndof[i]],[arr[i]],'o')
xlabel(r'$\chi^2/\nu$ of fit',font)
ylabel(lab,font)
plotcount = nextplot(plotcount)
for i in ind:
plot([area[i]],[arr[i]],'o')
xlabel(r'Area of fitted Gaussian',font)
ylabel(lab,font)
plotcount = nextplot(plotcount)
for i in ind:
plot([numComp[i]],[arr[i]],'o')
axisrange = axis()
axis([0,max(numComp)+1,axisrange[2],axisrange[3]])
xlabel(r'Number of fitted components',font)
ylabel(lab,font)
plotcount = nextplot(plotcount)
for i in ind:
plot([numNeighbours[i]],[arr[i]],'o')
axisrange = axis()
axis([-0.5,max(numNeighbours)+1.5,axisrange[2],axisrange[3]])
xlabel(r'No. of unmatched nearby catalogue sources',font)
ylabel(lab,font)
print "Saving to %s.png"%figname
savefig(figname)
close()
# end of: for loop in range(2)
#################################################
| gpl-2.0 |
dean0x7d/pybinding | pybinding/parallel.py | 1 | 13695 | """Multi-threaded functions for parameter sweeps"""
import sys
import inspect
import itertools
from copy import copy
from functools import partial
import numpy as np
import matplotlib.pyplot as plt
from pybinding.support.inspect import get_call_signature
from . import _cpp
from .utils import cpuinfo, progressbar, decorator_decorator
from .results import Sweep, NDSweep
__all__ = ['num_cores', 'parallel_for', 'parallelize', 'sweep', 'ndsweep']
num_cores = cpuinfo.physical_core_count()
def _sequential_for(sequence, produce, retire):
"""Simple single-threaded for loop"""
for idx, var in enumerate(sequence):
deferred = produce(var)
deferred.compute()
retire(deferred, idx)
def _parallel_for(sequence, produce, retire, num_threads=num_cores, queue_size=num_cores):
"""Multi-threaded for loop
See the implementation of `_sequential_for` to get the basic idea. This parallel
version is functionally identical but executes on multiple threads simultaniously
thanks to C++. The `produce` function must return a Deferred compute object which
has a `compute()` method and a `result` field.
Everything is implemented in C++. This is just a wrapper which sets the default
values of `num_threads` and `queue_size` to the number of physical cores.
Parameters
----------
sequence : array_like
The for loop will iterate over this.
produce : callable
Takes a value from `sequence` and returns a `Deferred` compute object.
retire : callable
Takes the computed `Deferred` object and 'idx' which indicates the index
of the value in `sequence` which was just computed.
num_threads : int
Number of thread that will run in parallel.
queue_size : int
Number of `Deferred` jobs to be queued up for consumption by the worker
threads. The maximum number of jobs that will be kept in memory at any
one time will be `queue_size` + `num_threads`.
Examples
--------
::
def produce(var):
model = pb.Model(...) # something that depends on var
greens = pb.greens.kpm(model)
return greens.deferred_ldos(...) # may also depend on var
def retire(deferred, idx):
print(deferred.result)
_parallel_for(np.linspace(0, 1, 50), produce, retire)
"""
_cpp.parallel_for(sequence, produce, retire, num_threads, queue_size)
class Hooks:
"""Holds functions which hook into `ParallelFor`
Attributes
----------
first : list of callable
Called only once after the first `Deferred` is produced.
status : list of callable
Called every time a `Deferred` job is computed. As arguments it takes
a `report` string, `idx` of the original value and `count` the number
        of jobs that have been computed so far.
plot : list of callable
Called once in a while with a `result` argument to be plotted.
"""
def __init__(self):
self.first = []
self.status = []
self.plot = []
class Config:
"""Configuration variables for `ParallelFor`
Attributes
----------
callsig : CallSignature
Signature of the function call which made the parallel `Factory`.
Used for automatic configuration.
filename : str
The name of the file (without an extension) for various files which will be
produced. The computed data will be saved with the '.pbz' extension, plots
with '.png', progress log with '.log', etc.
num_threads, queue_size : int
Forwarded to `_parallel_for`.
save_every : float
A 0 to 100 percentage points interval to save and plot the data.
pbar_fd : {sys.stdout, sys.stderr, None}
Output stream. The progress bar is always the last line of output.
"""
def __init__(self, callsig, num_threads, queue_size):
self.callsig = callsig
self.num_threads = num_threads
self.queue_size = queue_size
self.filename = self.make_filename(callsig)
self.save_every = 10.0
self.pbar_fd = sys.stdout
def make_save_set(self, total):
save_at = {int(total * p) for p in np.arange(0, 1, self.save_every / 100)}
save_at.remove(0)
save_at.add(total) # make sure progress is saved on the last iteration
return save_at
@staticmethod
def make_filename(callsig):
invalid_chars = " /.,"
filename = "".join("{:.1s}{}".format(k, v) for k, v in callsig.named_args.items())
if not filename:
filename = "data"
return "".join(c for c in filename if c not in invalid_chars)
class DefaultStatus:
"""Default status reporter"""
def __init__(self, params, sequence):
self.params = params
self.sequence = sequence
size = len(sequence)
count_width = len(str(size))
vars_width = max(len(self._vars(idx)) for idx in range(size))
self.template = "{{count:{}}}| {{vars:{}}} | {{report}}".format(count_width, vars_width)
def _vars(self, idx):
return ", ".join("{} = {:.2g}".format(k, v)
for k, v in zip(self.params, self.sequence[idx]))
def __call__(self, deferred, idx, count):
report = deferred.solver.report(shortform=True)
print(self.template.format(vars=self._vars(idx), **locals()))
class Factory:
"""Produces `Deferred` jobs for `ParallelFor`
Attributes
----------
variables : tuple of array_like
Parameters which change while iterating.
fixtures : dict
Constant parameters.
sequence : list
Product of `variables`. The loop will iterate over its values.
produce : callable
Takes a value from `sequence` and returns a `Deferred` compute object.
config : Config
hooks : Hooks
"""
def __init__(self, variables, fixtures, produce, config):
self.variables = variables
self.fixtures = fixtures
self.produce = produce
self.config = config
self.sequence = list(itertools.product(*variables))
self.hooks = Hooks()
self.hooks.status.append(DefaultStatus(
inspect.signature(self.produce).parameters, self.sequence
))
class ParallelFor:
"""Keep track of progress while running `_parallel_for`
Parameters
----------
factory : Factory
Produces Deferred compute kernels.
make_result : callable
Creates the final result from raw data. See `_make_result` prototype.
"""
def __init__(self, factory, make_result=None):
self.factory = factory
self.hooks = factory.hooks
self.config = factory.config
if make_result:
self._make_result = make_result
size = len(factory.sequence)
self.save_at = self.config.make_save_set(size)
logname = self.config.filename + ".log" if self.config.filename else ""
self.pbar = progressbar.ProgressBar(size, stream=self.config.pbar_fd, filename=logname)
if self.config.num_threads == 1:
self.loop = _sequential_for
else:
self.loop = partial(_parallel_for, num_threads=self.config.num_threads,
queue_size=self.config.queue_size)
self.called_first = False
self.result = None
self.data = [None] * size
@staticmethod
def _make_result(data):
return data
def _produce(self, var):
deferred = self.factory.produce(*var, **self.factory.fixtures)
if not self.called_first:
self._first(deferred)
self.called_first = True
self.pbar.refresh()
return deferred
def _first(self, deferred):
for f in self.hooks.first:
f(deferred)
def _retire(self, deferred, idx):
self.data[idx] = copy(deferred.result)
count = self.pbar.value + 1
self._status(deferred, idx, count)
self.pbar += 1 # also refreshes output stream
if count in self.save_at:
result = self._make_result(self.data)
self.result = copy(result) # _plot() may modify the local
self._save(result)
self._plot(result)
def _status(self, deferred, idx, count):
for f in self.hooks.status:
f(deferred, idx, count)
def _save(self, result):
if not self.config.filename:
return
from .support.pickle import save
save(result, self.config.filename)
def _plot(self, result):
if not self.config.filename:
return
try:
if self.hooks.plot:
for f in self.hooks.plot:
f(result)
plt.savefig(self.config.filename + ".png")
plt.close()
except Exception as err:
print(err)
def __call__(self):
self.called_first = False
with self.pbar:
self.loop(self.factory.sequence, self._produce, self._retire)
return self.result
def parallel_for(factory, make_result=None):
"""Multi-threaded loop feed by the `factory` function
Parameters
----------
factory : :func:`Factory <parallelize>`
Factory function created with the :func:`parallelize` decorator.
make_result : callable, optional
Creates the final result from raw data. This result is also the
final return value of :func:`parallel_for`.
Returns
-------
array_like
A result for each loop iteration.
Examples
--------
::
@parallelize(x=np.linspace(0, 1, 10))
def factory(x):
pb.Model(...) # depends on `x`
greens = pb.greens.kpm(model)
return greens.deferred_ldos(...) # may also depend on `x`
results = parallel_for(factory)
"""
return ParallelFor(factory, make_result)()
@decorator_decorator
def parallelize(num_threads=num_cores, queue_size=num_cores, **kwargs):
"""parallelize(num_threads=num_cores, queue_size=num_cores, **kwargs)
A decorator which creates factory functions for :func:`parallel_for`
The decorated function must return a `Deferred` compute kernel.
Parameters
----------
num_threads : int
Number of threads that will run in parallel. Defaults to the number of
cores in the current machine.
queue_size : int
Number of `Deferred` jobs to be queued up for consumption by the worker
threads. The maximum number of jobs that will be kept in memory at any
one time will be `queue_size` + `num_threads`.
**kwargs
Variables which will be iterated over in :func:`parallel_for`
and passed to the decorated function. See example.
Examples
--------
::
@parallelize(a=np.linspace(0, 1, 10), b=np.linspace(-2, 2, 10))
def factory(a, b):
pb.Model(...) # depends on `a` and `b`
greens = pb.greens.kpm(model)
return greens.deferred_ldos(...) # may also depend on `a` and `b`
results = parallel_for(factory)
"""
callsig = kwargs.pop('callsig', None)
if not callsig:
callsig = get_call_signature(up=2)
def decorator(produce_func):
params = inspect.signature(produce_func).parameters
variables = tuple(kwargs[k] for k in params if k in kwargs)
fixtures = {k: v.default for k, v in params.items() if k not in kwargs}
return Factory(variables, fixtures, produce_func,
Config(callsig, num_threads, queue_size))
return decorator
def sweep(factory, plot=lambda r: r.plot(), labels=None, tags=None, silent=False):
"""Do a multi-threaded parameter sweep
Parameters
----------
factory : :func:`Factory <parallelize>`
Factory function created with the :func:`parallelize` decorator.
plot : callable
Plotting functions which takes a :class:`.Sweep` result as its only argument.
labels, tags : dict
Forwarded to :class:`.Sweep` object.
silent : bool
Don't print status messages.
Returns
-------
:class:`~pybinding.Sweep`
"""
x = factory.variables[0]
energy = factory.fixtures['energy']
zero = np.zeros_like(energy, np.float32)
def make_result(data):
sweep_data = np.vstack([v.squeeze() if v is not None else zero for v in data])
return Sweep(x, energy, sweep_data, labels, tags)
if silent:
factory.hooks.status.clear()
if plot:
factory.hooks.plot.append(plot)
return parallel_for(factory, make_result)
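# --- Illustrative usage (annotation added for clarity, not part of the original module) ---
# A minimal sketch, assuming `pb` is the imported pybinding package and
# `make_model(x)` is a placeholder model builder defined elsewhere:
#
#     @parallelize(x=np.linspace(0, 1, 10))
#     def factory(x, energy=np.linspace(-1, 1, 100), broadening=0.05):
#         model = make_model(x)
#         kpm = pb.greens.kpm(model)
#         return kpm.deferred_ldos(energy, broadening, position=[0, 0])
#
#     result = sweep(factory)   # returns a Sweep over (x, energy)
#
# `sweep` expects the factory to expose an `energy` fixture (a default argument
# not swept over), which becomes the second axis of the resulting Sweep.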
def ndsweep(factory, plot=None, labels=None, tags=None, silent=False):
"""Do a multi-threaded n-dimensional parameter sweep
Parameters
----------
factory : :func:`Factory <parallelize>`
Factory function created with the :func:`parallelize` decorator.
plot : callable
Plotting functions which takes a :class:`.NDSweep` result as its only argument.
labels, tags : dict
Forwarded to :class:`.NDSweep` object.
silent : bool
Don't print status messages.
Returns
-------
:class:`~pybinding.NDSweep`
"""
energy = factory.fixtures['energy']
variables = factory.variables + (energy,)
zero = np.zeros_like(energy, np.float32)
def make_result(data):
sweep_data = np.vstack([v.squeeze() if v is not None else zero for v in data])
return NDSweep(variables, sweep_data, labels, tags)
if silent:
factory.hooks.status.clear()
if plot:
factory.hooks.plot.append(plot)
return parallel_for(factory, make_result)
| bsd-2-clause |
hjl/cntkdemo | AlexNetDemo/conv-image-util.py | 1 | 2788 | # helper functions for working with convnet visualization images
from PIL import Image as PILImage
import numpy as np
import sys, os
import cv2
from matplotlib import pyplot as plt
def make_filter_weight_image(filter_weights, image_file_name, dim=11):
imstack = np.dstack((filter_weights[0], filter_weights[1], filter_weights[2]))
# hacky range normalization so we can see something
immean = imstack.mean()
#print(immean)
imstack -= immean
imstack = imstack * (255.0/imstack.max())
imstack += 127.
#print (imstack[0])
imstack = np.clip(imstack, 0., 255.)
imint = (imstack * (255.0 / imstack.max())).astype('uint8')
#imint = imstack.astype('uint8')
#print(imint[0])
try:
os.remove(image_file_name)
except OSError:
pass
im = PILImage.fromarray(imint)
im2 = im.resize((224,224))
im2.save(image_file_name)
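# --- Illustrative usage (annotation added for clarity, not part of the original script) ---
# A minimal sketch with random conv1-style weights; the 3x11x11 layout
# (channels first) matches what make_filter_weight_image expects:
#
#     weights = np.random.randn(3, 11, 11).astype('float32')
#     make_filter_weight_image(weights, 'filter00.png')
#
# The three channel planes are stacked into an RGB image, mean-shifted and
# rescaled for visibility, then upsampled to 224x224 before saving.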
def read_conv1_filter_values(filter_json_file_name):
f = open (filter_json_file_name)
filters = list()
for line in f:
filters.append(json.loads(line))
print(filters)
return filters
def demo_minoru_stereo():
cap1 = cv2.VideoCapture(1)
cap2 = cv2.VideoCapture(2)
# reasonable setting for daylight in office is exposure = -10.0, brightness = -10.0, contrast = 15.0
exposure = -10.0 # minoru range is 0.0 max to -10.0 min
brightness = -10.0 # minoru range is 10.0 max to -10.0 min
contrast = 15.0 # minoru range is 20.0 max to 0.0 min
count = 0
while True:
count = count + 1
print("count is " + repr(count))
cap1.set(cv2.CAP_PROP_EXPOSURE, exposure)
cap2.set(cv2.CAP_PROP_EXPOSURE, exposure)
print("exposure is " + repr(exposure) + ", read back " + repr(cap1.get(cv2.CAP_PROP_EXPOSURE)))
# exposure = exposure - 1.0
cap1.set(cv2.CAP_PROP_BRIGHTNESS, brightness)
cap2.set(cv2.CAP_PROP_BRIGHTNESS, brightness)
print("brightness is " + repr(brightness), ", read back " + repr(cap1.get(cv2.CAP_PROP_BRIGHTNESS)))
#brightness = brightness - 1.0
cap1.set(cv2.CAP_PROP_CONTRAST, contrast)
cap2.set(cv2.CAP_PROP_CONTRAST, contrast)
print("contrast is " + repr(contrast) + ", read back " + repr(cap1.get(cv2.CAP_PROP_CONTRAST)))
#contrast = contrast - 1.0
ret1, frame1 = cap1.read()
ret2, frame2 = cap2.read()
#img1 = cv2.cvtColor(frame1, cv2.COLOR_BGR2RGB)
#img2 = cv2.cvtColor(frame2, cv2.COLOR_BGR2RGB)
cv2.imshow('cap1', frame1)
cv2.imshow('cap2', frame2)
if count == 5:
cv2.imwrite('.\\data\\minoru-left.png', frame1)
cv2.imwrite('.\\data\\minoru-right.png', frame2)
cv2.waitKey(100)
cap1.release()
cap2.release()
return
| mit |
magnunor/hyperspy | hyperspy/drawing/signal.py | 5 | 4534 | # -*- coding: utf-8 -*-
# Copyright 2007-2016 The HyperSpy developers
#
# This file is part of HyperSpy.
#
# HyperSpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# HyperSpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HyperSpy. If not, see <http://www.gnu.org/licenses/>.
# This file contains plotting code generic to the BaseSignal class.
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
from traits.api import Undefined
from hyperspy.drawing.utils import set_axes_decor
def _plot_1D_component(factors, idx, axes_manager, ax=None,
calibrate=True, comp_label=None,
same_window=False):
if ax is None:
ax = plt.gca()
axis = axes_manager.signal_axes[0]
if calibrate:
x = axis.axis
plt.xlabel(axis.units)
else:
x = np.arange(axis.size)
plt.xlabel('Channel index')
ax.plot(x, factors[:, idx], label='%i' % idx)
if comp_label and not same_window:
plt.title('%s' % comp_label)
return ax
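# --- Illustrative usage (annotation added for clarity, not part of the original module) ---
# A minimal sketch, assuming `s` is a decomposed HyperSpy signal whose
# learning_results hold one component per column of `factors`:
#
#     factors = s.learning_results.factors
#     ax = _plot_1D_component(factors, idx=0, axes_manager=s.axes_manager,
#                             calibrate=True, comp_label='PCA')
#
# In practice these private helpers are driven by the higher-level plotting
# methods of BaseSignal rather than called directly.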
def _plot_2D_component(factors, idx, axes_manager,
calibrate=True, ax=None,
comp_label=None, cmap=plt.cm.gray,
axes_decor='all'
):
if ax is None:
ax = plt.gca()
axes = axes_manager.signal_axes[::-1]
shape = axes_manager._signal_shape_in_array
extent = None
if calibrate:
extent = (axes[1].low_value,
axes[1].high_value,
axes[0].high_value,
axes[0].low_value)
if comp_label:
plt.title('%s' % idx)
im = ax.imshow(factors[:, idx].reshape(shape),
cmap=cmap, interpolation='nearest',
extent=extent)
# Set axes decorations based on user input
set_axes_decor(ax, axes_decor)
div = make_axes_locatable(ax)
cax = div.append_axes("right", size="5%", pad=0.05)
plt.colorbar(im, cax=cax)
return ax
def _plot_loading(loadings, idx, axes_manager, ax=None,
comp_label=None, no_nans=True,
calibrate=True, cmap=plt.cm.gray,
same_window=False, axes_decor='all'):
if ax is None:
ax = plt.gca()
if no_nans:
loadings = np.nan_to_num(loadings)
axes = axes_manager.navigation_axes
if axes_manager.navigation_dimension == 2:
extent = None
# get calibration from a passed axes_manager
shape = axes_manager._navigation_shape_in_array
if calibrate:
extent = (axes[0].low_value,
axes[0].high_value,
axes[1].high_value,
axes[1].low_value)
im = ax.imshow(loadings[idx].reshape(shape),
cmap=cmap, extent=extent,
interpolation='nearest')
if calibrate:
plt.xlabel(axes[0].units)
plt.ylabel(axes[1].units)
else:
plt.xlabel('pixels')
plt.ylabel('pixels')
if comp_label:
if same_window:
plt.title('%s' % idx)
else:
plt.title('%s #%s' % (comp_label, idx))
# Set axes decorations based on user input
set_axes_decor(ax, axes_decor)
div = make_axes_locatable(ax)
cax = div.append_axes("right", size="5%", pad=0.05)
plt.colorbar(im, cax=cax)
elif axes_manager.navigation_dimension == 1:
if calibrate:
x = axes[0].axis
else:
x = np.arange(axes[0].size)
ax.step(x, loadings[idx],
label='%s' % idx)
if comp_label and not same_window:
plt.title('%s #%s' % (comp_label, idx))
plt.ylabel('Score (a. u.)')
if calibrate:
if axes[0].units is not Undefined:
plt.xlabel(axes[0].units)
else:
plt.xlabel('depth')
else:
plt.xlabel('depth')
else:
raise ValueError('View not supported')
| gpl-3.0 |
gnychis/grforwarder | gr-utils/src/python/plot_data.py | 10 | 5841 | #
# Copyright 2007,2008,2011 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
try:
import scipy
except ImportError:
print "Please install SciPy to run this script (http://www.scipy.org/)"
raise SystemExit, 1
try:
from pylab import *
except ImportError:
print "Please install Matplotlib to run this script (http://matplotlib.sourceforge.net/)"
raise SystemExit, 1
from optparse import OptionParser
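# Interactive viewer: reads fixed-size blocks from one or more raw sample files and plots them,
# stepping back and forth through the files via the arrow keys/space or the on-screen buttons.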
class plot_data:
def __init__(self, datatype, filenames, options):
self.hfile = list()
self.legend_text = list()
for f in filenames:
self.hfile.append(open(f, "r"))
self.legend_text.append(f)
self.block_length = options.block
self.start = options.start
self.sample_rate = options.sample_rate
self.datatype = datatype
self.sizeof_data = datatype().nbytes # number of bytes per sample in file
self.axis_font_size = 16
self.label_font_size = 18
self.title_font_size = 20
self.text_size = 22
# Setup PLOT
self.fig = figure(1, figsize=(16, 9), facecolor='w')
rcParams['xtick.labelsize'] = self.axis_font_size
rcParams['ytick.labelsize'] = self.axis_font_size
self.text_file_pos = figtext(0.10, 0.88, "File Position: ", weight="heavy", size=self.text_size)
self.text_block = figtext(0.40, 0.88, ("Block Size: %d" % self.block_length),
weight="heavy", size=self.text_size)
self.text_sr = figtext(0.60, 0.88, ("Sample Rate: %.2f" % self.sample_rate),
weight="heavy", size=self.text_size)
self.make_plots()
self.button_left_axes = self.fig.add_axes([0.45, 0.01, 0.05, 0.05], frameon=True)
self.button_left = Button(self.button_left_axes, "<")
self.button_left_callback = self.button_left.on_clicked(self.button_left_click)
self.button_right_axes = self.fig.add_axes([0.50, 0.01, 0.05, 0.05], frameon=True)
self.button_right = Button(self.button_right_axes, ">")
self.button_right_callback = self.button_right.on_clicked(self.button_right_click)
self.xlim = self.sp_f.get_xlim()
self.manager = get_current_fig_manager()
connect('key_press_event', self.click)
show()
def get_data(self, hfile):
self.text_file_pos.set_text("File Position: %d" % (hfile.tell()//self.sizeof_data))
try:
f = scipy.fromfile(hfile, dtype=self.datatype, count=self.block_length)
except MemoryError:
print "End of File"
else:
self.f = scipy.array(f)
self.time = scipy.array([i*(1/self.sample_rate) for i in range(len(self.f))])
def make_plots(self):
self.sp_f = self.fig.add_subplot(2,1,1, position=[0.075, 0.2, 0.875, 0.6])
self.sp_f.set_title(("Amplitude"), fontsize=self.title_font_size, fontweight="bold")
self.sp_f.set_xlabel("Time (s)", fontsize=self.label_font_size, fontweight="bold")
self.sp_f.set_ylabel("Amplitude (V)", fontsize=self.label_font_size, fontweight="bold")
self.plot_f = list()
maxval = -1e12
minval = 1e12
for hf in self.hfile:
# if specified on the command-line, set file pointer
hf.seek(self.sizeof_data*self.start, 1)
self.get_data(hf)
# Subplot for real and imaginary parts of signal
self.plot_f += plot(self.time, self.f, 'o-')
maxval = max(maxval, self.f.max())
minval = min(minval, self.f.min())
self.sp_f.set_ylim([1.5*minval, 1.5*maxval])
self.leg = self.sp_f.legend(self.plot_f, self.legend_text)
draw()
def update_plots(self):
maxval = -1e12
minval = 1e12
for hf,p in zip(self.hfile,self.plot_f):
self.get_data(hf)
p.set_data([self.time, self.f])
maxval = max(maxval, self.f.max())
minval = min(minval, self.f.min())
self.sp_f.set_ylim([1.5*minval, 1.5*maxval])
draw()
def click(self, event):
forward_valid_keys = [" ", "down", "right"]
backward_valid_keys = ["up", "left"]
if(find(event.key, forward_valid_keys)):
self.step_forward()
elif(find(event.key, backward_valid_keys)):
self.step_backward()
def button_left_click(self, event):
self.step_backward()
def button_right_click(self, event):
self.step_forward()
def step_forward(self):
self.update_plots()
def step_backward(self):
for hf in self.hfile:
# Step back in file position
if(hf.tell() >= 2*self.sizeof_data*self.block_length ):
hf.seek(-2*self.sizeof_data*self.block_length, 1)
else:
hf.seek(-hf.tell(),1)
self.update_plots()
def find(item_in, list_search):
try:
return list_search.index(item_in) != None
except ValueError:
return False
| gpl-3.0 |
PredictiveScienceLab/GPy | GPy/examples/non_gaussian.py | 14 | 10700 | # Copyright (c) 2014, Alan Saul
# Licensed under the BSD 3-clause license (see LICENSE.txt)
import GPy
import numpy as np
from GPy.util import datasets
try:
import matplotlib.pyplot as plt
except:
pass
def student_t_approx(optimize=True, plot=True):
"""
Example of regressing with a student t likelihood using Laplace
"""
real_std = 0.1
#Start a function, any function
X = np.linspace(0.0, np.pi*2, 100)[:, None]
Y = np.sin(X) + np.random.randn(*X.shape)*real_std
Y = Y/Y.max()
Yc = Y.copy()
X_full = np.linspace(0.0, np.pi*2, 500)[:, None]
Y_full = np.sin(X_full)
Y_full = Y_full/Y_full.max()
#Slightly noisy data
Yc[75:80] += 1
#Very noisy data
#Yc[10] += 100
#Yc[25] += 10
#Yc[23] += 10
#Yc[26] += 1000
#Yc[24] += 10
#Yc = Yc/Yc.max()
#Add student t random noise to datapoints
deg_free = 1
print("Real noise: ", real_std)
initial_var_guess = 0.5
edited_real_sd = initial_var_guess
# Kernel object
kernel1 = GPy.kern.RBF(X.shape[1]) + GPy.kern.White(X.shape[1])
kernel2 = GPy.kern.RBF(X.shape[1]) + GPy.kern.White(X.shape[1])
kernel3 = GPy.kern.RBF(X.shape[1]) + GPy.kern.White(X.shape[1])
kernel4 = GPy.kern.RBF(X.shape[1]) + GPy.kern.White(X.shape[1])
#Gaussian GP model on clean data
m1 = GPy.models.GPRegression(X, Y.copy(), kernel=kernel1)
# optimize
m1['.*white'].constrain_fixed(1e-5)
m1.randomize()
#Gaussian GP model on corrupt data
m2 = GPy.models.GPRegression(X, Yc.copy(), kernel=kernel2)
m2['.*white'].constrain_fixed(1e-5)
m2.randomize()
#Student t GP model on clean data
t_distribution = GPy.likelihoods.StudentT(deg_free=deg_free, sigma2=edited_real_sd)
laplace_inf = GPy.inference.latent_function_inference.Laplace()
m3 = GPy.core.GP(X, Y.copy(), kernel3, likelihood=t_distribution, inference_method=laplace_inf)
m3['.*t_scale2'].constrain_bounded(1e-6, 10.)
m3['.*white'].constrain_fixed(1e-5)
m3.randomize()
#Student t GP model on corrupt data
t_distribution = GPy.likelihoods.StudentT(deg_free=deg_free, sigma2=edited_real_sd)
laplace_inf = GPy.inference.latent_function_inference.Laplace()
m4 = GPy.core.GP(X, Yc.copy(), kernel4, likelihood=t_distribution, inference_method=laplace_inf)
m4['.*t_scale2'].constrain_bounded(1e-6, 10.)
m4['.*white'].constrain_fixed(1e-5)
m4.randomize()
print(m4)
debug=True
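    # NOTE: with debug left at True, only m4 is optimized and plotted and the function returns early,
    # so the four-model comparison below is skipped.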
if debug:
m4.optimize(messages=1)
from matplotlib import pyplot as pb
pb.plot(m4.X, m4.inference_method.f_hat)
pb.plot(m4.X, m4.Y, 'rx')
m4.plot()
print(m4)
return m4
if optimize:
optimizer='scg'
print("Clean Gaussian")
m1.optimize(optimizer, messages=1)
print("Corrupt Gaussian")
m2.optimize(optimizer, messages=1)
print("Clean student t")
m3.optimize(optimizer, messages=1)
print("Corrupt student t")
m4.optimize(optimizer, messages=1)
if plot:
plt.figure(1)
plt.suptitle('Gaussian likelihood')
ax = plt.subplot(211)
m1.plot(ax=ax)
plt.plot(X_full, Y_full)
plt.ylim(-1.5, 1.5)
plt.title('Gaussian clean')
ax = plt.subplot(212)
m2.plot(ax=ax)
plt.plot(X_full, Y_full)
plt.ylim(-1.5, 1.5)
plt.title('Gaussian corrupt')
plt.figure(2)
plt.suptitle('Student-t likelihood')
ax = plt.subplot(211)
m3.plot(ax=ax)
plt.plot(X_full, Y_full)
plt.ylim(-1.5, 1.5)
plt.title('Student-t rasm clean')
ax = plt.subplot(212)
m4.plot(ax=ax)
plt.plot(X_full, Y_full)
plt.ylim(-1.5, 1.5)
plt.title('Student-t rasm corrupt')
return m1, m2, m3, m4
def boston_example(optimize=True, plot=True):
raise NotImplementedError("Needs updating")
import sklearn
from sklearn.cross_validation import KFold
optimizer='bfgs'
messages=0
data = datasets.boston_housing()
degrees_freedoms = [3, 5, 8, 10]
X = data['X'].copy()
Y = data['Y'].copy()
X = X-X.mean(axis=0)
X = X/X.std(axis=0)
Y = Y-Y.mean()
Y = Y/Y.std()
num_folds = 10
kf = KFold(len(Y), n_folds=num_folds, indices=True)
num_models = len(degrees_freedoms) + 3 #3 for baseline, gaussian, gaussian laplace approx
score_folds = np.zeros((num_models, num_folds))
pred_density = score_folds.copy()
def rmse(Y, Ystar):
return np.sqrt(np.mean((Y-Ystar)**2))
for n, (train, test) in enumerate(kf):
X_train, X_test, Y_train, Y_test = X[train], X[test], Y[train], Y[test]
print("Fold {}".format(n))
noise = 1e-1 #np.exp(-2)
rbf_len = 0.5
data_axis_plot = 4
kernelstu = GPy.kern.RBF(X.shape[1]) + GPy.kern.white(X.shape[1]) + GPy.kern.bias(X.shape[1])
kernelgp = GPy.kern.RBF(X.shape[1]) + GPy.kern.white(X.shape[1]) + GPy.kern.bias(X.shape[1])
#Baseline
score_folds[0, n] = rmse(Y_test, np.mean(Y_train))
#Gaussian GP
print("Gauss GP")
mgp = GPy.models.GPRegression(X_train.copy(), Y_train.copy(), kernel=kernelgp.copy())
mgp.constrain_fixed('.*white', 1e-5)
mgp['.*len'] = rbf_len
mgp['.*noise'] = noise
print(mgp)
if optimize:
mgp.optimize(optimizer=optimizer, messages=messages)
Y_test_pred = mgp.predict(X_test)
score_folds[1, n] = rmse(Y_test, Y_test_pred[0])
pred_density[1, n] = np.mean(mgp.log_predictive_density(X_test, Y_test))
print(mgp)
print(pred_density)
print("Gaussian Laplace GP")
N, D = Y_train.shape
g_distribution = GPy.likelihoods.noise_model_constructors.gaussian(variance=noise, N=N, D=D)
g_likelihood = GPy.likelihoods.Laplace(Y_train.copy(), g_distribution)
mg = GPy.models.GPRegression(X_train.copy(), Y_train.copy(), kernel=kernelstu.copy(), likelihood=g_likelihood)
mg.constrain_positive('noise_variance')
mg.constrain_fixed('.*white', 1e-5)
mg['rbf_len'] = rbf_len
mg['noise'] = noise
print(mg)
if optimize:
mg.optimize(optimizer=optimizer, messages=messages)
Y_test_pred = mg.predict(X_test)
score_folds[2, n] = rmse(Y_test, Y_test_pred[0])
pred_density[2, n] = np.mean(mg.log_predictive_density(X_test, Y_test))
print(pred_density)
print(mg)
for stu_num, df in enumerate(degrees_freedoms):
#Student T
print("Student-T GP {}df".format(df))
t_distribution = GPy.likelihoods.noise_model_constructors.student_t(deg_free=df, sigma2=noise)
stu_t_likelihood = GPy.likelihoods.Laplace(Y_train.copy(), t_distribution)
mstu_t = GPy.models.GPRegression(X_train.copy(), Y_train.copy(), kernel=kernelstu.copy(), likelihood=stu_t_likelihood)
mstu_t.constrain_fixed('.*white', 1e-5)
mstu_t.constrain_bounded('.*t_scale2', 0.0001, 1000)
mstu_t['rbf_len'] = rbf_len
mstu_t['.*t_scale2'] = noise
print(mstu_t)
if optimize:
mstu_t.optimize(optimizer=optimizer, messages=messages)
Y_test_pred = mstu_t.predict(X_test)
score_folds[3+stu_num, n] = rmse(Y_test, Y_test_pred[0])
pred_density[3+stu_num, n] = np.mean(mstu_t.log_predictive_density(X_test, Y_test))
print(pred_density)
print(mstu_t)
if plot:
plt.figure()
plt.scatter(X_test[:, data_axis_plot], Y_test_pred[0])
plt.scatter(X_test[:, data_axis_plot], Y_test, c='r', marker='x')
plt.title('GP gauss')
plt.figure()
plt.scatter(X_test[:, data_axis_plot], Y_test_pred[0])
plt.scatter(X_test[:, data_axis_plot], Y_test, c='r', marker='x')
plt.title('Lap gauss')
plt.figure()
plt.scatter(X_test[:, data_axis_plot], Y_test_pred[0])
plt.scatter(X_test[:, data_axis_plot], Y_test, c='r', marker='x')
plt.title('Stu t {}df'.format(df))
print("Average scores: {}".format(np.mean(score_folds, 1)))
print("Average pred density: {}".format(np.mean(pred_density, 1)))
if plot:
#Plotting
stu_t_legends = ['Student T, df={}'.format(df) for df in degrees_freedoms]
legends = ['Baseline', 'Gaussian', 'Laplace Approx Gaussian'] + stu_t_legends
#Plot boxplots for RMSE density
fig = plt.figure()
ax=fig.add_subplot(111)
plt.title('RMSE')
bp = ax.boxplot(score_folds.T, notch=0, sym='+', vert=1, whis=1.5)
plt.setp(bp['boxes'], color='black')
plt.setp(bp['whiskers'], color='black')
plt.setp(bp['fliers'], color='red', marker='+')
xtickNames = plt.setp(ax, xticklabels=legends)
plt.setp(xtickNames, rotation=45, fontsize=8)
ax.set_ylabel('RMSE')
ax.set_xlabel('Distribution')
#Make grid and put it below boxes
ax.yaxis.grid(True, linestyle='-', which='major', color='lightgrey',
alpha=0.5)
ax.set_axisbelow(True)
#Plot boxplots for predictive density
fig = plt.figure()
ax=fig.add_subplot(111)
plt.title('Predictive density')
bp = ax.boxplot(pred_density[1:,:].T, notch=0, sym='+', vert=1, whis=1.5)
plt.setp(bp['boxes'], color='black')
plt.setp(bp['whiskers'], color='black')
plt.setp(bp['fliers'], color='red', marker='+')
xtickNames = plt.setp(ax, xticklabels=legends[1:])
plt.setp(xtickNames, rotation=45, fontsize=8)
ax.set_ylabel('Mean Log probability P(Y*|Y)')
ax.set_xlabel('Distribution')
#Make grid and put it below boxes
ax.yaxis.grid(True, linestyle='-', which='major', color='lightgrey',
alpha=0.5)
ax.set_axisbelow(True)
return mstu_t
#def precipitation_example():
#import sklearn
#from sklearn.cross_validation import KFold
#data = datasets.boston_housing()
#X = data['X'].copy()
#Y = data['Y'].copy()
#X = X-X.mean(axis=0)
#X = X/X.std(axis=0)
#Y = Y-Y.mean()
#Y = Y/Y.std()
#import ipdb; ipdb.set_trace() # XXX BREAKPOINT
#num_folds = 10
#kf = KFold(len(Y), n_folds=num_folds, indices=True)
#score_folds = np.zeros((4, num_folds))
#def rmse(Y, Ystar):
#return np.sqrt(np.mean((Y-Ystar)**2))
##for train, test in kf:
#for n, (train, test) in enumerate(kf):
#X_train, X_test, Y_train, Y_test = X[train], X[test], Y[train], Y[test]
#print "Fold {}".format(n)
| bsd-3-clause |
goddoe/GraphTool | src/GraphTool.py | 1 | 16886 | import numpy as np
import math
from scipy import signal
from openpyxl import Workbook
from openpyxl import load_workbook
#from openpyxl.compat import range
import openpyxl.compat
import matplotlib
matplotlib.use('TkAgg')
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2TkAgg
from matplotlib.backend_bases import key_press_handler
from matplotlib.figure import Figure
import tkinter as tk
from tkinter import ttk
from tkinter.filedialog import askopenfilename, asksaveasfilename
import os
from datetime import datetime
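# Simple 1-D signal helpers: Gaussian and moving-average smoothing plus a naive local-maximum
# peak detector, used by the GraphTool GUI below.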
class Filterer(object):
def __init__(self):
pass
def gaussianFilter(self, x, window_size=10, std=7):
kernel = signal.gaussian(window_size, std=std)
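        # Samples near the borders, where the window does not fully fit, are passed through unfiltered;
        # the weighted sum is normalised by the window size rather than by kernel.sum(), which slightly
        # attenuates the smoothed output.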
x_origin = np.copy(x)
x_result = np.zeros(x.shape)
for i, value in enumerate(x_origin):
offset = math.floor(window_size/2.0)
first_idx = i-offset
if first_idx < 0:
first_idx = 0
src = x_origin[first_idx : i+offset +1]
if len(src) != len(kernel):
x_result[i] = x_origin[i]
elif len(src) == len(kernel):
x_result[i] = np.sum( src * kernel / float(window_size))
return x_result
def averageFilter(self, x, window_size=3):
x_origin = np.copy(x)
x_result = np.zeros(x.shape)
for i, value in enumerate(x_origin):
offset = math.floor(window_size/2.0)
first_idx = i-offset
if first_idx < 0:
first_idx = 0
src = x_origin[first_idx: i+offset +1]
if len(src) != window_size:
x_result[i] = x_origin[i]
else:
x_result[i] = np.sum( src / float(window_size))
return x_result
def findPeak(self, x ):
x_result = np.zeros(x.shape)
for i in range(1, len(x)-1):
if x[i] > x[i-1] and x[i] >x[i+1]:
x_result[i] = 1
return x_result
class XlHandler(object):
def __init__(self):
self.wb = None
def getDataFrom(self, start, end):
return
def loadFile(self):
pass
def saveFile(self):
pass
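# Tkinter front end: open a workbook, pick a sheet and x/y cell ranges, preview the original and
# filtered curves with matplotlib, and export the filtered data and detected peaks back to Excel.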
class GraphTool(object):
def __init__(self):
self.mode = "gaussian"
self.wb = None
self.initGui()
def initGui(self):
self.text_size = 6
self.sheet_max_num_in_row = 8
self.root = tk.Tk()
self.root.wm_title("Graph Tool Controller")
self.root.columnconfigure(0, weight=1)
self.root.rowconfigure(1, weight=1)
self.initControlFrame(self.root)
self.initGraphFrame(self.root)
for child in self.controlframe.winfo_children():
child.grid_configure(sticky=(tk.W, tk.E ))
#for child in self.graphframe.winfo_children():
# child.grid_configure(sticky=(tk.W, tk.E, tk.N, tk.S ))
self.root.bind('<Return>', lambda event, i=self: i.process())
def initControlFrame(self, root):
###
# controlframe
controlframe = ttk.Frame(root)
controlroot = ttk.Frame(root)
controlroot.grid(row=0, column=0, sticky=(tk.N, tk.W, tk.E))
menuframe = ttk.Frame(controlroot)
controlframe = ttk.Frame(controlroot)
optionframe = ttk.Frame(controlroot)
sheetframe = ttk.Frame(controlroot)
#controlframe.columnconfigure(0, weight=1)
#controlframe.rowconfigure(0, weight=1)
menuframe.grid(row=0,column=0, sticky=(tk.W))
controlframe.grid(row=2, column=0, sticky=(tk.N, tk.W))
optionframe.grid(row=2,column=2, sticky=(tk.E))
sheetframe.grid(row=1, column=0, columnspan=self.sheet_max_num_in_row+1, sticky=(tk.W,tk.E))
# controlframe column configure
#for i in openpyxl.compat.range(4):
# controlframe.columnconfigure(i, weight=3%(i+1) )
### menuframe
ttk.Button(menuframe, text="open file", command=self.openFile).grid(row=0, column=0)
ttk.Button(menuframe, text="save file", command=self.saveFile).grid(row=0, column=1)
self.menuframe = menuframe
### controlframe
self.x_start_var = tk.StringVar()
self.x_end_var = tk.StringVar()
self.y_start_var = tk.StringVar()
self.y_end_var = tk.StringVar()
# x variable
ttk.Label(controlframe, text="x start").grid(row=0, column=0)
ttk.Label(controlframe, text="x end").grid(row=0, column=2)
x_start_entry = ttk.Entry(controlframe, textvariable=self.x_start_var, width=self.text_size)
x_start_entry.grid(row=0, column=1)
x_start_entry.focus()
ttk.Entry(controlframe, textvariable=self.x_end_var, width=self.text_size).grid(row=0, column=3)
# y variable
ttk.Label(controlframe, text="y start").grid(row=1, column=0)
ttk.Label(controlframe, text="y end").grid(row=1, column=2)
ttk.Entry(controlframe, textvariable=self.y_start_var, width=self.text_size).grid(row=1, column=1)
ttk.Entry(controlframe, textvariable=self.y_end_var, width=self.text_size).grid(row=1, column=3)
# Run button
self.controlframe = controlframe
self.current_sheet_text = tk.StringVar()
self.current_sheet_label = tk.Label(sheetframe, textvariable=self.current_sheet_text)
self.current_sheet_label.grid(row=0, column=0, sticky=(tk.W,tk.E))
self.current_sheet_text.set("sheet name")
self.sheetframe = sheetframe
##
# option Frame
## real_time_frame
real_time_frame=ttk.Frame(optionframe)
self.real_time_flag = tk.IntVar()
ttk.Checkbutton(real_time_frame, text="real time", variable=self.real_time_flag).grid(row=0,column=0)
ttk.Button(real_time_frame, text="run", command=self.process).grid(row=1, column=0)
ttk.Label(real_time_frame, text="click run or enter").grid(row=2, column=0)
self.real_time_flag.set(0)
## graph_limit_frame
graph_limit_frame = ttk.Frame(optionframe)
self.graph_limit_flag = tk.IntVar()
self.graph_max_y = tk.DoubleVar()
self.graph_min_y = tk.DoubleVar()
ttk.Checkbutton(graph_limit_frame, text="graph limit", variable=self.graph_limit_flag).grid(row=0, column=0)
ttk.Label(graph_limit_frame, text="max y").grid(row=1, column=0)
ttk.Entry(graph_limit_frame, textvariable=self.graph_max_y, width=self.text_size).grid(row=1, column=1)
ttk.Label(graph_limit_frame, text="min y").grid(row=2, column=0)
ttk.Entry(graph_limit_frame, textvariable=self.graph_min_y, width=self.text_size).grid(row=2, column=1)
self.graph_limit_flag.set(1)
self.graph_max_y.set(140)
self.graph_min_y.set(0)
## filter_original_frame
filter_original_frame = ttk.Frame(optionframe)
#filter_original_frame.grid(row=0, column=0)
self.original_flag = tk.IntVar()
ttk.Checkbutton(filter_original_frame, text="original", variable=self.original_flag).grid(row=0, column=0)
self.original_flag.set(1)
for child in filter_original_frame.winfo_children():
child.grid_configure(sticky=(tk.W, tk.N))
## filter_gaussian_frame
filter_gaussian_frame = ttk.Frame(optionframe)
#filter_gaussian_frame.grid(row=0, column=1)
self.gaussian_flag = tk.IntVar()
self.gaussian_std = tk.DoubleVar()
self.gaussian_window_size = tk.IntVar()
ttk.Checkbutton(filter_gaussian_frame, text="gaussian filter", variable=self.gaussian_flag).grid(row=0,column=0)
ttk.Label(filter_gaussian_frame, text="window size").grid(row=1, column=0)
ttk.Entry(filter_gaussian_frame, textvariable=self.gaussian_window_size, width=self.text_size).grid(row=1, column=1)
ttk.Label(filter_gaussian_frame, text="std").grid(row=2, column=0)
ttk.Entry(filter_gaussian_frame, textvariable=self.gaussian_std, width=self.text_size).grid(row=2, column=1)
self.gaussian_flag.set(0)
self.gaussian_std.set(3)
self.gaussian_window_size.set(3)
for child in filter_gaussian_frame.winfo_children():
child.grid_configure(sticky=(tk.W, tk.N))
## filter_average_frame
filter_average_frame = ttk.Frame(optionframe)
#filter_average_frame.grid(row=0, column=2)
self.average_flag = tk.IntVar()
self.average_window_size = tk.IntVar()
ttk.Checkbutton(filter_average_frame, text="average filter", variable=self.average_flag).grid(row=0,column=0)
ttk.Label(filter_average_frame, text="window size").grid(row=1, column=0)
ttk.Entry(filter_average_frame, textvariable=self.average_window_size, width=self.text_size).grid(row=1, column=1)
self.average_flag.set(0)
self.average_window_size.set(3)
for child in filter_average_frame.winfo_children():
child.grid_configure(sticky=(tk.W, tk.N))
for i, child in enumerate(optionframe.winfo_children()):
child.grid_configure(row=0, column=i, sticky=(tk.W, tk.N))
def initGraphFrame(self, root):
###
# graphframe
graphframe = ttk.Frame(root)
graphframe.grid(row=1, column=0, sticky=(tk.N, tk.S, tk.W, tk.E))
self.figure = Figure()
canvas = FigureCanvasTkAgg(self.figure, master=graphframe)
canvas.show()
canvas.get_tk_widget().grid(row=0, column=0)
toolbar = NavigationToolbar2TkAgg(canvas, graphframe)
toolbar.update()
canvas._tkcanvas.pack(side=tk.TOP, fill=tk.BOTH, expand=1)
self.canvas = canvas
self.toolbar = toolbar
self.graphframe = graphframe
def update(self):
if self.real_time_flag.get() == 1:
self.process()
self.root.after(100, self.update)
def draw(self):
pass
def process(self):
try:
filterer = Filterer()
x_data = self.ws[self.x_start_var.get():self.x_end_var.get()]
y_data = self.ws[self.y_start_var.get():self.y_end_var.get()]
x = []
y = []
for row in x_data:
for cell in row:
x.append(cell.value)
for row in y_data:
for cell in row:
y.append(cell.value)
# data read part
x = np.array(x)
y = np.array(y)
self.figure.clear()
f = self.figure.add_subplot(111)
if self.original_flag.get() == 1:
f.plot(x,y,color='black', label='original')
if self.average_flag.get() == 1:
y_filtered_with_average = filterer.averageFilter(y,window_size=self.average_window_size.get())
f.plot(x,y_filtered_with_average, color="green",label='average filter')
if self.gaussian_flag.get() == 1:
y_filtered_with_gaussian = filterer.gaussianFilter(y, window_size=self.gaussian_window_size.get(), std=self.gaussian_std.get())
f.plot(x,y_filtered_with_gaussian, color="red",label='gaussian filter')
if self.graph_limit_flag.get() == 1:
f.set_ylim([self.graph_min_y.get(), self.graph_max_y.get()])
# legend
f.legend(loc='upper left', frameon=False)
self.canvas.show()
#self.toolbar.update()
except:
pass
def openFile(self):
file_path = askopenfilename(#initialdir="~/",
filetypes =(("Excel Files", "*.xlsx"),("All Files","*.*")),
title = "Choose a file."
)
# when cancel the file dialog
if(file_path == ''):
return
self.wb = load_workbook(file_path, data_only=True)
for i, child in enumerate(self.sheetframe.winfo_children()):
if i != 0:
child.destroy()
self.makeSheetBtn()
def saveFile(self):
file_path = asksaveasfilename( defaultextension=".xlsx")
# TODO : add logger
        if not file_path:
return
wb = None
ws = None
if os.path.exists(file_path):
wb = load_workbook(file_path)
result_title = "result_"+str(datetime.now().year)+"_"+str(datetime.now().month)+"_"+str(datetime.now().day)+"_"+str(datetime.now().hour)+"_"+str(datetime.now().minute)+"_"+str(datetime.now().second)
ws = wb.create_sheet(title=result_title)
else:
wb = Workbook()
ws =wb.active
self.fillResult(ws)
wb.save(file_path)
def fillResult(self, ws):
filterer = Filterer()
x_data = self.ws[self.x_start_var.get():self.x_end_var.get()]
y_data = self.ws[self.y_start_var.get():self.y_end_var.get()]
x = []
y = []
for row in x_data:
for cell in row:
x.append(cell.value)
for row in y_data:
for cell in row:
y.append(cell.value)
# data read part
x = np.array(x)
y = np.array(y)
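        # Column layout of the result sheet: A = x, B/C = raw y and its peaks, D/E = Gaussian-filtered y
        # and peaks, F/G = average-filtered y and peaks; headers go in row 1 and data starts at row 2.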
## memory allocation
for row in openpyxl.compat.range(1, len(y)+3):
for col in openpyxl.compat.range(1,5):
ws.cell(row=row,column=col)
offset = 2
## save x column
col_name_x = 'A'
start_x = col_name_x+str(offset)
end_x = col_name_x+str(len(x)+offset-1)
A = ws[start_x:end_x]
ws['A1']='x'
for i, row in enumerate(A):
for cell in row:
cell.value = x[i]
def fillCol(ws, col_name, field_name,offset, data):
## save original
col_name = col_name
start_y = col_name+str(offset)
end_y = col_name+str(len(data)+offset-1)
col = ws[start_y:end_y]
ws[col_name+'1']= field_name
for i, row in enumerate(col):
for cell in row:
cell.value = data[i]
## save original
col_name_y = 'B'
start_y = col_name_y+str(offset)
end_y = col_name_y+str(len(x)+offset-1)
B = ws[start_y:end_y]
ws['B1']='y origin'
for i, row in enumerate(B):
for cell in row:
cell.value = y[i]
## save peak of original
peak_y = filterer.findPeak(y)
print(peak_y)
fillCol(ws, 'C', 'peak y origin', offset, peak_y)
## gaussian
y_filtered_with_gaussian = filterer.gaussianFilter(y, window_size=self.gaussian_window_size.get(), std=self.gaussian_std.get())
## save original
col_name_y_gaussian = 'D'
start_y_gaussian = col_name_y_gaussian+str(offset)
end_y_gaussian = col_name_y_gaussian+str(len(x)+offset-1)
D = ws[start_y_gaussian:end_y_gaussian]
ws['D1']='y filtered with gaussian kernel'
for i, row in enumerate(D):
for cell in row:
cell.value = y_filtered_with_gaussian[i]
## save peak of gaussian
peak_y_gaussian = filterer.findPeak(y_filtered_with_gaussian)
fillCol(ws, 'E', 'peak y gaussian', offset, peak_y_gaussian)
y_filtered_with_average = filterer.averageFilter(y,window_size=self.average_window_size.get())
## save original
col_name_y_average = 'F'
start_y_average = col_name_y_average+str(offset)
end_y_average = col_name_y_average+str(len(x)+offset-1)
F = ws[start_y_average:end_y_average]
ws['F1']='y filtered with average kernel'
for i, row in enumerate(F):
for cell in row:
cell.value = y_filtered_with_average[i]
## save peak of average
peak_y_average = filterer.findPeak(y_filtered_with_average)
fillCol(ws, 'G', 'peak y average', offset, peak_y_average)
def makeSheetBtn(self):
sheet_names = self.wb.get_sheet_names()
for i, sheet_name in enumerate(sheet_names):
tmp = sheet_name
ttk.Button(self.sheetframe, text=sheet_name, command=lambda sheet_name=sheet_name: self.selectSheet(sheet_name)).grid(row=math.floor(i/self.sheet_max_num_in_row), column=(i+1)%self.sheet_max_num_in_row,sticky=(tk.W))
def selectSheet(self, sheet_name):
self.current_sheet_text.set(sheet_name)
self.ws = self.wb[sheet_name]
def run(self):
self.root.after(100, self.update)
self.root.mainloop()
if __name__=='__main__':
GraphTool().run()
#main()
#onlyOneFilter()
| apache-2.0 |
keshr3106/ThinkStats2 | code/populations.py | 68 | 2609 | """This file contains code used in "Think Stats",
by Allen B. Downey, available from greenteapress.com
Copyright 2010 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
from __future__ import print_function
import csv
import logging
import sys
import numpy as np
import pandas
import thinkplot
import thinkstats2
def ReadData(filename='PEP_2012_PEPANNRES_with_ann.csv'):
"""Reads filename and returns populations in thousands
filename: string
returns: pandas Series of populations in thousands
"""
df = pandas.read_csv(filename, header=None, skiprows=2,
encoding='iso-8859-1')
populations = df[7]
populations.replace(0, np.nan, inplace=True)
return populations.dropna()
def MakeFigures():
"""Plots the CDF of populations in several forms.
On a log-log scale the tail of the CCDF looks like a straight line,
which suggests a Pareto distribution, but that turns out to be misleading.
On a log-x scale the distribution has the characteristic sigmoid of
a lognormal distribution.
The normal probability plot of log(sizes) confirms that the data fit the
lognormal model very well.
Many phenomena that have been described with Pareto models can be described
as well, or better, with lognormal models.
"""
pops = ReadData()
print('Number of cities/towns', len(pops))
log_pops = np.log10(pops)
cdf = thinkstats2.Cdf(pops, label='data')
cdf_log = thinkstats2.Cdf(log_pops, label='data')
# pareto plot
xs, ys = thinkstats2.RenderParetoCdf(xmin=5000, alpha=1.4, low=0, high=1e7)
thinkplot.Plot(np.log10(xs), 1-ys, label='model', color='0.8')
thinkplot.Cdf(cdf_log, complement=True)
thinkplot.Config(xlabel='log10 population',
ylabel='CCDF',
yscale='log')
thinkplot.Save(root='populations_pareto')
# lognormal plot
thinkplot.PrePlot(cols=2)
mu, sigma = log_pops.mean(), log_pops.std()
xs, ps = thinkstats2.RenderNormalCdf(mu, sigma, low=0, high=8)
thinkplot.Plot(xs, ps, label='model', color='0.8')
thinkplot.Cdf(cdf_log)
thinkplot.Config(xlabel='log10 population',
ylabel='CDF')
thinkplot.SubPlot(2)
thinkstats2.NormalProbabilityPlot(log_pops, label='data')
thinkplot.Config(xlabel='z',
ylabel='log10 population',
xlim=[-5, 5])
thinkplot.Save(root='populations_normal')
def main():
thinkstats2.RandomSeed(17)
MakeFigures()
if __name__ == "__main__":
main()
| gpl-3.0 |
sdh11/gnuradio | gr-filter/examples/interpolate.py | 7 | 8811 | #!/usr/bin/env python
#
# Copyright 2009,2012,2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
from gnuradio import gr
from gnuradio import blocks
from gnuradio import filter
import sys, time
import numpy
try:
from gnuradio import analog
except ImportError:
sys.stderr.write("Error: Program requires gr-analog.\n")
sys.exit(1)
try:
import pylab
from pylab import mlab
except ImportError:
sys.stderr.write("Error: Program requires matplotlib (see: matplotlib.sourceforge.net).\n")
sys.exit(1)
class pfb_top_block(gr.top_block):
def __init__(self):
gr.top_block.__init__(self)
self._N = 100000 # number of samples to use
self._fs = 2000 # initial sampling rate
self._interp = 5 # Interpolation rate for PFB interpolator
self._ainterp = 5.5 # Resampling rate for the PFB arbitrary resampler
# Frequencies of the signals we construct
freq1 = 100
freq2 = 200
# Create a set of taps for the PFB interpolator
# This is based on the post-interpolation sample rate
self._taps = filter.firdes.low_pass_2(self._interp,
self._interp*self._fs,
freq2+50, 50,
attenuation_dB=120,
window=filter.firdes.WIN_BLACKMAN_hARRIS)
# Create a set of taps for the PFB arbitrary resampler
# The filter size is the number of filters in the filterbank; 32 will give very low side-lobes,
# and larger numbers will reduce these even farther
# The taps in this filter are based on a sampling rate of the filter size since it acts
# internally as an interpolator.
flt_size = 32
self._taps2 = filter.firdes.low_pass_2(flt_size,
flt_size*self._fs,
freq2+50, 150,
attenuation_dB=120,
window=filter.firdes.WIN_BLACKMAN_hARRIS)
# Calculate the number of taps per channel for our own information
tpc = numpy.ceil(float(len(self._taps)) / float(self._interp))
print("Number of taps: ", len(self._taps))
print("Number of filters: ", self._interp)
print("Taps per channel: ", tpc)
# Create a couple of signals at different frequencies
self.signal1 = analog.sig_source_c(self._fs, analog.GR_SIN_WAVE, freq1, 0.5)
self.signal2 = analog.sig_source_c(self._fs, analog.GR_SIN_WAVE, freq2, 0.5)
self.signal = blocks.add_cc()
self.head = blocks.head(gr.sizeof_gr_complex, self._N)
# Construct the PFB interpolator filter
self.pfb = filter.pfb.interpolator_ccf(self._interp, self._taps)
# Construct the PFB arbitrary resampler filter
self.pfb_ar = filter.pfb.arb_resampler_ccf(self._ainterp, self._taps2, flt_size)
self.snk_i = blocks.vector_sink_c()
#self.pfb_ar.pfb.print_taps()
#self.pfb.pfb.print_taps()
# Connect the blocks
self.connect(self.signal1, self.head, (self.signal,0))
self.connect(self.signal2, (self.signal,1))
self.connect(self.signal, self.pfb)
self.connect(self.signal, self.pfb_ar)
self.connect(self.signal, self.snk_i)
# Create the sink for the interpolated signals
self.snk1 = blocks.vector_sink_c()
self.snk2 = blocks.vector_sink_c()
self.connect(self.pfb, self.snk1)
self.connect(self.pfb_ar, self.snk2)
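# Run the flowgraph, then plot the spectrum and time-domain samples of the input signal,
# the PFB interpolator output, and the arbitrary resampler output.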
def main():
tb = pfb_top_block()
tstart = time.time()
tb.run()
tend = time.time()
print("Run time: %f" % (tend - tstart))
if 1:
fig1 = pylab.figure(1, figsize=(12,10), facecolor="w")
fig2 = pylab.figure(2, figsize=(12,10), facecolor="w")
fig3 = pylab.figure(3, figsize=(12,10), facecolor="w")
Ns = 10000
Ne = 10000
fftlen = 8192
winfunc = numpy.blackman
# Plot input signal
fs = tb._fs
d = tb.snk_i.data()[Ns:Ns+Ne]
sp1_f = fig1.add_subplot(2, 1, 1)
X,freq = mlab.psd(d, NFFT=fftlen, noverlap=fftlen / 4, Fs=fs,
window = lambda d: d*winfunc(fftlen),
scale_by_freq=True)
X_in = 10.0*numpy.log10(abs(numpy.fft.fftshift(X)))
f_in = numpy.arange(-fs / 2.0, fs / 2.0, fs / float(X_in.size))
p1_f = sp1_f.plot(f_in, X_in, "b")
sp1_f.set_xlim([min(f_in), max(f_in)+1])
sp1_f.set_ylim([-200.0, 50.0])
sp1_f.set_title("Input Signal", weight="bold")
sp1_f.set_xlabel("Frequency (Hz)")
sp1_f.set_ylabel("Power (dBW)")
Ts = 1.0 / fs
Tmax = len(d)*Ts
t_in = numpy.arange(0, Tmax, Ts)
x_in = numpy.array(d)
sp1_t = fig1.add_subplot(2, 1, 2)
p1_t = sp1_t.plot(t_in, x_in.real, "b-o")
#p1_t = sp1_t.plot(t_in, x_in.imag, "r-o")
sp1_t.set_ylim([-2.5, 2.5])
sp1_t.set_title("Input Signal", weight="bold")
sp1_t.set_xlabel("Time (s)")
sp1_t.set_ylabel("Amplitude")
# Plot output of PFB interpolator
fs_int = tb._fs*tb._interp
sp2_f = fig2.add_subplot(2, 1, 1)
d = tb.snk1.data()[Ns:Ns+(tb._interp*Ne)]
X,freq = mlab.psd(d, NFFT=fftlen, noverlap=fftlen / 4, Fs=fs,
window = lambda d: d*winfunc(fftlen),
scale_by_freq=True)
X_o = 10.0*numpy.log10(abs(numpy.fft.fftshift(X)))
f_o = numpy.arange(-fs_int / 2.0, fs_int / 2.0, fs_int / float(X_o.size))
p2_f = sp2_f.plot(f_o, X_o, "b")
sp2_f.set_xlim([min(f_o), max(f_o)+1])
sp2_f.set_ylim([-200.0, 50.0])
sp2_f.set_title("Output Signal from PFB Interpolator", weight="bold")
sp2_f.set_xlabel("Frequency (Hz)")
sp2_f.set_ylabel("Power (dBW)")
Ts_int = 1.0 / fs_int
Tmax = len(d)*Ts_int
t_o = numpy.arange(0, Tmax, Ts_int)
x_o1 = numpy.array(d)
sp2_t = fig2.add_subplot(2, 1, 2)
p2_t = sp2_t.plot(t_o, x_o1.real, "b-o")
#p2_t = sp2_t.plot(t_o, x_o.imag, "r-o")
sp2_t.set_ylim([-2.5, 2.5])
sp2_t.set_title("Output Signal from PFB Interpolator", weight="bold")
sp2_t.set_xlabel("Time (s)")
sp2_t.set_ylabel("Amplitude")
# Plot output of PFB arbitrary resampler
fs_aint = tb._fs * tb._ainterp
sp3_f = fig3.add_subplot(2, 1, 1)
d = tb.snk2.data()[Ns:Ns+(tb._interp*Ne)]
X,freq = mlab.psd(d, NFFT=fftlen, noverlap=fftlen / 4, Fs=fs,
window = lambda d: d*winfunc(fftlen),
scale_by_freq=True)
X_o = 10.0*numpy.log10(abs(numpy.fft.fftshift(X)))
f_o = numpy.arange(-fs_aint / 2.0, fs_aint / 2.0, fs_aint / float(X_o.size))
p3_f = sp3_f.plot(f_o, X_o, "b")
sp3_f.set_xlim([min(f_o), max(f_o)+1])
sp3_f.set_ylim([-200.0, 50.0])
sp3_f.set_title("Output Signal from PFB Arbitrary Resampler", weight="bold")
sp3_f.set_xlabel("Frequency (Hz)")
sp3_f.set_ylabel("Power (dBW)")
Ts_aint = 1.0 / fs_aint
Tmax = len(d)*Ts_aint
t_o = numpy.arange(0, Tmax, Ts_aint)
x_o2 = numpy.array(d)
sp3_f = fig3.add_subplot(2, 1, 2)
p3_f = sp3_f.plot(t_o, x_o2.real, "b-o")
p3_f = sp3_f.plot(t_o, x_o1.real, "m-o")
#p3_f = sp3_f.plot(t_o, x_o2.imag, "r-o")
sp3_f.set_ylim([-2.5, 2.5])
sp3_f.set_title("Output Signal from PFB Arbitrary Resampler", weight="bold")
sp3_f.set_xlabel("Time (s)")
sp3_f.set_ylabel("Amplitude")
pylab.show()
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
pass
| gpl-3.0 |
Clyde-fare/scikit-learn | sklearn/linear_model/passive_aggressive.py | 97 | 10879 | # Authors: Rob Zinkov, Mathieu Blondel
# License: BSD 3 clause
from .stochastic_gradient import BaseSGDClassifier
from .stochastic_gradient import BaseSGDRegressor
from .stochastic_gradient import DEFAULT_EPSILON
class PassiveAggressiveClassifier(BaseSGDClassifier):
"""Passive Aggressive Classifier
Read more in the :ref:`User Guide <passive_aggressive>`.
Parameters
----------
C : float
Maximum step size (regularization). Defaults to 1.0.
fit_intercept : bool, default=False
Whether the intercept should be estimated or not. If False, the
data is assumed to be already centered.
n_iter : int, optional
The number of passes over the training data (aka epochs).
Defaults to 5.
shuffle : bool, default=True
Whether or not the training data should be shuffled after each epoch.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data.
verbose : integer, optional
The verbosity level
n_jobs : integer, optional
The number of CPUs to use to do the OVA (One Versus All, for
multi-class problems) computation. -1 means 'all CPUs'. Defaults
to 1.
loss : string, optional
The loss function to be used:
hinge: equivalent to PA-I in the reference paper.
squared_hinge: equivalent to PA-II in the reference paper.
warm_start : bool, optional
When set to True, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
class_weight : dict, {class_label: weight} or "balanced" or None, optional
Preset for the class_weight fit parameter.
Weights associated with classes. If not given, all classes
are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
Attributes
----------
coef_ : array, shape = [1, n_features] if n_classes == 2 else [n_classes,\
n_features]
Weights assigned to the features.
intercept_ : array, shape = [1] if n_classes == 2 else [n_classes]
Constants in decision function.
See also
--------
SGDClassifier
Perceptron
References
----------
Online Passive-Aggressive Algorithms
<http://jmlr.csail.mit.edu/papers/volume7/crammer06a/crammer06a.pdf>
K. Crammer, O. Dekel, J. Keshat, S. Shalev-Shwartz, Y. Singer - JMLR (2006)
"""
def __init__(self, C=1.0, fit_intercept=True, n_iter=5, shuffle=True,
verbose=0, loss="hinge", n_jobs=1, random_state=None,
warm_start=False, class_weight=None):
BaseSGDClassifier.__init__(self,
penalty=None,
fit_intercept=fit_intercept,
n_iter=n_iter,
shuffle=shuffle,
verbose=verbose,
random_state=random_state,
eta0=1.0,
warm_start=warm_start,
class_weight=class_weight,
n_jobs=n_jobs)
self.C = C
self.loss = loss
def partial_fit(self, X, y, classes=None):
"""Fit linear model with Passive Aggressive algorithm.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Subset of the training data
y : numpy array of shape [n_samples]
Subset of the target values
classes : array, shape = [n_classes]
Classes across all calls to partial_fit.
            Can be obtained via `np.unique(y_all)`, where y_all is the
target vector of the entire dataset.
This argument is required for the first call to partial_fit
and can be omitted in the subsequent calls.
Note that y doesn't need to contain all labels in `classes`.
Returns
-------
self : returns an instance of self.
"""
if self.class_weight == 'balanced':
raise ValueError("class_weight 'balanced' is not supported for "
"partial_fit. For 'balanced' weights, use "
"`sklearn.utils.compute_class_weight` with "
"`class_weight='balanced'`. In place of y you "
"can use a large enough subset of the full "
"training set target to properly estimate the "
"class frequency distributions. Pass the "
"resulting weights as the class_weight "
"parameter.")
lr = "pa1" if self.loss == "hinge" else "pa2"
return self._partial_fit(X, y, alpha=1.0, C=self.C,
loss="hinge", learning_rate=lr, n_iter=1,
classes=classes, sample_weight=None,
coef_init=None, intercept_init=None)
def fit(self, X, y, coef_init=None, intercept_init=None):
"""Fit linear model with Passive Aggressive algorithm.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training data
y : numpy array of shape [n_samples]
Target values
coef_init : array, shape = [n_classes,n_features]
The initial coefficients to warm-start the optimization.
intercept_init : array, shape = [n_classes]
The initial intercept to warm-start the optimization.
Returns
-------
self : returns an instance of self.
"""
lr = "pa1" if self.loss == "hinge" else "pa2"
return self._fit(X, y, alpha=1.0, C=self.C,
loss="hinge", learning_rate=lr,
coef_init=coef_init, intercept_init=intercept_init)
class PassiveAggressiveRegressor(BaseSGDRegressor):
"""Passive Aggressive Regressor
Read more in the :ref:`User Guide <passive_aggressive>`.
Parameters
----------
C : float
Maximum step size (regularization). Defaults to 1.0.
epsilon : float
If the difference between the current prediction and the correct label
is below this threshold, the model is not updated.
fit_intercept : bool
Whether the intercept should be estimated or not. If False, the
data is assumed to be already centered. Defaults to True.
n_iter : int, optional
The number of passes over the training data (aka epochs).
Defaults to 5.
shuffle : bool, default=True
Whether or not the training data should be shuffled after each epoch.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data.
verbose : integer, optional
The verbosity level
loss : string, optional
The loss function to be used:
epsilon_insensitive: equivalent to PA-I in the reference paper.
squared_epsilon_insensitive: equivalent to PA-II in the reference
paper.
warm_start : bool, optional
When set to True, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
Attributes
----------
coef_ : array, shape = [1, n_features] if n_classes == 2 else [n_classes,\
n_features]
Weights assigned to the features.
intercept_ : array, shape = [1] if n_classes == 2 else [n_classes]
Constants in decision function.
See also
--------
SGDRegressor
References
----------
Online Passive-Aggressive Algorithms
<http://jmlr.csail.mit.edu/papers/volume7/crammer06a/crammer06a.pdf>
K. Crammer, O. Dekel, J. Keshat, S. Shalev-Shwartz, Y. Singer - JMLR (2006)
"""
def __init__(self, C=1.0, fit_intercept=True, n_iter=5, shuffle=True,
verbose=0, loss="epsilon_insensitive",
epsilon=DEFAULT_EPSILON, random_state=None, warm_start=False):
BaseSGDRegressor.__init__(self,
penalty=None,
l1_ratio=0,
epsilon=epsilon,
eta0=1.0,
fit_intercept=fit_intercept,
n_iter=n_iter,
shuffle=shuffle,
verbose=verbose,
random_state=random_state,
warm_start=warm_start)
self.C = C
self.loss = loss
def partial_fit(self, X, y):
"""Fit linear model with Passive Aggressive algorithm.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Subset of training data
y : numpy array of shape [n_samples]
Subset of target values
Returns
-------
self : returns an instance of self.
"""
lr = "pa1" if self.loss == "epsilon_insensitive" else "pa2"
return self._partial_fit(X, y, alpha=1.0, C=self.C,
loss="epsilon_insensitive",
learning_rate=lr, n_iter=1,
sample_weight=None,
coef_init=None, intercept_init=None)
def fit(self, X, y, coef_init=None, intercept_init=None):
"""Fit linear model with Passive Aggressive algorithm.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training data
y : numpy array of shape [n_samples]
Target values
coef_init : array, shape = [n_features]
The initial coefficients to warm-start the optimization.
intercept_init : array, shape = [1]
The initial intercept to warm-start the optimization.
Returns
-------
self : returns an instance of self.
"""
lr = "pa1" if self.loss == "epsilon_insensitive" else "pa2"
return self._fit(X, y, alpha=1.0, C=self.C,
loss="epsilon_insensitive",
learning_rate=lr,
coef_init=coef_init,
intercept_init=intercept_init)
| bsd-3-clause |
CarterBain/AlephNull | tests/test_tradingcalendar.py | 3 | 8728 | #
# Copyright 2013 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unittest import TestCase
from zipline.utils import tradingcalendar
from zipline.utils import tradingcalendar_lse
from zipline.utils import tradingcalendar_tse
import pytz
import datetime
from zipline.finance.trading import TradingEnvironment
import pandas as pd
from pandas import DatetimeIndex
from nose.tools import nottest
class TestTradingCalendar(TestCase):
def setUp(self):
today = pd.Timestamp('today', tz='UTC')
self.end = DatetimeIndex([today])
@nottest
def test_calendar_vs_environment(self):
"""
test_calendar_vs_environment checks whether the
historical data from yahoo matches our rule based system.
handy, if not canonical, reference:
http://www.chronos-st.org/NYSE_Observed_Holidays-1885-Present.html
"""
env = TradingEnvironment()
env_start_index = \
env.trading_days.searchsorted(tradingcalendar.start)
env_days = env.trading_days[env_start_index:]
cal_days = tradingcalendar.trading_days
self.check_days(env_days, cal_days)
@nottest
def test_lse_calendar_vs_environment(self):
env = TradingEnvironment(
bm_symbol='^FTSE',
exchange_tz='Europe/London'
)
env_start_index = \
env.trading_days.searchsorted(tradingcalendar_lse.start)
env_days = env.trading_days[env_start_index:]
cal_days = tradingcalendar_lse.trading_days
self.check_days(env_days, cal_days)
@nottest
def test_tse_calendar_vs_environment(self):
env = TradingEnvironment(
bm_symbol='^GSPTSE',
exchange_tz='US/Eastern'
)
env_start_index = \
env.trading_days.searchsorted(tradingcalendar_tse.start)
env_days = env.trading_days[env_start_index:]
cal_days = tradingcalendar_tse.trading_days
self.check_days(env_days, cal_days)
def check_days(self, env_days, cal_days):
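        # The rule-based calendar and the trading environment must agree exactly: report any days
        # present in one but missing from the other.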
diff = env_days - cal_days
self.assertEqual(
len(diff),
0,
"{diff} should be empty".format(diff=diff)
)
diff2 = cal_days - env_days
self.assertEqual(
len(diff2),
0,
"{diff} should be empty".format(diff=diff2)
)
def test_newyears(self):
"""
Check whether tradingcalendar contains certain dates.
"""
# January 2012
# Su Mo Tu We Th Fr Sa
# 1 2 3 4 5 6 7
# 8 9 10 11 12 13 14
# 15 16 17 18 19 20 21
# 22 23 24 25 26 27 28
# 29 30 31
day_after_new_years_sunday = datetime.datetime(
2012, 1, 2, tzinfo=pytz.utc)
self.assertNotIn(day_after_new_years_sunday,
tradingcalendar.trading_days,
"""
If NYE falls on a weekend, {0} the Monday after is a holiday.
""".strip().format(day_after_new_years_sunday)
)
first_trading_day_after_new_years_sunday = datetime.datetime(
2012, 1, 3, tzinfo=pytz.utc)
self.assertIn(first_trading_day_after_new_years_sunday,
tradingcalendar.trading_days,
"""
If NYE falls on a weekend, {0} the Tuesday after is the first trading day.
""".strip().format(first_trading_day_after_new_years_sunday)
)
# January 2013
# Su Mo Tu We Th Fr Sa
# 1 2 3 4 5
# 6 7 8 9 10 11 12
# 13 14 15 16 17 18 19
# 20 21 22 23 24 25 26
# 27 28 29 30 31
new_years_day = datetime.datetime(
2013, 1, 1, tzinfo=pytz.utc)
self.assertNotIn(new_years_day,
tradingcalendar.trading_days,
"""
If NYE falls during the week, e.g. {0}, it is a holiday.
""".strip().format(new_years_day)
)
first_trading_day_after_new_years = datetime.datetime(
2013, 1, 2, tzinfo=pytz.utc)
self.assertIn(first_trading_day_after_new_years,
tradingcalendar.trading_days,
"""
If the day after NYE falls during the week, {0} \
is the first trading day.
""".strip().format(first_trading_day_after_new_years)
)
def test_thanksgiving(self):
"""
Check tradingcalendar Thanksgiving dates.
"""
# November 2005
# Su Mo Tu We Th Fr Sa
# 1 2 3 4 5
# 6 7 8 9 10 11 12
# 13 14 15 16 17 18 19
# 20 21 22 23 24 25 26
# 27 28 29 30
thanksgiving_with_four_weeks = datetime.datetime(
2005, 11, 24, tzinfo=pytz.utc)
self.assertNotIn(thanksgiving_with_four_weeks,
tradingcalendar.trading_days,
"""
If Nov has 4 Thursdays, {0} Thanksgiving is the last Thursday.
""".strip().format(thanksgiving_with_four_weeks)
)
# November 2006
# Su Mo Tu We Th Fr Sa
# 1 2 3 4
# 5 6 7 8 9 10 11
# 12 13 14 15 16 17 18
# 19 20 21 22 23 24 25
# 26 27 28 29 30
thanksgiving_with_five_weeks = datetime.datetime(
2006, 11, 23, tzinfo=pytz.utc)
self.assertNotIn(thanksgiving_with_five_weeks,
tradingcalendar.trading_days,
"""
If Nov has 5 Thursdays, {0} Thanksgiving is not the last week.
""".strip().format(thanksgiving_with_five_weeks)
)
first_trading_day_after_new_years_sunday = datetime.datetime(
2012, 1, 3, tzinfo=pytz.utc)
self.assertIn(first_trading_day_after_new_years_sunday,
tradingcalendar.trading_days,
"""
If NYE falls on a weekend, {0} the Tuesday after is the first trading day.
""".strip().format(first_trading_day_after_new_years_sunday)
)
def test_day_after_thanksgiving(self):
early_closes = tradingcalendar.get_early_closes(
tradingcalendar.start,
tradingcalendar.end.replace(year=tradingcalendar.end.year + 1)
)
# November 2012
# Su Mo Tu We Th Fr Sa
# 1 2 3
# 4 5 6 7 8 9 10
# 11 12 13 14 15 16 17
# 18 19 20 21 22 23 24
# 25 26 27 28 29 30
fourth_friday = datetime.datetime(2012, 11, 23, tzinfo=pytz.utc)
self.assertIn(fourth_friday, early_closes)
# November 2013
# Su Mo Tu We Th Fr Sa
# 1 2
# 3 4 5 6 7 8 9
# 10 11 12 13 14 15 16
# 17 18 19 20 21 22 23
# 24 25 26 27 28 29 30
fifth_friday = datetime.datetime(2013, 11, 29, tzinfo=pytz.utc)
self.assertIn(fifth_friday, early_closes)
def test_early_close_independence_day_thursday(self):
"""
Until 2013, the market closed early the Friday after an
Independence Day on Thursday. Since then, the early close is on
Wednesday.
"""
early_closes = tradingcalendar.get_early_closes(
tradingcalendar.start,
tradingcalendar.end.replace(year=tradingcalendar.end.year + 1)
)
# July 2002
# Su Mo Tu We Th Fr Sa
# 1 2 3 4 5 6
# 7 8 9 10 11 12 13
# 14 15 16 17 18 19 20
# 21 22 23 24 25 26 27
# 28 29 30 31
wednesday_before = datetime.datetime(2002, 7, 3, tzinfo=pytz.utc)
friday_after = datetime.datetime(2002, 7, 5, tzinfo=pytz.utc)
self.assertNotIn(wednesday_before, early_closes)
self.assertIn(friday_after, early_closes)
# July 2013
# Su Mo Tu We Th Fr Sa
# 1 2 3 4 5 6
# 7 8 9 10 11 12 13
# 14 15 16 17 18 19 20
# 21 22 23 24 25 26 27
# 28 29 30 31
wednesday_before = datetime.datetime(2013, 7, 3, tzinfo=pytz.utc)
friday_after = datetime.datetime(2013, 7, 5, tzinfo=pytz.utc)
self.assertIn(wednesday_before, early_closes)
self.assertNotIn(friday_after, early_closes)
| apache-2.0 |
MengGuo/P_MAS_TG | Intro/Examples/to_matlab/square_world.py | 2 | 6050 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# export PYTHONPATH=$PYTHONPATH:/to/your/P_MAS_TG
import time
from P_MAS_TG.ts import MotionFts, ActionModel, MotActModel
from P_MAS_TG.planner import ltl_planner
from networkx import draw_networkx, spring_layout
from itertools import product
import matplotlib.pyplot as plt
rectworld_nodename = "x{:d}y{:d}".format
def create_rectworld(Xs, Ys, eight_connected=False):
'''
create a rectangular world with square cells.
eight_connected True if diagonal motion is allowed.
'''
node_dict = {}
symbols = []
X_range = range(0, Xs)
Y_range = range(0, Ys)
for X in X_range:
for Y in Y_range:
node_name = rectworld_nodename(X, Y)
node_dict[(X,Y)] = set([node_name,])
symbols.append(node_name)
g = MotionFts(node_dict, symbols, 'rectworld')
for X in X_range:
for Y in Y_range:
nodef = (X, Y)
if eight_connected:
offsets = product([-1,0,1],[-1,0,1])
else:
offsets = [(0,0),(0,1),(0,-1),(1,0),(-1,0)]
for (dx,dy) in offsets:
xt = X + dx
yt = Y + dy
if xt not in X_range or yt not in Y_range:
continue
nodet = (xt, yt)
unit_cost = 1
g.add_edge(nodef, nodet, weight=unit_cost*(abs(dx)+abs(dy)))
return g
# create motion model
robot_motion = create_rectworld(4,4,True)
robot_motion.set_initial((0,0))
# empty action model
robot_action = ActionModel(dict())
# complete robot model
robot_model = MotActModel(robot_motion, robot_action)
# task formula
hard_task = '(<> x0y3) && (<> x3y2)'
soft_task = None
# set planner
robot_planner = ltl_planner(robot_model, hard_task, soft_task)
# synthesis
start = time.time()
robot_planner.optimal(10,'static')
print '------------------------------'
print 'Full construction and synthesis done within %.2fs' %(time.time()-start)
#----------------------------------------
#----------------------------------------
# save transition system to csv.dat that Matlab wants
# important to transform string names to indexs
ts = robot_planner.product.graph['ts']
ts_nodes_list = ts.nodes()
# save node name, index pairs
# also save initial, accept stats
f_ts_node = open('data/ts_node.dat','w')
f_ts_initial = open('data/ts_node_initial.dat','w')
for nd_id, nd in enumerate(ts_nodes_list):
# ts_node_id, ts_node_x, ts_node_y
f_ts_node.write('%d,%d,%d\n' %(nd_id, nd[0][0], nd[0][1]))
ts.nodes[nd]['index'] = nd_id
if nd in ts.graph['initial']:
f_ts_initial.write('%d\n' %nd_id)
f_ts_node.close()
f_ts_initial.close()
# save edges, node name swapped by index
f_ts_edge = open('data/ts_edge.dat','w')
for e in ts.edges():
id_ef = ts.nodes[e[0]]['index']
id_et = ts.nodes[e[1]]['index']
f_ts_edge.write('%d,%d\n' %(id_ef, id_et))
f_ts_edge.close()
#----------------------------------------
#----------------------------------------
# save Buchi automata to csv.dat that Matlab wants
# important to transform string names to indices
buchi = robot_planner.product.graph['buchi']
buchi_nodes_list = buchi.nodes()
# save node name, index pairs
# also save initial, accept states
f_buchi_node = open('data/buchi_node.dat','w')
f_buchi_initial = open('data/buchi_node_initial.dat','w')
f_buchi_accept = open('data/buchi_node_accept.dat','w')
for nd_id, nd in enumerate(buchi_nodes_list):
buchi.nodes[nd]['index'] = nd_id
f_buchi_node.write('%d\n' %nd_id)
if nd in buchi.graph['initial']:
f_buchi_initial.write('%d\n' %nd_id)
if nd in buchi.graph['accept']:
f_buchi_accept.write('%d\n' %nd_id)
f_buchi_node.close()
f_buchi_initial.close()
f_buchi_accept.close()
# save edges, node name swapped by index
f_buchi_edge = open('data/buchi_edge.dat','w')
for e in buchi.edges():
id_ef = buchi.nodes[e[0]]['index']
id_et = buchi.nodes[e[1]]['index']
f_buchi_edge.write('%d,%d\n' %(id_ef, id_et))
f_buchi_edge.close()
#----------------------------------------
#----------------------------------------
# save the product automaton to .dat (CSV) files that Matlab can read
# important: transform string node names to integer indices
prod = robot_planner.product
prod_nodes_list = prod.nodes()
# save node name, index pairs
# also save initial, accept states
f_prod_node = open('data/prod_node.dat','w')
f_prod_initial = open('data/prod_node_initial.dat','w')
f_prod_accept = open('data/prod_node_accept.dat','w')
for nd_id, nd in enumerate(prod_nodes_list):
#f_prod_node.write('%d,%s\n' %(nd_id, nd))
# prod_node_id, ts_node_x, ts_node_y
prod.nodes[nd]['index'] = nd_id
f_prod_node.write('%d,%d,%d,%d\n' %(nd_id, nd[0][0][0], nd[0][0][1], buchi.nodes[nd[1]]['index']))
if nd in prod.graph['initial']:
f_prod_initial.write('%d\n' %nd_id)
if nd in prod.graph['accept']:
f_prod_accept.write('%d\n' %nd_id)
f_prod_node.close()
f_prod_initial.close()
f_prod_accept.close()
# save edges, node name swapped by index
f_prod_edge = open('data/prod_edge.dat','w')
for e in prod.edges():
id_ef = prod.nodes[e[0]]['index']
id_et = prod.nodes[e[1]]['index']
f_prod_edge.write('%d,%d\n' %(id_ef, id_et))
f_prod_edge.close()
print '------------------------------'
print 'Check *.dat files and load them in Matlab.'
print '------------------------------'
print 'Check *.pdf for visualization of ts, buchi and prod'
draw_networkx(ts,pos=spring_layout(ts))
plt.savefig('figures/ts.pdf',bbox_inches='tight')
plt.clf()
draw_networkx(buchi,pos=spring_layout(buchi))
plt.savefig('figures/buchi.pdf',bbox_inches='tight')
plt.clf()
draw_networkx(prod,pos=spring_layout(prod))
plt.savefig('figures/prod.pdf',bbox_inches='tight')
#-------------------
# load all .dat by 'csvread()' in matlab
#------------------- | gpl-2.0 |
crleblanc/cython_talk_2105 | time_compare.py | 1 | 3644 | #!/usr/bin/env python
# python script to do timings on various 2D laplace implementations
from __future__ import print_function
import time
import argparse
import numpy as np
import json
import matplotlib
from matplotlib import pyplot as plt
import py_laplace
import np_laplace
import cy_laplace
import cy_wrap_claplace
import numba_laplace # requires numba, easiest to use Anaconda distribution
from run_comparison import run_all
def getargs():
parser = argparse.ArgumentParser(description='Time and/or plot the timings of the Laplace benchmarks')
parser.add_argument('-t', '--timing', dest='timing', action='store_true',
help='Run the timing operation')
parser.add_argument('-p', '--plot', dest='plot', action='store_true',
help='Plot the data from results.json obtained from an earlier run, see --timing')
return parser.parse_args()
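# plot_results() below reads each benchmark entry as a dict with 'name', 'array_shapes'
# and 'times' keys; a sketch of what results.json is assumed to look like (the exact
# schema is produced by run_comparison.run_all):
#   [{"name": "NumPy", "array_shapes": [10, 20, ...], "times": [1.2e-05, 4.8e-05, ...]}, ...]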
def plot_results(results_list, xmin, xmax, ymin, ymax):
matplotlib.rcParams.update({'font.size': 22})
plt.figure(figsize=(8,11), tight_layout=True)
plt.title('2D Laplace Python implementation benchmark')
results, pypy_results = results_list
# place Pypy result as the second graph. Results run from 'run_pypy_laplce.py' using pypy/numpypy.
results.insert(1, pypy_results)
for idx, result in enumerate(results):
name = result['name']
array_shapes = result['array_shapes']
times = result['times']
plt.subplot(2, 1, 1)
plt.xlabel('array size (X*Y)')
plt.ylabel('time per iteration (s)')
plt.xlim(0, xmax)
plt.ylim(0, ymax)
plt.plot(array_shapes, times, '.-', linewidth=2.0, label=name)
legend = plt.legend(loc='upper right', bbox_to_anchor=(1.9, 1.0), framealpha=0.0)
plt.subplot(2, 1, 2)
plt.xlim(xmin, xmax)
plt.ylim(ymin, ymax)
plt.xlabel('array size (X*Y)')
plt.ylabel('time per iteration (s)')
plt.loglog(array_shapes, times, '.-', linewidth=2.0, label=name)
plt.savefig('slides/results-%d.svg' % idx,
bbox_extra_artists=(legend,),
bbox_inches='tight',
transparent=True)
#plt.show()
def main():
# don't make it bigger than 20000, that's a massive array!
#array_shapes = [10, 20, 50, 100, 200, 500, 1000, 3000, 5000, 10000, 15000, 20000]
array_shapes = [10, 12, 15, 18, 20, 35, 50, 100, 200, 500, 800, 1000, 1500, 2000, 3162]
ymax = array_shapes[-1]/25000.0
results = None
pypy_results = None
args = getargs()
laplace_funcs = (('Pure Python (Cpython)', py_laplace.py_run),
('NumPy', np_laplace.np_run),
('Numba', numba_laplace.numba_run),
('Cython', cy_laplace.cy_run),
('Cython C wrapper', cy_wrap_claplace.cy_run_c_wrap),
('Cython parallel', cy_laplace.cy_run_parallel),
# ('Numba laplace vectorized', numba_laplace.numba_run_vectorized),
)
if args.timing:
results = run_all(laplace_funcs, array_shapes, maxtime=ymax)
with open('results.json', 'w') as fp:
json.dump(results, fp)
if args.plot:
if results is None:
with open('results.json', 'r') as fp:
results = json.load(fp)
with open('pypy_results.json', 'r') as pypy_fp:
pypy_results = json.load(pypy_fp)
plot_results([results, pypy_results], array_shapes[0]**2, array_shapes[-1]**2, 10e-8, ymax)
if __name__ == '__main__':
main()
| cc0-1.0 |
mikebenfield/scikit-learn | examples/covariance/plot_outlier_detection.py | 36 | 5023 | """
==========================================
Outlier detection with several methods.
==========================================
When the amount of contamination is known, this example illustrates four
different ways of performing :ref:`outlier_detection`:
- based on a robust estimator of covariance, which is assuming that the
data are Gaussian distributed and performs better than the One-Class SVM
in that case.
- using the One-Class SVM and its ability to capture the shape of the
data set, hence performing better when the data is strongly
non-Gaussian, i.e. with two well-separated clusters;
- using the Isolation Forest algorithm, which is based on random forests and
hence more adapted to large-dimensional settings, even if it performs
quite well in the examples below.
- using the Local Outlier Factor to measure the local deviation of a given
data point with respect to its neighbors by comparing their local density.
The ground truth about inliers and outliers is given by the points colors
while the orange-filled area indicates which points are reported as inliers
by each method.
Here, we assume that we know the fraction of outliers in the datasets.
Thus rather than using the 'predict' method of the objects, we set the
threshold on the decision_function to separate out the corresponding
fraction.
"""
import numpy as np
from scipy import stats
import matplotlib.pyplot as plt
import matplotlib.font_manager
from sklearn import svm
from sklearn.covariance import EllipticEnvelope
from sklearn.ensemble import IsolationForest
from sklearn.neighbors import LocalOutlierFactor
print(__doc__)
rng = np.random.RandomState(42)
# Example settings
n_samples = 200
outliers_fraction = 0.25
clusters_separation = [0, 1, 2]
# define four outlier detection tools to be compared
classifiers = {
"One-Class SVM": svm.OneClassSVM(nu=0.95 * outliers_fraction + 0.05,
kernel="rbf", gamma=0.1),
"Robust covariance": EllipticEnvelope(contamination=outliers_fraction),
"Isolation Forest": IsolationForest(max_samples=n_samples,
contamination=outliers_fraction,
random_state=rng),
"Local Outlier Factor": LocalOutlierFactor(
n_neighbors=35,
contamination=outliers_fraction)}
# Compare given classifiers under given settings
xx, yy = np.meshgrid(np.linspace(-7, 7, 100), np.linspace(-7, 7, 100))
n_inliers = int((1. - outliers_fraction) * n_samples)
n_outliers = int(outliers_fraction * n_samples)
ground_truth = np.ones(n_samples, dtype=int)
ground_truth[-n_outliers:] = -1
# Fit the problem with varying cluster separation
for i, offset in enumerate(clusters_separation):
np.random.seed(42)
# Data generation
X1 = 0.3 * np.random.randn(n_inliers // 2, 2) - offset
X2 = 0.3 * np.random.randn(n_inliers // 2, 2) + offset
X = np.r_[X1, X2]
# Add outliers
X = np.r_[X, np.random.uniform(low=-6, high=6, size=(n_outliers, 2))]
# Fit the model
plt.figure(figsize=(9, 7))
for i, (clf_name, clf) in enumerate(classifiers.items()):
# fit the data and tag outliers
if clf_name == "Local Outlier Factor":
y_pred = clf.fit_predict(X)
scores_pred = clf.negative_outlier_factor_
else:
clf.fit(X)
scores_pred = clf.decision_function(X)
y_pred = clf.predict(X)
threshold = stats.scoreatpercentile(scores_pred,
100 * outliers_fraction)
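        # i.e. the decision boundary is drawn so that the fraction of training points with the
        # lowest scores (here 25%) falls on the outlier side, as described in the module docstring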
n_errors = (y_pred != ground_truth).sum()
# plot the levels lines and the points
if clf_name == "Local Outlier Factor":
# decision_function is private for LOF
Z = clf._decision_function(np.c_[xx.ravel(), yy.ravel()])
else:
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
subplot = plt.subplot(2, 2, i + 1)
subplot.contourf(xx, yy, Z, levels=np.linspace(Z.min(), threshold, 7),
cmap=plt.cm.Blues_r)
a = subplot.contour(xx, yy, Z, levels=[threshold],
linewidths=2, colors='red')
subplot.contourf(xx, yy, Z, levels=[threshold, Z.max()],
colors='orange')
b = subplot.scatter(X[:-n_outliers, 0], X[:-n_outliers, 1], c='white')
c = subplot.scatter(X[-n_outliers:, 0], X[-n_outliers:, 1], c='black')
subplot.axis('tight')
subplot.legend(
[a.collections[0], b, c],
['learned decision function', 'true inliers', 'true outliers'],
prop=matplotlib.font_manager.FontProperties(size=10),
loc='lower right')
subplot.set_xlabel("%d. %s (errors: %d)" % (i + 1, clf_name, n_errors))
subplot.set_xlim((-7, 7))
subplot.set_ylim((-7, 7))
plt.subplots_adjust(0.04, 0.1, 0.96, 0.94, 0.1, 0.26)
plt.suptitle("Outlier detection")
plt.show()
| bsd-3-clause |
breuderink/psychic | psychic/tests/testpositions.py | 1 | 1120 | import unittest, os
import numpy as np
import matplotlib.pyplot as plt
from ..positions import *
class Test10_5(unittest.TestCase):
def test_dists(self):
def dist(a, b):
return np.linalg.norm((np.atleast_1d(a) - np.atleast_1d(b)))
def test_eq_dists(labs):
dists = [dist(POS_10_5[a], POS_10_5[b]) for a, b in zip(labs, labs[1:])]
self.assert_(np.all(np.abs(np.diff(dists)) < 1e-3))
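    # the chains checked below are the sagittal midline, the left and right circumferential
    # chains, and the central coronal chain; electrodes along each chain should be equally spaced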
test_eq_dists('Nz Fpz AFz Fz FCz Cz CPz Pz POz Oz Iz'.split())
test_eq_dists('Fpz Fp1 AF7 F7 FT7 T7 TP7 P7 PO7 O1 Oz'.split())
test_eq_dists('Fpz Fp2 AF8 F8 FT8 T8 TP8 P8 PO8 O2 Oz'.split())
test_eq_dists('T9 T7 C5 C3 C1 Cz C2 C4 C6 T8 T10'.split())
def test_plot_locs(self):
locs = []
plt.clf()
for (label, coord) in POS_10_5.items():
x, y = project_scalp(*coord)
plt.text(x, y + .03, label, fontsize=6, ha='center')
locs.append((x, y))
locs = np.asarray(locs)
plt.plot(locs[:, 0], locs[:, 1], '.k', ms=3)
plt.xlim(-1.5, 1.5)
plt.ylim(-1.5, 1.5)
plt.savefig(os.path.join('out', '10-5.eps'))
if __name__ == '__main__':
unittest.main()
| bsd-3-clause |
manashmndl/scikit-learn | sklearn/tests/test_naive_bayes.py | 142 | 17496 | import pickle
from io import BytesIO
import numpy as np
import scipy.sparse
from sklearn.datasets import load_digits, load_iris
from sklearn.cross_validation import cross_val_score, train_test_split
from sklearn.externals.six.moves import zip
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_greater
from sklearn.naive_bayes import GaussianNB, BernoulliNB, MultinomialNB
# Data is just 6 separable points in the plane
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]])
y = np.array([1, 1, 1, 2, 2, 2])
# A bit more random tests
rng = np.random.RandomState(0)
X1 = rng.normal(size=(10, 3))
y1 = (rng.normal(size=(10)) > 0).astype(np.int)
# Data is 6 random integer points in a 100-dimensional space classified into
# three classes.
X2 = rng.randint(5, size=(6, 100))
y2 = np.array([1, 1, 2, 2, 3, 3])
def test_gnb():
# Gaussian Naive Bayes classification.
# This checks that GaussianNB implements fit and predict and returns
# correct values for a simple toy dataset.
clf = GaussianNB()
y_pred = clf.fit(X, y).predict(X)
assert_array_equal(y_pred, y)
y_pred_proba = clf.predict_proba(X)
y_pred_log_proba = clf.predict_log_proba(X)
assert_array_almost_equal(np.log(y_pred_proba), y_pred_log_proba, 8)
# Test whether label mismatch between target y and classes raises
# an Error
# FIXME Remove this test once the more general partial_fit tests are merged
assert_raises(ValueError, GaussianNB().partial_fit, X, y, classes=[0, 1])
def test_gnb_prior():
# Test whether class priors are properly set.
clf = GaussianNB().fit(X, y)
assert_array_almost_equal(np.array([3, 3]) / 6.0,
clf.class_prior_, 8)
clf.fit(X1, y1)
# Check that the class priors sum to 1
assert_array_almost_equal(clf.class_prior_.sum(), 1)
def test_gnb_sample_weight():
"""Test whether sample weights are properly used in GNB. """
# Sample weights all being 1 should not change results
sw = np.ones(6)
clf = GaussianNB().fit(X, y)
clf_sw = GaussianNB().fit(X, y, sw)
assert_array_almost_equal(clf.theta_, clf_sw.theta_)
assert_array_almost_equal(clf.sigma_, clf_sw.sigma_)
# Fitting twice with half sample-weights should result
# in same result as fitting once with full weights
sw = rng.rand(y.shape[0])
clf1 = GaussianNB().fit(X, y, sample_weight=sw)
clf2 = GaussianNB().partial_fit(X, y, classes=[1, 2], sample_weight=sw / 2)
clf2.partial_fit(X, y, sample_weight=sw / 2)
assert_array_almost_equal(clf1.theta_, clf2.theta_)
assert_array_almost_equal(clf1.sigma_, clf2.sigma_)
# Check that duplicate entries and correspondingly increased sample
# weights yield the same result
ind = rng.randint(0, X.shape[0], 20)
sample_weight = np.bincount(ind, minlength=X.shape[0])
clf_dupl = GaussianNB().fit(X[ind], y[ind])
clf_sw = GaussianNB().fit(X, y, sample_weight)
assert_array_almost_equal(clf_dupl.theta_, clf_sw.theta_)
assert_array_almost_equal(clf_dupl.sigma_, clf_sw.sigma_)
def test_discrete_prior():
# Test whether class priors are properly set.
for cls in [BernoulliNB, MultinomialNB]:
clf = cls().fit(X2, y2)
assert_array_almost_equal(np.log(np.array([2, 2, 2]) / 6.0),
clf.class_log_prior_, 8)
def test_mnnb():
# Test Multinomial Naive Bayes classification.
# This checks that MultinomialNB implements fit and predict and returns
# correct values for a simple toy dataset.
for X in [X2, scipy.sparse.csr_matrix(X2)]:
# Check the ability to predict the learning set.
clf = MultinomialNB()
assert_raises(ValueError, clf.fit, -X, y2)
y_pred = clf.fit(X, y2).predict(X)
assert_array_equal(y_pred, y2)
# Verify that np.log(clf.predict_proba(X)) gives the same results as
# clf.predict_log_proba(X)
y_pred_proba = clf.predict_proba(X)
y_pred_log_proba = clf.predict_log_proba(X)
assert_array_almost_equal(np.log(y_pred_proba), y_pred_log_proba, 8)
# Check that incremental fitting yields the same results
clf2 = MultinomialNB()
clf2.partial_fit(X[:2], y2[:2], classes=np.unique(y2))
clf2.partial_fit(X[2:5], y2[2:5])
clf2.partial_fit(X[5:], y2[5:])
y_pred2 = clf2.predict(X)
assert_array_equal(y_pred2, y2)
y_pred_proba2 = clf2.predict_proba(X)
y_pred_log_proba2 = clf2.predict_log_proba(X)
assert_array_almost_equal(np.log(y_pred_proba2), y_pred_log_proba2, 8)
assert_array_almost_equal(y_pred_proba2, y_pred_proba)
assert_array_almost_equal(y_pred_log_proba2, y_pred_log_proba)
# Partial fit on the whole data at once should be the same as fit too
clf3 = MultinomialNB()
clf3.partial_fit(X, y2, classes=np.unique(y2))
y_pred3 = clf3.predict(X)
assert_array_equal(y_pred3, y2)
y_pred_proba3 = clf3.predict_proba(X)
y_pred_log_proba3 = clf3.predict_log_proba(X)
assert_array_almost_equal(np.log(y_pred_proba3), y_pred_log_proba3, 8)
assert_array_almost_equal(y_pred_proba3, y_pred_proba)
assert_array_almost_equal(y_pred_log_proba3, y_pred_log_proba)
def check_partial_fit(cls):
clf1 = cls()
clf1.fit([[0, 1], [1, 0]], [0, 1])
clf2 = cls()
clf2.partial_fit([[0, 1], [1, 0]], [0, 1], classes=[0, 1])
assert_array_equal(clf1.class_count_, clf2.class_count_)
assert_array_equal(clf1.feature_count_, clf2.feature_count_)
clf3 = cls()
clf3.partial_fit([[0, 1]], [0], classes=[0, 1])
clf3.partial_fit([[1, 0]], [1])
assert_array_equal(clf1.class_count_, clf3.class_count_)
assert_array_equal(clf1.feature_count_, clf3.feature_count_)
def test_discretenb_partial_fit():
for cls in [MultinomialNB, BernoulliNB]:
yield check_partial_fit, cls
def test_gnb_partial_fit():
clf = GaussianNB().fit(X, y)
clf_pf = GaussianNB().partial_fit(X, y, np.unique(y))
assert_array_almost_equal(clf.theta_, clf_pf.theta_)
assert_array_almost_equal(clf.sigma_, clf_pf.sigma_)
assert_array_almost_equal(clf.class_prior_, clf_pf.class_prior_)
clf_pf2 = GaussianNB().partial_fit(X[0::2, :], y[0::2], np.unique(y))
clf_pf2.partial_fit(X[1::2], y[1::2])
assert_array_almost_equal(clf.theta_, clf_pf2.theta_)
assert_array_almost_equal(clf.sigma_, clf_pf2.sigma_)
assert_array_almost_equal(clf.class_prior_, clf_pf2.class_prior_)
def test_discretenb_pickle():
# Test picklability of discrete naive Bayes classifiers
for cls in [BernoulliNB, MultinomialNB, GaussianNB]:
clf = cls().fit(X2, y2)
y_pred = clf.predict(X2)
store = BytesIO()
pickle.dump(clf, store)
clf = pickle.load(BytesIO(store.getvalue()))
assert_array_equal(y_pred, clf.predict(X2))
if cls is not GaussianNB:
# TODO re-enable me when partial_fit is implemented for GaussianNB
# Test pickling of estimator trained with partial_fit
clf2 = cls().partial_fit(X2[:3], y2[:3], classes=np.unique(y2))
clf2.partial_fit(X2[3:], y2[3:])
store = BytesIO()
pickle.dump(clf2, store)
clf2 = pickle.load(BytesIO(store.getvalue()))
assert_array_equal(y_pred, clf2.predict(X2))
def test_input_check_fit():
# Test input checks for the fit method
for cls in [BernoulliNB, MultinomialNB, GaussianNB]:
# check shape consistency for number of samples at fit time
assert_raises(ValueError, cls().fit, X2, y2[:-1])
# check shape consistency for number of input features at predict time
clf = cls().fit(X2, y2)
assert_raises(ValueError, clf.predict, X2[:, :-1])
def test_input_check_partial_fit():
for cls in [BernoulliNB, MultinomialNB]:
# check shape consistency
assert_raises(ValueError, cls().partial_fit, X2, y2[:-1],
classes=np.unique(y2))
# classes is required for first call to partial fit
assert_raises(ValueError, cls().partial_fit, X2, y2)
# check consistency of consecutive classes values
clf = cls()
clf.partial_fit(X2, y2, classes=np.unique(y2))
assert_raises(ValueError, clf.partial_fit, X2, y2,
classes=np.arange(42))
# check consistency of input shape for partial_fit
assert_raises(ValueError, clf.partial_fit, X2[:, :-1], y2)
# check consistency of input shape for predict
assert_raises(ValueError, clf.predict, X2[:, :-1])
def test_discretenb_predict_proba():
# Test discrete NB classes' probability scores
# The 100s below distinguish Bernoulli from multinomial.
# FIXME: write a test to show this.
X_bernoulli = [[1, 100, 0], [0, 1, 0], [0, 100, 1]]
X_multinomial = [[0, 1], [1, 3], [4, 0]]
# test binary case (1-d output)
y = [0, 0, 2] # 2 is regression test for binary case, 02e673
for cls, X in zip([BernoulliNB, MultinomialNB],
[X_bernoulli, X_multinomial]):
clf = cls().fit(X, y)
assert_equal(clf.predict(X[-1]), 2)
assert_equal(clf.predict_proba(X[0]).shape, (1, 2))
assert_array_almost_equal(clf.predict_proba(X[:2]).sum(axis=1),
np.array([1., 1.]), 6)
# test multiclass case (2-d output, must sum to one)
y = [0, 1, 2]
for cls, X in zip([BernoulliNB, MultinomialNB],
[X_bernoulli, X_multinomial]):
clf = cls().fit(X, y)
assert_equal(clf.predict_proba(X[0]).shape, (1, 3))
assert_equal(clf.predict_proba(X[:2]).shape, (2, 3))
assert_almost_equal(np.sum(clf.predict_proba(X[1])), 1)
assert_almost_equal(np.sum(clf.predict_proba(X[-1])), 1)
assert_almost_equal(np.sum(np.exp(clf.class_log_prior_)), 1)
assert_almost_equal(np.sum(np.exp(clf.intercept_)), 1)
def test_discretenb_uniform_prior():
# Test whether discrete NB classes fit a uniform prior
# when fit_prior=False and class_prior=None
for cls in [BernoulliNB, MultinomialNB]:
clf = cls()
clf.set_params(fit_prior=False)
clf.fit([[0], [0], [1]], [0, 0, 1])
prior = np.exp(clf.class_log_prior_)
assert_array_equal(prior, np.array([.5, .5]))
def test_discretenb_provide_prior():
# Test whether discrete NB classes use provided prior
for cls in [BernoulliNB, MultinomialNB]:
clf = cls(class_prior=[0.5, 0.5])
clf.fit([[0], [0], [1]], [0, 0, 1])
prior = np.exp(clf.class_log_prior_)
assert_array_equal(prior, np.array([.5, .5]))
# Inconsistent number of classes with prior
assert_raises(ValueError, clf.fit, [[0], [1], [2]], [0, 1, 2])
assert_raises(ValueError, clf.partial_fit, [[0], [1]], [0, 1],
classes=[0, 1, 1])
def test_discretenb_provide_prior_with_partial_fit():
# Test whether discrete NB classes use provided prior
# when using partial_fit
iris = load_iris()
iris_data1, iris_data2, iris_target1, iris_target2 = train_test_split(
iris.data, iris.target, test_size=0.4, random_state=415)
for cls in [BernoulliNB, MultinomialNB]:
for prior in [None, [0.3, 0.3, 0.4]]:
clf_full = cls(class_prior=prior)
clf_full.fit(iris.data, iris.target)
clf_partial = cls(class_prior=prior)
clf_partial.partial_fit(iris_data1, iris_target1,
classes=[0, 1, 2])
clf_partial.partial_fit(iris_data2, iris_target2)
assert_array_almost_equal(clf_full.class_log_prior_,
clf_partial.class_log_prior_)
def test_sample_weight_multiclass():
for cls in [BernoulliNB, MultinomialNB]:
        # check that sample weights are handled consistently by each discrete NB class
yield check_sample_weight_multiclass, cls
def check_sample_weight_multiclass(cls):
X = [
[0, 0, 1],
[0, 1, 1],
[0, 1, 1],
[1, 0, 0],
]
y = [0, 0, 1, 2]
sample_weight = np.array([1, 1, 2, 2], dtype=np.float)
sample_weight /= sample_weight.sum()
clf = cls().fit(X, y, sample_weight=sample_weight)
assert_array_equal(clf.predict(X), [0, 1, 1, 2])
# Check sample weight using the partial_fit method
clf = cls()
clf.partial_fit(X[:2], y[:2], classes=[0, 1, 2],
sample_weight=sample_weight[:2])
clf.partial_fit(X[2:3], y[2:3], sample_weight=sample_weight[2:3])
clf.partial_fit(X[3:], y[3:], sample_weight=sample_weight[3:])
assert_array_equal(clf.predict(X), [0, 1, 1, 2])
def test_sample_weight_mnb():
clf = MultinomialNB()
clf.fit([[1, 2], [1, 2], [1, 0]],
[0, 0, 1],
sample_weight=[1, 1, 4])
assert_array_equal(clf.predict([1, 0]), [1])
positive_prior = np.exp(clf.intercept_[0])
assert_array_almost_equal([1 - positive_prior, positive_prior],
[1 / 3., 2 / 3.])
def test_coef_intercept_shape():
# coef_ and intercept_ should have shapes as in other linear models.
# Non-regression test for issue #2127.
X = [[1, 0, 0], [1, 1, 1]]
y = [1, 2] # binary classification
for clf in [MultinomialNB(), BernoulliNB()]:
clf.fit(X, y)
assert_equal(clf.coef_.shape, (1, 3))
assert_equal(clf.intercept_.shape, (1,))
def test_check_accuracy_on_digits():
# Non regression test to make sure that any further refactoring / optim
# of the NB models do not harm the performance on a slightly non-linearly
# separable dataset
digits = load_digits()
X, y = digits.data, digits.target
binary_3v8 = np.logical_or(digits.target == 3, digits.target == 8)
X_3v8, y_3v8 = X[binary_3v8], y[binary_3v8]
# Multinomial NB
scores = cross_val_score(MultinomialNB(alpha=10), X, y, cv=10)
assert_greater(scores.mean(), 0.86)
scores = cross_val_score(MultinomialNB(alpha=10), X_3v8, y_3v8, cv=10)
assert_greater(scores.mean(), 0.94)
# Bernoulli NB
scores = cross_val_score(BernoulliNB(alpha=10), X > 4, y, cv=10)
assert_greater(scores.mean(), 0.83)
scores = cross_val_score(BernoulliNB(alpha=10), X_3v8 > 4, y_3v8, cv=10)
assert_greater(scores.mean(), 0.92)
# Gaussian NB
scores = cross_val_score(GaussianNB(), X, y, cv=10)
assert_greater(scores.mean(), 0.77)
scores = cross_val_score(GaussianNB(), X_3v8, y_3v8, cv=10)
assert_greater(scores.mean(), 0.86)
def test_feature_log_prob_bnb():
# Test for issue #4268.
# Tests that the feature log prob value computed by BernoulliNB when
# alpha=1.0 is equal to the expression given in Manning, Raghavan,
# and Schuetze's "Introduction to Information Retrieval" book:
# http://nlp.stanford.edu/IR-book/html/htmledition/the-bernoulli-model-1.html
X = np.array([[0, 0, 0], [1, 1, 0], [0, 1, 0], [1, 0, 1], [0, 1, 0]])
Y = np.array([0, 0, 1, 2, 2])
# Fit Bernoulli NB w/ alpha = 1.0
clf = BernoulliNB(alpha=1.0)
clf.fit(X, Y)
# Manually form the (log) numerator and denominator that
# constitute P(feature presence | class)
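    # with alpha=1.0 this is Laplace (add-one) smoothing: P(x_i = 1 | c) = (N_ci + 1) / (N_c + 2)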
num = np.log(clf.feature_count_ + 1.0)
denom = np.tile(np.log(clf.class_count_ + 2.0), (X.shape[1], 1)).T
# Check manual estimate matches
assert_array_equal(clf.feature_log_prob_, (num - denom))
def test_bnb():
# Tests that BernoulliNB when alpha=1.0 gives the same values as
# those given for the toy example in Manning, Raghavan, and
# Schuetze's "Introduction to Information Retrieval" book:
# http://nlp.stanford.edu/IR-book/html/htmledition/the-bernoulli-model-1.html
# Training data points are:
# Chinese Beijing Chinese (class: China)
# Chinese Chinese Shanghai (class: China)
# Chinese Macao (class: China)
# Tokyo Japan Chinese (class: Japan)
# Features are Beijing, Chinese, Japan, Macao, Shanghai, and Tokyo
X = np.array([[1, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 1, 0],
[0, 1, 0, 1, 0, 0],
[0, 1, 1, 0, 0, 1]])
# Classes are China (0), Japan (1)
Y = np.array([0, 0, 0, 1])
    # Fit BernoulliNB w/ alpha = 1.0
clf = BernoulliNB(alpha=1.0)
clf.fit(X, Y)
# Check the class prior is correct
class_prior = np.array([0.75, 0.25])
assert_array_almost_equal(np.exp(clf.class_log_prior_), class_prior)
# Check the feature probabilities are correct
feature_prob = np.array([[0.4, 0.8, 0.2, 0.4, 0.4, 0.2],
[1/3.0, 2/3.0, 2/3.0, 1/3.0, 1/3.0, 2/3.0]])
assert_array_almost_equal(np.exp(clf.feature_log_prob_), feature_prob)
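    # e.g. 'Chinese' occurs in all 3 China documents, so P(Chinese | China) = (3 + 1) / (3 + 2) = 0.8,
    # while 'Beijing' occurs in 1 of them, giving (1 + 1) / (3 + 2) = 0.4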
# Testing data point is:
# Chinese Chinese Chinese Tokyo Japan
X_test = np.array([0, 1, 1, 0, 0, 1])
# Check the predictive probabilities are correct
unnorm_predict_proba = np.array([[0.005183999999999999,
0.02194787379972565]])
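    # the China entry works out as P(China) * P(Chinese|China) * P(Japan|China) * P(Tokyo|China)
    # * (1 - P(Beijing|China)) * (1 - P(Macao|China)) * (1 - P(Shanghai|China))
    # = 0.75 * 0.8 * 0.2 * 0.2 * 0.6 * 0.6 * 0.6 = 0.005184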
predict_proba = unnorm_predict_proba / np.sum(unnorm_predict_proba)
assert_array_almost_equal(clf.predict_proba(X_test), predict_proba)
| bsd-3-clause |
Aegeaner/spark | python/pyspark/sql/types.py | 1 | 67445 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
import decimal
import time
import datetime
import calendar
import json
import re
import base64
from array import array
import ctypes
if sys.version >= "3":
long = int
basestring = unicode = str
from py4j.protocol import register_input_converter
from py4j.java_gateway import JavaClass
from pyspark import SparkContext
from pyspark.serializers import CloudPickleSerializer
__all__ = [
"DataType", "NullType", "StringType", "BinaryType", "BooleanType", "DateType",
"TimestampType", "DecimalType", "DoubleType", "FloatType", "ByteType", "IntegerType",
"LongType", "ShortType", "ArrayType", "MapType", "StructField", "StructType"]
class DataType(object):
"""Base class for data types."""
def __repr__(self):
return self.__class__.__name__
def __hash__(self):
return hash(str(self))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not self.__eq__(other)
@classmethod
def typeName(cls):
return cls.__name__[:-4].lower()
def simpleString(self):
return self.typeName()
def jsonValue(self):
return self.typeName()
def json(self):
return json.dumps(self.jsonValue(),
separators=(',', ':'),
sort_keys=True)
def needConversion(self):
"""
        Does this type need conversion between Python object and internal SQL object.
This is used to avoid the unnecessary conversion for ArrayType/MapType/StructType.
"""
return False
def toInternal(self, obj):
"""
Converts a Python object into an internal SQL object.
"""
return obj
def fromInternal(self, obj):
"""
Converts an internal SQL object into a native Python object.
"""
return obj
# This singleton pattern does not work with pickle, you will get
# another object after pickle and unpickle
class DataTypeSingleton(type):
"""Metaclass for DataType"""
_instances = {}
def __call__(cls):
if cls not in cls._instances:
cls._instances[cls] = super(DataTypeSingleton, cls).__call__()
return cls._instances[cls]
class NullType(DataType):
"""Null type.
The data type representing None, used for the types that cannot be inferred.
"""
__metaclass__ = DataTypeSingleton
class AtomicType(DataType):
"""An internal type used to represent everything that is not
null, UDTs, arrays, structs, and maps."""
class NumericType(AtomicType):
"""Numeric data types.
"""
class IntegralType(NumericType):
"""Integral data types.
"""
__metaclass__ = DataTypeSingleton
class FractionalType(NumericType):
"""Fractional data types.
"""
class StringType(AtomicType):
"""String data type.
"""
__metaclass__ = DataTypeSingleton
class BinaryType(AtomicType):
"""Binary (byte array) data type.
"""
__metaclass__ = DataTypeSingleton
class BooleanType(AtomicType):
"""Boolean data type.
"""
__metaclass__ = DataTypeSingleton
class DateType(AtomicType):
"""Date (datetime.date) data type.
"""
__metaclass__ = DataTypeSingleton
EPOCH_ORDINAL = datetime.datetime(1970, 1, 1).toordinal()
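    # dates are stored internally as the number of days since the Unix epoch (1970-01-01)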
def needConversion(self):
return True
def toInternal(self, d):
if d is not None:
return d.toordinal() - self.EPOCH_ORDINAL
def fromInternal(self, v):
if v is not None:
return datetime.date.fromordinal(v + self.EPOCH_ORDINAL)
class TimestampType(AtomicType):
"""Timestamp (datetime.datetime) data type.
"""
__metaclass__ = DataTypeSingleton
def needConversion(self):
return True
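    # internal representation: microseconds since the Unix epoch; naive datetimes are
    # interpreted in local time, while tz-aware ones are converted via their UTC time tuple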
def toInternal(self, dt):
if dt is not None:
seconds = (calendar.timegm(dt.utctimetuple()) if dt.tzinfo
else time.mktime(dt.timetuple()))
return int(seconds) * 1000000 + dt.microsecond
def fromInternal(self, ts):
if ts is not None:
# using int to avoid precision loss in float
return datetime.datetime.fromtimestamp(ts // 1000000).replace(microsecond=ts % 1000000)
class DecimalType(FractionalType):
"""Decimal (decimal.Decimal) data type.
The DecimalType must have fixed precision (the maximum total number of digits)
and scale (the number of digits on the right of dot). For example, (5, 2) can
support the value from [-999.99 to 999.99].
The precision can be up to 38, the scale must be less or equal to precision.
When create a DecimalType, the default precision and scale is (10, 0). When infer
schema from decimal.Decimal objects, it will be DecimalType(38, 18).
:param precision: the maximum total number of digits (default: 10)
:param scale: the number of digits on right side of dot. (default: 0)
"""
def __init__(self, precision=10, scale=0):
self.precision = precision
self.scale = scale
self.hasPrecisionInfo = True # this is public API
def simpleString(self):
return "decimal(%d,%d)" % (self.precision, self.scale)
def jsonValue(self):
return "decimal(%d,%d)" % (self.precision, self.scale)
def __repr__(self):
return "DecimalType(%d,%d)" % (self.precision, self.scale)
class DoubleType(FractionalType):
"""Double data type, representing double precision floats.
"""
__metaclass__ = DataTypeSingleton
class FloatType(FractionalType):
"""Float data type, representing single precision floats.
"""
__metaclass__ = DataTypeSingleton
class ByteType(IntegralType):
"""Byte data type, i.e. a signed integer in a single byte.
"""
def simpleString(self):
return 'tinyint'
class IntegerType(IntegralType):
"""Int data type, i.e. a signed 32-bit integer.
"""
def simpleString(self):
return 'int'
class LongType(IntegralType):
"""Long data type, i.e. a signed 64-bit integer.
If the values are beyond the range of [-9223372036854775808, 9223372036854775807],
please use :class:`DecimalType`.
"""
def simpleString(self):
return 'bigint'
class ShortType(IntegralType):
"""Short data type, i.e. a signed 16-bit integer.
"""
def simpleString(self):
return 'smallint'
class ArrayType(DataType):
"""Array data type.
:param elementType: :class:`DataType` of each element in the array.
:param containsNull: boolean, whether the array can contain null (None) values.
"""
def __init__(self, elementType, containsNull=True):
"""
>>> ArrayType(StringType()) == ArrayType(StringType(), True)
True
>>> ArrayType(StringType(), False) == ArrayType(StringType())
False
"""
assert isinstance(elementType, DataType),\
"elementType %s should be an instance of %s" % (elementType, DataType)
self.elementType = elementType
self.containsNull = containsNull
def simpleString(self):
return 'array<%s>' % self.elementType.simpleString()
def __repr__(self):
return "ArrayType(%s,%s)" % (self.elementType,
str(self.containsNull).lower())
def jsonValue(self):
return {"type": self.typeName(),
"elementType": self.elementType.jsonValue(),
"containsNull": self.containsNull}
@classmethod
def fromJson(cls, json):
return ArrayType(_parse_datatype_json_value(json["elementType"]),
json["containsNull"])
def needConversion(self):
return self.elementType.needConversion()
def toInternal(self, obj):
if not self.needConversion():
return obj
return obj and [self.elementType.toInternal(v) for v in obj]
def fromInternal(self, obj):
if not self.needConversion():
return obj
return obj and [self.elementType.fromInternal(v) for v in obj]
class MapType(DataType):
"""Map data type.
:param keyType: :class:`DataType` of the keys in the map.
:param valueType: :class:`DataType` of the values in the map.
:param valueContainsNull: indicates whether values can contain null (None) values.
Keys in a map data type are not allowed to be null (None).
"""
def __init__(self, keyType, valueType, valueContainsNull=True):
"""
>>> (MapType(StringType(), IntegerType())
... == MapType(StringType(), IntegerType(), True))
True
>>> (MapType(StringType(), IntegerType(), False)
... == MapType(StringType(), FloatType()))
False
"""
assert isinstance(keyType, DataType),\
"keyType %s should be an instance of %s" % (keyType, DataType)
assert isinstance(valueType, DataType),\
"valueType %s should be an instance of %s" % (valueType, DataType)
self.keyType = keyType
self.valueType = valueType
self.valueContainsNull = valueContainsNull
def simpleString(self):
return 'map<%s,%s>' % (self.keyType.simpleString(), self.valueType.simpleString())
def __repr__(self):
return "MapType(%s,%s,%s)" % (self.keyType, self.valueType,
str(self.valueContainsNull).lower())
def jsonValue(self):
return {"type": self.typeName(),
"keyType": self.keyType.jsonValue(),
"valueType": self.valueType.jsonValue(),
"valueContainsNull": self.valueContainsNull}
@classmethod
def fromJson(cls, json):
return MapType(_parse_datatype_json_value(json["keyType"]),
_parse_datatype_json_value(json["valueType"]),
json["valueContainsNull"])
def needConversion(self):
return self.keyType.needConversion() or self.valueType.needConversion()
def toInternal(self, obj):
if not self.needConversion():
return obj
return obj and dict((self.keyType.toInternal(k), self.valueType.toInternal(v))
for k, v in obj.items())
def fromInternal(self, obj):
if not self.needConversion():
return obj
return obj and dict((self.keyType.fromInternal(k), self.valueType.fromInternal(v))
for k, v in obj.items())
class StructField(DataType):
"""A field in :class:`StructType`.
:param name: string, name of the field.
:param dataType: :class:`DataType` of the field.
:param nullable: boolean, whether the field can be null (None) or not.
    :param metadata: a dict from string to simple type that can be converted to JSON automatically
"""
def __init__(self, name, dataType, nullable=True, metadata=None):
"""
>>> (StructField("f1", StringType(), True)
... == StructField("f1", StringType(), True))
True
>>> (StructField("f1", StringType(), True)
... == StructField("f2", StringType(), True))
False
"""
assert isinstance(dataType, DataType),\
"dataType %s should be an instance of %s" % (dataType, DataType)
assert isinstance(name, basestring), "field name %s should be string" % (name)
if not isinstance(name, str):
name = name.encode('utf-8')
self.name = name
self.dataType = dataType
self.nullable = nullable
self.metadata = metadata or {}
def simpleString(self):
return '%s:%s' % (self.name, self.dataType.simpleString())
def __repr__(self):
return "StructField(%s,%s,%s)" % (self.name, self.dataType,
str(self.nullable).lower())
def jsonValue(self):
return {"name": self.name,
"type": self.dataType.jsonValue(),
"nullable": self.nullable,
"metadata": self.metadata}
@classmethod
def fromJson(cls, json):
return StructField(json["name"],
_parse_datatype_json_value(json["type"]),
json["nullable"],
json["metadata"])
def needConversion(self):
return self.dataType.needConversion()
def toInternal(self, obj):
return self.dataType.toInternal(obj)
def fromInternal(self, obj):
return self.dataType.fromInternal(obj)
def typeName(self):
raise TypeError(
"StructField does not have typeName. "
"Use typeName on its type explicitly instead.")
class StructType(DataType):
"""Struct type, consisting of a list of :class:`StructField`.
This is the data type representing a :class:`Row`.
Iterating a :class:`StructType` will iterate its :class:`StructField`\\s.
A contained :class:`StructField` can be accessed by name or position.
>>> struct1 = StructType([StructField("f1", StringType(), True)])
>>> struct1["f1"]
StructField(f1,StringType,true)
>>> struct1[0]
StructField(f1,StringType,true)
"""
def __init__(self, fields=None):
"""
>>> struct1 = StructType([StructField("f1", StringType(), True)])
>>> struct2 = StructType([StructField("f1", StringType(), True)])
>>> struct1 == struct2
True
>>> struct1 = StructType([StructField("f1", StringType(), True)])
>>> struct2 = StructType([StructField("f1", StringType(), True),
... StructField("f2", IntegerType(), False)])
>>> struct1 == struct2
False
"""
if not fields:
self.fields = []
self.names = []
else:
self.fields = fields
self.names = [f.name for f in fields]
assert all(isinstance(f, StructField) for f in fields),\
"fields should be a list of StructField"
# Precalculated list of fields that need conversion with fromInternal/toInternal functions
self._needConversion = [f.needConversion() for f in self]
self._needSerializeAnyField = any(self._needConversion)
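        # fast path: if no field needs conversion, rows can be passed through as plain tuples
        # in toInternal/fromInternal below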
def add(self, field, data_type=None, nullable=True, metadata=None):
"""
Construct a StructType by adding new elements to it to define the schema. The method accepts
either:
a) A single parameter which is a StructField object.
b) Between 2 and 4 parameters as (name, data_type, nullable (optional),
               metadata (optional)). The data_type parameter may be either a String or a
DataType object.
>>> struct1 = StructType().add("f1", StringType(), True).add("f2", StringType(), True, None)
>>> struct2 = StructType([StructField("f1", StringType(), True), \\
... StructField("f2", StringType(), True, None)])
>>> struct1 == struct2
True
>>> struct1 = StructType().add(StructField("f1", StringType(), True))
>>> struct2 = StructType([StructField("f1", StringType(), True)])
>>> struct1 == struct2
True
>>> struct1 = StructType().add("f1", "string", True)
>>> struct2 = StructType([StructField("f1", StringType(), True)])
>>> struct1 == struct2
True
:param field: Either the name of the field or a StructField object
:param data_type: If present, the DataType of the StructField to create
:param nullable: Whether the field to add should be nullable (default True)
:param metadata: Any additional metadata (default None)
:return: a new updated StructType
"""
if isinstance(field, StructField):
self.fields.append(field)
self.names.append(field.name)
else:
if isinstance(field, str) and data_type is None:
raise ValueError("Must specify DataType if passing name of struct_field to create.")
if isinstance(data_type, str):
data_type_f = _parse_datatype_json_value(data_type)
else:
data_type_f = data_type
self.fields.append(StructField(field, data_type_f, nullable, metadata))
self.names.append(field)
# Precalculated list of fields that need conversion with fromInternal/toInternal functions
self._needConversion = [f.needConversion() for f in self]
self._needSerializeAnyField = any(self._needConversion)
return self
def __iter__(self):
"""Iterate the fields"""
return iter(self.fields)
def __len__(self):
"""Return the number of fields."""
return len(self.fields)
def __getitem__(self, key):
"""Access fields by name or slice."""
if isinstance(key, str):
for field in self:
if field.name == key:
return field
raise KeyError('No StructField named {0}'.format(key))
elif isinstance(key, int):
try:
return self.fields[key]
except IndexError:
raise IndexError('StructType index out of range')
elif isinstance(key, slice):
return StructType(self.fields[key])
else:
raise TypeError('StructType keys should be strings, integers or slices')
def simpleString(self):
return 'struct<%s>' % (','.join(f.simpleString() for f in self))
def __repr__(self):
return ("StructType(List(%s))" %
",".join(str(field) for field in self))
def jsonValue(self):
return {"type": self.typeName(),
"fields": [f.jsonValue() for f in self]}
@classmethod
def fromJson(cls, json):
return StructType([StructField.fromJson(f) for f in json["fields"]])
def fieldNames(self):
"""
Returns all field names in a list.
>>> struct = StructType([StructField("f1", StringType(), True)])
>>> struct.fieldNames()
['f1']
"""
return list(self.names)
def needConversion(self):
        # We need to convert Row()/namedtuple into tuple()
return True
def toInternal(self, obj):
if obj is None:
return
if self._needSerializeAnyField:
# Only calling toInternal function for fields that need conversion
if isinstance(obj, dict):
return tuple(f.toInternal(obj.get(n)) if c else obj.get(n)
for n, f, c in zip(self.names, self.fields, self._needConversion))
elif isinstance(obj, (tuple, list)):
return tuple(f.toInternal(v) if c else v
for f, v, c in zip(self.fields, obj, self._needConversion))
elif hasattr(obj, "__dict__"):
d = obj.__dict__
return tuple(f.toInternal(d.get(n)) if c else d.get(n)
for n, f, c in zip(self.names, self.fields, self._needConversion))
else:
raise ValueError("Unexpected tuple %r with StructType" % obj)
else:
if isinstance(obj, dict):
return tuple(obj.get(n) for n in self.names)
elif isinstance(obj, Row) and getattr(obj, "__from_dict__", False):
return tuple(obj[n] for n in self.names)
elif isinstance(obj, (list, tuple)):
return tuple(obj)
elif hasattr(obj, "__dict__"):
d = obj.__dict__
return tuple(d.get(n) for n in self.names)
else:
raise ValueError("Unexpected tuple %r with StructType" % obj)
def fromInternal(self, obj):
if obj is None:
return
if isinstance(obj, Row):
# it's already converted by pickler
return obj
if self._needSerializeAnyField:
# Only calling fromInternal function for fields that need conversion
values = [f.fromInternal(v) if c else v
for f, v, c in zip(self.fields, obj, self._needConversion)]
else:
values = obj
return _create_row(self.names, values)
class UserDefinedType(DataType):
"""User-defined type (UDT).
.. note:: WARN: Spark Internal Use Only
"""
@classmethod
def typeName(cls):
return cls.__name__.lower()
@classmethod
def sqlType(cls):
"""
Underlying SQL storage type for this UDT.
"""
raise NotImplementedError("UDT must implement sqlType().")
@classmethod
def module(cls):
"""
The Python module of the UDT.
"""
raise NotImplementedError("UDT must implement module().")
@classmethod
def scalaUDT(cls):
"""
The class name of the paired Scala UDT (could be '', if there
is no corresponding one).
"""
return ''
def needConversion(self):
return True
@classmethod
def _cachedSqlType(cls):
"""
        Cache the sqlType() into class, because it's heavily used in `toInternal`.
"""
if not hasattr(cls, "_cached_sql_type"):
cls._cached_sql_type = cls.sqlType()
return cls._cached_sql_type
def toInternal(self, obj):
if obj is not None:
return self._cachedSqlType().toInternal(self.serialize(obj))
def fromInternal(self, obj):
v = self._cachedSqlType().fromInternal(obj)
if v is not None:
return self.deserialize(v)
def serialize(self, obj):
"""
        Converts a user-type object into a SQL datum.
"""
raise NotImplementedError("UDT must implement toInternal().")
def deserialize(self, datum):
"""
Converts a SQL datum into a user-type object.
"""
raise NotImplementedError("UDT must implement fromInternal().")
def simpleString(self):
return 'udt'
def json(self):
return json.dumps(self.jsonValue(), separators=(',', ':'), sort_keys=True)
def jsonValue(self):
if self.scalaUDT():
assert self.module() != '__main__', 'UDT in __main__ cannot work with ScalaUDT'
schema = {
"type": "udt",
"class": self.scalaUDT(),
"pyClass": "%s.%s" % (self.module(), type(self).__name__),
"sqlType": self.sqlType().jsonValue()
}
else:
ser = CloudPickleSerializer()
b = ser.dumps(type(self))
schema = {
"type": "udt",
"pyClass": "%s.%s" % (self.module(), type(self).__name__),
"serializedClass": base64.b64encode(b).decode('utf8'),
"sqlType": self.sqlType().jsonValue()
}
return schema
@classmethod
def fromJson(cls, json):
pyUDT = str(json["pyClass"]) # convert unicode to str
split = pyUDT.rfind(".")
pyModule = pyUDT[:split]
pyClass = pyUDT[split+1:]
m = __import__(pyModule, globals(), locals(), [pyClass])
if not hasattr(m, pyClass):
s = base64.b64decode(json['serializedClass'].encode('utf-8'))
UDT = CloudPickleSerializer().loads(s)
else:
UDT = getattr(m, pyClass)
return UDT()
def __eq__(self, other):
return type(self) == type(other)
_atomic_types = [StringType, BinaryType, BooleanType, DecimalType, FloatType, DoubleType,
ByteType, ShortType, IntegerType, LongType, DateType, TimestampType, NullType]
_all_atomic_types = dict((t.typeName(), t) for t in _atomic_types)
_all_complex_types = dict((v.typeName(), v)
for v in [ArrayType, MapType, StructType])
_FIXED_DECIMAL = re.compile(r"decimal\(\s*(\d+)\s*,\s*(-?\d+)\s*\)")
def _parse_datatype_string(s):
"""
Parses the given data type string to a :class:`DataType`. The data type string format equals
to :class:`DataType.simpleString`, except that top level struct type can omit
the ``struct<>`` and atomic types use ``typeName()`` as their format, e.g. use ``byte`` instead
of ``tinyint`` for :class:`ByteType`. We can also use ``int`` as a short name
for :class:`IntegerType`. Since Spark 2.3, this also supports a schema in a DDL-formatted
string and case-insensitive strings.
>>> _parse_datatype_string("int ")
IntegerType
>>> _parse_datatype_string("INT ")
IntegerType
>>> _parse_datatype_string("a: byte, b: decimal( 16 , 8 ) ")
StructType(List(StructField(a,ByteType,true),StructField(b,DecimalType(16,8),true)))
>>> _parse_datatype_string("a DOUBLE, b STRING")
StructType(List(StructField(a,DoubleType,true),StructField(b,StringType,true)))
>>> _parse_datatype_string("a: array< short>")
StructType(List(StructField(a,ArrayType(ShortType,true),true)))
>>> _parse_datatype_string(" map<string , string > ")
MapType(StringType,StringType,true)
>>> # Error cases
>>> _parse_datatype_string("blabla") # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
ParseException:...
>>> _parse_datatype_string("a: int,") # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
ParseException:...
>>> _parse_datatype_string("array<int") # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
ParseException:...
>>> _parse_datatype_string("map<int, boolean>>") # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
ParseException:...
"""
sc = SparkContext._active_spark_context
def from_ddl_schema(type_str):
return _parse_datatype_json_string(
sc._jvm.org.apache.spark.sql.types.StructType.fromDDL(type_str).json())
def from_ddl_datatype(type_str):
return _parse_datatype_json_string(
sc._jvm.org.apache.spark.sql.api.python.PythonSQLUtils.parseDataType(type_str).json())
try:
# DDL format, "fieldname datatype, fieldname datatype".
return from_ddl_schema(s)
except Exception as e:
try:
# For backwards compatibility, "integer", "struct<fieldname: datatype>" and etc.
return from_ddl_datatype(s)
except:
try:
# For backwards compatibility, "fieldname: datatype, fieldname: datatype" case.
return from_ddl_datatype("struct<%s>" % s.strip())
except:
raise e
def _parse_datatype_json_string(json_string):
"""Parses the given data type JSON string.
>>> import pickle
>>> def check_datatype(datatype):
... pickled = pickle.loads(pickle.dumps(datatype))
... assert datatype == pickled
... scala_datatype = spark._jsparkSession.parseDataType(datatype.json())
... python_datatype = _parse_datatype_json_string(scala_datatype.json())
... assert datatype == python_datatype
>>> for cls in _all_atomic_types.values():
... check_datatype(cls())
>>> # Simple ArrayType.
>>> simple_arraytype = ArrayType(StringType(), True)
>>> check_datatype(simple_arraytype)
>>> # Simple MapType.
>>> simple_maptype = MapType(StringType(), LongType())
>>> check_datatype(simple_maptype)
>>> # Simple StructType.
>>> simple_structtype = StructType([
... StructField("a", DecimalType(), False),
... StructField("b", BooleanType(), True),
... StructField("c", LongType(), True),
... StructField("d", BinaryType(), False)])
>>> check_datatype(simple_structtype)
>>> # Complex StructType.
>>> complex_structtype = StructType([
... StructField("simpleArray", simple_arraytype, True),
... StructField("simpleMap", simple_maptype, True),
... StructField("simpleStruct", simple_structtype, True),
... StructField("boolean", BooleanType(), False),
... StructField("withMeta", DoubleType(), False, {"name": "age"})])
>>> check_datatype(complex_structtype)
>>> # Complex ArrayType.
>>> complex_arraytype = ArrayType(complex_structtype, True)
>>> check_datatype(complex_arraytype)
>>> # Complex MapType.
>>> complex_maptype = MapType(complex_structtype,
... complex_arraytype, False)
>>> check_datatype(complex_maptype)
>>> # Decimal with negative scale.
>>> check_datatype(DecimalType(1,-1))
"""
return _parse_datatype_json_value(json.loads(json_string))
def _parse_datatype_json_value(json_value):
if not isinstance(json_value, dict):
if json_value in _all_atomic_types.keys():
return _all_atomic_types[json_value]()
elif json_value == 'decimal':
return DecimalType()
elif _FIXED_DECIMAL.match(json_value):
m = _FIXED_DECIMAL.match(json_value)
return DecimalType(int(m.group(1)), int(m.group(2)))
else:
raise ValueError("Could not parse datatype: %s" % json_value)
else:
tpe = json_value["type"]
if tpe in _all_complex_types:
return _all_complex_types[tpe].fromJson(json_value)
elif tpe == 'udt':
return UserDefinedType.fromJson(json_value)
else:
raise ValueError("not supported type: %s" % tpe)
# Mapping Python types to Spark SQL DataType
_type_mappings = {
type(None): NullType,
bool: BooleanType,
int: LongType,
float: DoubleType,
str: StringType,
bytearray: BinaryType,
decimal.Decimal: DecimalType,
datetime.date: DateType,
datetime.datetime: TimestampType,
datetime.time: TimestampType,
}
if sys.version < "3":
_type_mappings.update({
unicode: StringType,
long: LongType,
})
# Mapping Python array types to Spark SQL DataType
# We should be careful here. The size of these types in python depends on C
# implementation. We need to make sure that this conversion does not lose any
# precision. Also, the JVM only supports signed types; when converting unsigned types,
# keep in mind that they require 1 more bit when stored as signed types.
#
# Reference for C integer size, see:
# ISO/IEC 9899:201x specification, chapter 5.2.4.2.1 Sizes of integer types <limits.h>.
# Reference for python array typecode, see:
# https://docs.python.org/2/library/array.html
# https://docs.python.org/3.6/library/array.html
# Reference for JVM's supported integral types:
# http://docs.oracle.com/javase/specs/jvms/se8/html/jvms-2.html#jvms-2.3.1
_array_signed_int_typecode_ctype_mappings = {
'b': ctypes.c_byte,
'h': ctypes.c_short,
'i': ctypes.c_int,
'l': ctypes.c_long,
}
_array_unsigned_int_typecode_ctype_mappings = {
'B': ctypes.c_ubyte,
'H': ctypes.c_ushort,
'I': ctypes.c_uint,
'L': ctypes.c_ulong
}
def _int_size_to_type(size):
"""
Return the Catalyst datatype from the size of integers.
"""
if size <= 8:
return ByteType
if size <= 16:
return ShortType
if size <= 32:
return IntegerType
if size <= 64:
return LongType
# The list of all supported array typecodes is stored here
_array_type_mappings = {
    # Warning: The exact sizes of float and double are not specified by the C standard.
# On almost every system supported by both python and JVM, they are IEEE 754
# single-precision binary floating-point format and IEEE 754 double-precision
# binary floating-point format. And we do assume the same thing here for now.
'f': FloatType,
'd': DoubleType
}
# compute array typecode mappings for signed integer types
for _typecode in _array_signed_int_typecode_ctype_mappings.keys():
size = ctypes.sizeof(_array_signed_int_typecode_ctype_mappings[_typecode]) * 8
dt = _int_size_to_type(size)
if dt is not None:
_array_type_mappings[_typecode] = dt
# compute array typecode mappings for unsigned integer types
for _typecode in _array_unsigned_int_typecode_ctype_mappings.keys():
    # the JVM does not have unsigned types, so use a signed type that is at least 1
    # bit larger to store the value
size = ctypes.sizeof(_array_unsigned_int_typecode_ctype_mappings[_typecode]) * 8 + 1
dt = _int_size_to_type(size)
if dt is not None:
_array_type_mappings[_typecode] = dt
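# For illustration (an assumption about a typical 64-bit LP64 build, where C long is 64 bits):
# the loops above map 'b'->ByteType, 'h'->ShortType, 'i'->IntegerType, 'l'->LongType and
# 'B'->ShortType, 'H'->IntegerType, 'I'->LongType, while 'L' would need 65 bits and is skipped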
# Type code 'u' in Python's array is deprecated since version 3.3, and will be
# removed in version 4.0. See: https://docs.python.org/3/library/array.html
if sys.version_info[0] < 4:
_array_type_mappings['u'] = StringType
# Type code 'c' are only available at python 2
if sys.version_info[0] < 3:
_array_type_mappings['c'] = StringType
# SPARK-21465:
# In python2, arrays of typecode 'L' happened to be mistakenly partially supported. To
# avoid breaking users' code, we should keep this partial support. Below is a
# dirty hack to keep this partial support and make the unit tests pass
import platform
if sys.version_info[0] < 3 and platform.python_implementation() != 'PyPy':
if 'L' not in _array_type_mappings.keys():
_array_type_mappings['L'] = LongType
_array_unsigned_int_typecode_ctype_mappings['L'] = ctypes.c_uint
def _infer_type(obj):
"""Infer the DataType from obj
"""
if obj is None:
return NullType()
if hasattr(obj, '__UDT__'):
return obj.__UDT__
dataType = _type_mappings.get(type(obj))
if dataType is DecimalType:
# the precision and scale of `obj` may be different from row to row.
return DecimalType(38, 18)
elif dataType is not None:
return dataType()
if isinstance(obj, dict):
for key, value in obj.items():
if key is not None and value is not None:
return MapType(_infer_type(key), _infer_type(value), True)
return MapType(NullType(), NullType(), True)
elif isinstance(obj, list):
for v in obj:
if v is not None:
return ArrayType(_infer_type(obj[0]), True)
return ArrayType(NullType(), True)
elif isinstance(obj, array):
if obj.typecode in _array_type_mappings:
return ArrayType(_array_type_mappings[obj.typecode](), False)
else:
raise TypeError("not supported type: array(%s)" % obj.typecode)
else:
try:
return _infer_schema(obj)
except TypeError:
raise TypeError("not supported type: %s" % type(obj))
def _infer_schema(row, names=None):
"""Infer the schema from dict/namedtuple/object"""
if isinstance(row, dict):
items = sorted(row.items())
elif isinstance(row, (tuple, list)):
if hasattr(row, "__fields__"): # Row
items = zip(row.__fields__, tuple(row))
elif hasattr(row, "_fields"): # namedtuple
items = zip(row._fields, tuple(row))
else:
if names is None:
names = ['_%d' % i for i in range(1, len(row) + 1)]
elif len(names) < len(row):
names.extend('_%d' % i for i in range(len(names) + 1, len(row) + 1))
items = zip(names, row)
elif hasattr(row, "__dict__"): # object
items = sorted(row.__dict__.items())
else:
raise TypeError("Can not infer schema for type: %s" % type(row))
fields = [StructField(k, _infer_type(v), True) for k, v in items]
return StructType(fields)
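# a quick illustration (not part of the original source): _infer_schema({'name': 'Alice', 'age': 1})
# yields StructType(List(StructField(age,LongType,true),StructField(name,StringType,true)))
# because dict items are sorted by key and Python ints map to LongType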
def _has_nulltype(dt):
""" Return whether there is NullType in `dt` or not """
if isinstance(dt, StructType):
return any(_has_nulltype(f.dataType) for f in dt.fields)
elif isinstance(dt, ArrayType):
        return _has_nulltype(dt.elementType)
elif isinstance(dt, MapType):
return _has_nulltype(dt.keyType) or _has_nulltype(dt.valueType)
else:
return isinstance(dt, NullType)
def _merge_type(a, b, name=None):
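    # merge two inferred types into one compatible type: NullType yields to the other side,
    # struct fields are merged recursively by name, and mismatched types raise a TypeError
    # (no numeric widening yet, see the TODO below)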
if name is None:
new_msg = lambda msg: msg
new_name = lambda n: "field %s" % n
else:
new_msg = lambda msg: "%s: %s" % (name, msg)
new_name = lambda n: "field %s in %s" % (n, name)
if isinstance(a, NullType):
return b
elif isinstance(b, NullType):
return a
elif type(a) is not type(b):
# TODO: type cast (such as int -> long)
raise TypeError(new_msg("Can not merge type %s and %s" % (type(a), type(b))))
# same type
if isinstance(a, StructType):
nfs = dict((f.name, f.dataType) for f in b.fields)
fields = [StructField(f.name, _merge_type(f.dataType, nfs.get(f.name, NullType()),
name=new_name(f.name)))
for f in a.fields]
names = set([f.name for f in fields])
for n in nfs:
if n not in names:
fields.append(StructField(n, nfs[n]))
return StructType(fields)
elif isinstance(a, ArrayType):
return ArrayType(_merge_type(a.elementType, b.elementType,
name='element in array %s' % name), True)
elif isinstance(a, MapType):
return MapType(_merge_type(a.keyType, b.keyType, name='key of map %s' % name),
_merge_type(a.valueType, b.valueType, name='value of map %s' % name),
True)
else:
return a
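# Illustrative sketch, not part of the original Spark source: how _merge_type fills a
# NullType field from a second schema while leaving already-resolved fields untouched.
# The field names are assumptions made only for this example.
def _example_merge_struct_types():
    a = StructType([StructField("x", LongType(), True),
                    StructField("y", NullType(), True)])
    b = StructType([StructField("y", StringType(), True)])
    # "y" is upgraded from NullType to StringType; "x" keeps its LongType.
    return _merge_type(a, b)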
def _need_converter(dataType):
if isinstance(dataType, StructType):
return True
elif isinstance(dataType, ArrayType):
return _need_converter(dataType.elementType)
elif isinstance(dataType, MapType):
return _need_converter(dataType.keyType) or _need_converter(dataType.valueType)
elif isinstance(dataType, NullType):
return True
else:
return False
def _create_converter(dataType):
"""Create a converter to drop the names of fields in obj """
if not _need_converter(dataType):
return lambda x: x
if isinstance(dataType, ArrayType):
conv = _create_converter(dataType.elementType)
return lambda row: [conv(v) for v in row]
elif isinstance(dataType, MapType):
kconv = _create_converter(dataType.keyType)
vconv = _create_converter(dataType.valueType)
return lambda row: dict((kconv(k), vconv(v)) for k, v in row.items())
elif isinstance(dataType, NullType):
return lambda x: None
elif not isinstance(dataType, StructType):
return lambda x: x
# dataType must be StructType
names = [f.name for f in dataType.fields]
converters = [_create_converter(f.dataType) for f in dataType.fields]
convert_fields = any(_need_converter(f.dataType) for f in dataType.fields)
def convert_struct(obj):
if obj is None:
return
if isinstance(obj, (tuple, list)):
if convert_fields:
return tuple(conv(v) for v, conv in zip(obj, converters))
else:
return tuple(obj)
if isinstance(obj, dict):
d = obj
elif hasattr(obj, "__dict__"): # object
d = obj.__dict__
else:
raise TypeError("Unexpected obj type: %s" % type(obj))
if convert_fields:
return tuple([conv(d.get(name)) for name, conv in zip(names, converters)])
else:
return tuple([d.get(name) for name in names])
return convert_struct
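# Illustrative sketch, not part of the original Spark source: _create_converter drops
# field names, turning a dict row into a plain tuple ordered by the schema. The schema
# and row used here are assumptions made only for this example.
def _example_drop_field_names():
    schema = StructType([StructField("a", LongType(), True)])
    converter = _create_converter(schema)
    return converter({"a": 1})  # -> (1,)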
_acceptable_types = {
BooleanType: (bool,),
ByteType: (int, long),
ShortType: (int, long),
IntegerType: (int, long),
LongType: (int, long),
FloatType: (float,),
DoubleType: (float,),
DecimalType: (decimal.Decimal,),
StringType: (str, unicode),
BinaryType: (bytearray,),
DateType: (datetime.date, datetime.datetime),
TimestampType: (datetime.datetime,),
ArrayType: (list, tuple, array),
MapType: (dict,),
StructType: (tuple, list, dict),
}
def _make_type_verifier(dataType, nullable=True, name=None):
"""
Make a verifier that checks the type of obj against dataType and raises a TypeError if they do
not match.
This verifier also checks the value of obj against datatype and raises a ValueError if it's not
within the allowed range, e.g. using 128 as ByteType will overflow. Note that, Python float is
not checked, so it will become infinity when cast to Java float if it overflows.
>>> _make_type_verifier(StructType([]))(None)
>>> _make_type_verifier(StringType())("")
>>> _make_type_verifier(LongType())(0)
>>> _make_type_verifier(ArrayType(ShortType()))(list(range(3)))
>>> _make_type_verifier(ArrayType(StringType()))(set()) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
TypeError:...
>>> _make_type_verifier(MapType(StringType(), IntegerType()))({})
>>> _make_type_verifier(StructType([]))(())
>>> _make_type_verifier(StructType([]))([])
>>> _make_type_verifier(StructType([]))([1]) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
ValueError:...
>>> # Check if numeric values are within the allowed range.
>>> _make_type_verifier(ByteType())(12)
>>> _make_type_verifier(ByteType())(1234) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
ValueError:...
>>> _make_type_verifier(ByteType(), False)(None) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
ValueError:...
>>> _make_type_verifier(
... ArrayType(ShortType(), False))([1, None]) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
ValueError:...
>>> _make_type_verifier(MapType(StringType(), IntegerType()))({None: 1})
Traceback (most recent call last):
...
ValueError:...
>>> schema = StructType().add("a", IntegerType()).add("b", StringType(), False)
>>> _make_type_verifier(schema)((1, None)) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
ValueError:...
"""
if name is None:
new_msg = lambda msg: msg
new_name = lambda n: "field %s" % n
else:
new_msg = lambda msg: "%s: %s" % (name, msg)
new_name = lambda n: "field %s in %s" % (n, name)
def verify_nullability(obj):
if obj is None:
if nullable:
return True
else:
raise ValueError(new_msg("This field is not nullable, but got None"))
else:
return False
_type = type(dataType)
def assert_acceptable_types(obj):
assert _type in _acceptable_types, \
new_msg("unknown datatype: %s for object %r" % (dataType, obj))
def verify_acceptable_types(obj):
        # subclasses of these types can not be converted via fromInternal in the JVM
if type(obj) not in _acceptable_types[_type]:
raise TypeError(new_msg("%s can not accept object %r in type %s"
% (dataType, obj, type(obj))))
if isinstance(dataType, StringType):
# StringType can work with any types
verify_value = lambda _: _
elif isinstance(dataType, UserDefinedType):
verifier = _make_type_verifier(dataType.sqlType(), name=name)
def verify_udf(obj):
if not (hasattr(obj, '__UDT__') and obj.__UDT__ == dataType):
raise ValueError(new_msg("%r is not an instance of type %r" % (obj, dataType)))
verifier(dataType.toInternal(obj))
verify_value = verify_udf
elif isinstance(dataType, ByteType):
def verify_byte(obj):
assert_acceptable_types(obj)
verify_acceptable_types(obj)
if obj < -128 or obj > 127:
raise ValueError(new_msg("object of ByteType out of range, got: %s" % obj))
verify_value = verify_byte
elif isinstance(dataType, ShortType):
def verify_short(obj):
assert_acceptable_types(obj)
verify_acceptable_types(obj)
if obj < -32768 or obj > 32767:
raise ValueError(new_msg("object of ShortType out of range, got: %s" % obj))
verify_value = verify_short
elif isinstance(dataType, IntegerType):
def verify_integer(obj):
assert_acceptable_types(obj)
verify_acceptable_types(obj)
if obj < -2147483648 or obj > 2147483647:
raise ValueError(
new_msg("object of IntegerType out of range, got: %s" % obj))
verify_value = verify_integer
elif isinstance(dataType, ArrayType):
element_verifier = _make_type_verifier(
dataType.elementType, dataType.containsNull, name="element in array %s" % name)
def verify_array(obj):
assert_acceptable_types(obj)
verify_acceptable_types(obj)
for i in obj:
element_verifier(i)
verify_value = verify_array
elif isinstance(dataType, MapType):
key_verifier = _make_type_verifier(dataType.keyType, False, name="key of map %s" % name)
value_verifier = _make_type_verifier(
dataType.valueType, dataType.valueContainsNull, name="value of map %s" % name)
def verify_map(obj):
assert_acceptable_types(obj)
verify_acceptable_types(obj)
for k, v in obj.items():
key_verifier(k)
value_verifier(v)
verify_value = verify_map
elif isinstance(dataType, StructType):
verifiers = []
for f in dataType.fields:
verifier = _make_type_verifier(f.dataType, f.nullable, name=new_name(f.name))
verifiers.append((f.name, verifier))
def verify_struct(obj):
assert_acceptable_types(obj)
if isinstance(obj, dict):
for f, verifier in verifiers:
verifier(obj.get(f))
elif isinstance(obj, Row) and getattr(obj, "__from_dict__", False):
# the order in obj could be different than dataType.fields
for f, verifier in verifiers:
verifier(obj[f])
elif isinstance(obj, (tuple, list)):
if len(obj) != len(verifiers):
raise ValueError(
new_msg("Length of object (%d) does not match with "
"length of fields (%d)" % (len(obj), len(verifiers))))
for v, (_, verifier) in zip(obj, verifiers):
verifier(v)
elif hasattr(obj, "__dict__"):
d = obj.__dict__
for f, verifier in verifiers:
verifier(d.get(f))
else:
raise TypeError(new_msg("StructType can not accept object %r in type %s"
% (obj, type(obj))))
verify_value = verify_struct
else:
def verify_default(obj):
assert_acceptable_types(obj)
verify_acceptable_types(obj)
verify_value = verify_default
def verify(obj):
if not verify_nullability(obj):
verify_value(obj)
return verify
# This is used to unpickle a Row from JVM
def _create_row_inbound_converter(dataType):
return lambda *a: dataType.fromInternal(a)
def _create_row(fields, values):
row = Row(*values)
row.__fields__ = fields
return row
class Row(tuple):
"""
A row in L{DataFrame}.
The fields in it can be accessed:
* like attributes (``row.key``)
* like dictionary values (``row[key]``)
``key in row`` will search through row keys.
    Row can be used to create a row object by using named arguments;
    the fields will be sorted by names. It is not allowed to omit
    a named argument to represent that a value is None or missing; this should
    be explicitly set to None in this case.
>>> row = Row(name="Alice", age=11)
>>> row
Row(age=11, name='Alice')
>>> row['name'], row['age']
('Alice', 11)
>>> row.name, row.age
('Alice', 11)
>>> 'name' in row
True
>>> 'wrong_key' in row
False
Row also can be used to create another Row like class, then it
could be used to create Row objects, such as
>>> Person = Row("name", "age")
>>> Person
<Row(name, age)>
>>> 'name' in Person
True
>>> 'wrong_key' in Person
False
>>> Person("Alice", 11)
Row(name='Alice', age=11)
"""
def __new__(self, *args, **kwargs):
if args and kwargs:
raise ValueError("Can not use both args "
"and kwargs to create Row")
if kwargs:
# create row objects
names = sorted(kwargs.keys())
row = tuple.__new__(self, [kwargs[n] for n in names])
row.__fields__ = names
row.__from_dict__ = True
return row
else:
# create row class or objects
return tuple.__new__(self, args)
def asDict(self, recursive=False):
"""
        Return as a dict
:param recursive: turns the nested Row as dict (default: False).
>>> Row(name="Alice", age=11).asDict() == {'name': 'Alice', 'age': 11}
True
>>> row = Row(key=1, value=Row(name='a', age=2))
>>> row.asDict() == {'key': 1, 'value': Row(age=2, name='a')}
True
>>> row.asDict(True) == {'key': 1, 'value': {'name': 'a', 'age': 2}}
True
"""
if not hasattr(self, "__fields__"):
raise TypeError("Cannot convert a Row class into dict")
if recursive:
def conv(obj):
if isinstance(obj, Row):
return obj.asDict(True)
elif isinstance(obj, list):
return [conv(o) for o in obj]
elif isinstance(obj, dict):
return dict((k, conv(v)) for k, v in obj.items())
else:
return obj
return dict(zip(self.__fields__, (conv(o) for o in self)))
else:
return dict(zip(self.__fields__, self))
def __contains__(self, item):
if hasattr(self, "__fields__"):
return item in self.__fields__
else:
return super(Row, self).__contains__(item)
    # let the object act like a class
def __call__(self, *args):
"""create new Row object"""
if len(args) > len(self):
raise ValueError("Can not create Row with fields %s, expected %d values "
"but got %s" % (self, len(self), args))
return _create_row(self, args)
def __getitem__(self, item):
if isinstance(item, (int, slice)):
return super(Row, self).__getitem__(item)
try:
# it will be slow when it has many fields,
# but this will not be used in normal cases
idx = self.__fields__.index(item)
return super(Row, self).__getitem__(idx)
except IndexError:
raise KeyError(item)
except ValueError:
raise ValueError(item)
def __getattr__(self, item):
if item.startswith("__"):
raise AttributeError(item)
try:
# it will be slow when it has many fields,
# but this will not be used in normal cases
idx = self.__fields__.index(item)
return self[idx]
except IndexError:
raise AttributeError(item)
except ValueError:
raise AttributeError(item)
def __setattr__(self, key, value):
if key != '__fields__' and key != "__from_dict__":
raise Exception("Row is read-only")
self.__dict__[key] = value
def __reduce__(self):
"""Returns a tuple so Python knows how to pickle Row."""
if hasattr(self, "__fields__"):
return (_create_row, (self.__fields__, tuple(self)))
else:
return tuple.__reduce__(self)
def __repr__(self):
"""Printable representation of Row used in Python REPL."""
if hasattr(self, "__fields__"):
return "Row(%s)" % ", ".join("%s=%r" % (k, v)
for k, v in zip(self.__fields__, tuple(self)))
else:
return "<Row(%s)>" % ", ".join(self)
class DateConverter(object):
def can_convert(self, obj):
return isinstance(obj, datetime.date)
def convert(self, obj, gateway_client):
Date = JavaClass("java.sql.Date", gateway_client)
return Date.valueOf(obj.strftime("%Y-%m-%d"))
class DatetimeConverter(object):
def can_convert(self, obj):
return isinstance(obj, datetime.datetime)
def convert(self, obj, gateway_client):
Timestamp = JavaClass("java.sql.Timestamp", gateway_client)
seconds = (calendar.timegm(obj.utctimetuple()) if obj.tzinfo
else time.mktime(obj.timetuple()))
t = Timestamp(int(seconds) * 1000)
t.setNanos(obj.microsecond * 1000)
return t
# datetime is a subclass of date, we should register DatetimeConverter first
register_input_converter(DatetimeConverter())
register_input_converter(DateConverter())
def to_arrow_type(dt):
""" Convert Spark data type to pyarrow type
"""
from distutils.version import LooseVersion
import pyarrow as pa
if type(dt) == BooleanType:
arrow_type = pa.bool_()
elif type(dt) == ByteType:
arrow_type = pa.int8()
elif type(dt) == ShortType:
arrow_type = pa.int16()
elif type(dt) == IntegerType:
arrow_type = pa.int32()
elif type(dt) == LongType:
arrow_type = pa.int64()
elif type(dt) == FloatType:
arrow_type = pa.float32()
elif type(dt) == DoubleType:
arrow_type = pa.float64()
elif type(dt) == DecimalType:
arrow_type = pa.decimal128(dt.precision, dt.scale)
elif type(dt) == StringType:
arrow_type = pa.string()
elif type(dt) == BinaryType:
# TODO: remove version check once minimum pyarrow version is 0.10.0
if LooseVersion(pa.__version__) < LooseVersion("0.10.0"):
raise TypeError("Unsupported type in conversion to Arrow: " + str(dt) +
"\nPlease install pyarrow >= 0.10.0 for BinaryType support.")
arrow_type = pa.binary()
elif type(dt) == DateType:
arrow_type = pa.date32()
elif type(dt) == TimestampType:
# Timestamps should be in UTC, JVM Arrow timestamps require a timezone to be read
arrow_type = pa.timestamp('us', tz='UTC')
elif type(dt) == ArrayType:
if type(dt.elementType) in [StructType, TimestampType]:
raise TypeError("Unsupported type in conversion to Arrow: " + str(dt))
arrow_type = pa.list_(to_arrow_type(dt.elementType))
elif type(dt) == StructType:
if any(type(field.dataType) == StructType for field in dt):
raise TypeError("Nested StructType not supported in conversion to Arrow")
fields = [pa.field(field.name, to_arrow_type(field.dataType), nullable=field.nullable)
for field in dt]
arrow_type = pa.struct(fields)
else:
raise TypeError("Unsupported type in conversion to Arrow: " + str(dt))
return arrow_type
def to_arrow_schema(schema):
""" Convert a schema from Spark to Arrow
"""
import pyarrow as pa
fields = [pa.field(field.name, to_arrow_type(field.dataType), nullable=field.nullable)
for field in schema]
return pa.schema(fields)
def from_arrow_type(at):
""" Convert pyarrow type to Spark data type.
"""
from distutils.version import LooseVersion
import pyarrow as pa
import pyarrow.types as types
if types.is_boolean(at):
spark_type = BooleanType()
elif types.is_int8(at):
spark_type = ByteType()
elif types.is_int16(at):
spark_type = ShortType()
elif types.is_int32(at):
spark_type = IntegerType()
elif types.is_int64(at):
spark_type = LongType()
elif types.is_float32(at):
spark_type = FloatType()
elif types.is_float64(at):
spark_type = DoubleType()
elif types.is_decimal(at):
spark_type = DecimalType(precision=at.precision, scale=at.scale)
elif types.is_string(at):
spark_type = StringType()
elif types.is_binary(at):
# TODO: remove version check once minimum pyarrow version is 0.10.0
if LooseVersion(pa.__version__) < LooseVersion("0.10.0"):
raise TypeError("Unsupported type in conversion from Arrow: " + str(at) +
"\nPlease install pyarrow >= 0.10.0 for BinaryType support.")
spark_type = BinaryType()
elif types.is_date32(at):
spark_type = DateType()
elif types.is_timestamp(at):
spark_type = TimestampType()
elif types.is_list(at):
if types.is_timestamp(at.value_type):
raise TypeError("Unsupported type in conversion from Arrow: " + str(at))
spark_type = ArrayType(from_arrow_type(at.value_type))
else:
raise TypeError("Unsupported type in conversion from Arrow: " + str(at))
return spark_type
def from_arrow_schema(arrow_schema):
""" Convert schema from Arrow to Spark.
"""
return StructType(
[StructField(field.name, from_arrow_type(field.type), nullable=field.nullable)
for field in arrow_schema])
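# Illustrative sketch, not part of the original Spark source: round-tripping a simple
# schema through Arrow with the two helpers above. Requires pyarrow to be installed;
# the schema itself is an assumption made only for this example.
def _example_arrow_schema_roundtrip():
    spark_schema = StructType([StructField("id", LongType(), True),
                               StructField("name", StringType(), True)])
    arrow_schema = to_arrow_schema(spark_schema)
    # from_arrow_schema should recover an equivalent Spark schema.
    return from_arrow_schema(arrow_schema)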
def _arrow_column_to_pandas(column, data_type):
""" Convert Arrow Column to pandas Series.
    :param column: pyarrow.lib.Column
:param data_type: a Spark data type for the column
"""
import pandas as pd
import pyarrow as pa
from distutils.version import LooseVersion
# If the given column is a date type column, creates a series of datetime.date directly instead
# of creating datetime64[ns] as intermediate data to avoid overflow caused by datetime64[ns]
# type handling.
if LooseVersion(pa.__version__) < LooseVersion("0.11.0"):
if type(data_type) == DateType:
return pd.Series(column.to_pylist(), name=column.name)
else:
return column.to_pandas()
else:
# Since Arrow 0.11.0, support date_as_object to return datetime.date instead of
# np.datetime64.
return column.to_pandas(date_as_object=True)
def _arrow_table_to_pandas(table, schema):
""" Convert Arrow Table to pandas DataFrame.
Pandas DataFrame created from PyArrow uses datetime64[ns] for date type values, but we should
    use datetime.date to match the behavior when Arrow optimization is disabled.
:param table: pyarrow.lib.Table
:param schema: a Spark schema of the pyarrow.lib.Table
"""
import pandas as pd
import pyarrow as pa
from distutils.version import LooseVersion
# If the given table contains a date type column, use `_arrow_column_to_pandas` for pyarrow<0.11
# or use `date_as_object` option for pyarrow>=0.11 to avoid creating datetime64[ns] as
# intermediate data.
if LooseVersion(pa.__version__) < LooseVersion("0.11.0"):
if any(type(field.dataType) == DateType for field in schema):
return pd.concat([_arrow_column_to_pandas(column, field.dataType)
for column, field in zip(table.itercolumns(), schema)], axis=1)
else:
return table.to_pandas()
else:
return table.to_pandas(date_as_object=True)
def _get_local_timezone():
""" Get local timezone using pytz with environment variable, or dateutil.
If there is a 'TZ' environment variable, pass it to pandas to use pytz and use it as timezone
string, otherwise use the special word 'dateutil/:' which means that pandas uses dateutil and
it reads system configuration to know the system local timezone.
See also:
- https://github.com/pandas-dev/pandas/blob/0.19.x/pandas/tslib.pyx#L1753
- https://github.com/dateutil/dateutil/blob/2.6.1/dateutil/tz/tz.py#L1338
"""
import os
return os.environ.get('TZ', 'dateutil/:')
def _check_series_localize_timestamps(s, timezone):
"""
Convert timezone aware timestamps to timezone-naive in the specified timezone or local timezone.
If the input series is not a timestamp series, then the same series is returned. If the input
series is a timestamp series, then a converted series is returned.
:param s: pandas.Series
:param timezone: the timezone to convert. if None then use local timezone
:return pandas.Series that have been converted to tz-naive
"""
from pyspark.sql.utils import require_minimum_pandas_version
require_minimum_pandas_version()
from pandas.api.types import is_datetime64tz_dtype
tz = timezone or _get_local_timezone()
# TODO: handle nested timestamps, such as ArrayType(TimestampType())?
if is_datetime64tz_dtype(s.dtype):
return s.dt.tz_convert(tz).dt.tz_localize(None)
else:
return s
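# Illustrative sketch, not part of the original Spark source: converting a tz-aware
# pandas Series to tz-naive values in a target zone. The sample timestamps and the
# 'America/Los_Angeles' zone are assumptions made only for this example.
def _example_localize_timestamps():
    import pandas as pd
    s = pd.Series(pd.date_range("2018-01-01", periods=2, freq="H", tz="UTC"))
    # Values are re-expressed in the target zone and the timezone info is dropped.
    return _check_series_localize_timestamps(s, "America/Los_Angeles")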
def _check_dataframe_localize_timestamps(pdf, timezone):
"""
Convert timezone aware timestamps to timezone-naive in the specified timezone or local timezone
:param pdf: pandas.DataFrame
:param timezone: the timezone to convert. if None then use local timezone
:return pandas.DataFrame where any timezone aware columns have been converted to tz-naive
"""
from pyspark.sql.utils import require_minimum_pandas_version
require_minimum_pandas_version()
for column, series in pdf.iteritems():
pdf[column] = _check_series_localize_timestamps(series, timezone)
return pdf
def _check_series_convert_timestamps_internal(s, timezone):
"""
Convert a tz-naive timestamp in the specified timezone or local timezone to UTC normalized for
Spark internal storage
:param s: a pandas.Series
:param timezone: the timezone to convert. if None then use local timezone
:return pandas.Series where if it is a timestamp, has been UTC normalized without a time zone
"""
from pyspark.sql.utils import require_minimum_pandas_version
require_minimum_pandas_version()
from pandas.api.types import is_datetime64_dtype, is_datetime64tz_dtype
# TODO: handle nested timestamps, such as ArrayType(TimestampType())?
if is_datetime64_dtype(s.dtype):
# When tz_localize a tz-naive timestamp, the result is ambiguous if the tz-naive
        # timestamp is during the hour when the clock is adjusted backward due to
# daylight saving time (dst).
# E.g., for America/New_York, the clock is adjusted backward on 2015-11-01 2:00 to
# 2015-11-01 1:00 from dst-time to standard time, and therefore, when tz_localize
# a tz-naive timestamp 2015-11-01 1:30 with America/New_York timezone, it can be either
        # dst time (2015-11-01 1:30-0400) or standard time (2015-11-01 1:30-0500).
#
        # Here we explicitly choose to use standard time. This matches the default behavior of
# pytz.
#
        # Here is some code to help understand this behavior:
# >>> import datetime
# >>> import pandas as pd
# >>> import pytz
# >>>
# >>> t = datetime.datetime(2015, 11, 1, 1, 30)
# >>> ts = pd.Series([t])
# >>> tz = pytz.timezone('America/New_York')
# >>>
# >>> ts.dt.tz_localize(tz, ambiguous=True)
# 0 2015-11-01 01:30:00-04:00
# dtype: datetime64[ns, America/New_York]
# >>>
# >>> ts.dt.tz_localize(tz, ambiguous=False)
# 0 2015-11-01 01:30:00-05:00
# dtype: datetime64[ns, America/New_York]
# >>>
# >>> str(tz.localize(t))
# '2015-11-01 01:30:00-05:00'
tz = timezone or _get_local_timezone()
return s.dt.tz_localize(tz, ambiguous=False).dt.tz_convert('UTC')
elif is_datetime64tz_dtype(s.dtype):
return s.dt.tz_convert('UTC')
else:
return s
def _check_series_convert_timestamps_localize(s, from_timezone, to_timezone):
"""
Convert timestamp to timezone-naive in the specified timezone or local timezone
:param s: a pandas.Series
:param from_timezone: the timezone to convert from. if None then use local timezone
:param to_timezone: the timezone to convert to. if None then use local timezone
:return pandas.Series where if it is a timestamp, has been converted to tz-naive
"""
from pyspark.sql.utils import require_minimum_pandas_version
require_minimum_pandas_version()
import pandas as pd
from pandas.api.types import is_datetime64tz_dtype, is_datetime64_dtype
from_tz = from_timezone or _get_local_timezone()
to_tz = to_timezone or _get_local_timezone()
# TODO: handle nested timestamps, such as ArrayType(TimestampType())?
if is_datetime64tz_dtype(s.dtype):
return s.dt.tz_convert(to_tz).dt.tz_localize(None)
elif is_datetime64_dtype(s.dtype) and from_tz != to_tz:
# `s.dt.tz_localize('tzlocal()')` doesn't work properly when including NaT.
return s.apply(
lambda ts: ts.tz_localize(from_tz, ambiguous=False).tz_convert(to_tz).tz_localize(None)
if ts is not pd.NaT else pd.NaT)
else:
return s
def _check_series_convert_timestamps_local_tz(s, timezone):
"""
Convert timestamp to timezone-naive in the specified timezone or local timezone
:param s: a pandas.Series
:param timezone: the timezone to convert to. if None then use local timezone
:return pandas.Series where if it is a timestamp, has been converted to tz-naive
"""
return _check_series_convert_timestamps_localize(s, None, timezone)
def _check_series_convert_timestamps_tz_local(s, timezone):
"""
Convert timestamp to timezone-naive in the specified timezone or local timezone
:param s: a pandas.Series
:param timezone: the timezone to convert from. if None then use local timezone
:return pandas.Series where if it is a timestamp, has been converted to tz-naive
"""
return _check_series_convert_timestamps_localize(s, timezone, None)
def _test():
import doctest
from pyspark.context import SparkContext
from pyspark.sql import SparkSession
globs = globals()
sc = SparkContext('local[4]', 'PythonTest')
globs['sc'] = sc
globs['spark'] = SparkSession.builder.getOrCreate()
(failure_count, test_count) = doctest.testmod(globs=globs, optionflags=doctest.ELLIPSIS)
globs['sc'].stop()
if failure_count:
sys.exit(-1)
if __name__ == "__main__":
_test()
| apache-2.0 |
TomAugspurger/pandas | pandas/tests/window/test_grouper.py | 1 | 6942 | import numpy as np
import pytest
import pandas as pd
from pandas import DataFrame, Series
import pandas._testing as tm
from pandas.core.groupby.groupby import get_groupby
class TestGrouperGrouping:
def setup_method(self, method):
self.series = Series(np.arange(10))
self.frame = DataFrame({"A": [1] * 20 + [2] * 12 + [3] * 8, "B": np.arange(40)})
def test_mutated(self):
msg = r"groupby\(\) got an unexpected keyword argument 'foo'"
with pytest.raises(TypeError, match=msg):
self.frame.groupby("A", foo=1)
g = self.frame.groupby("A")
assert not g.mutated
g = get_groupby(self.frame, by="A", mutated=True)
assert g.mutated
def test_getitem(self):
g = self.frame.groupby("A")
g_mutated = get_groupby(self.frame, by="A", mutated=True)
expected = g_mutated.B.apply(lambda x: x.rolling(2).mean())
result = g.rolling(2).mean().B
tm.assert_series_equal(result, expected)
result = g.rolling(2).B.mean()
tm.assert_series_equal(result, expected)
result = g.B.rolling(2).mean()
tm.assert_series_equal(result, expected)
result = self.frame.B.groupby(self.frame.A).rolling(2).mean()
tm.assert_series_equal(result, expected)
def test_getitem_multiple(self):
# GH 13174
g = self.frame.groupby("A")
r = g.rolling(2)
g_mutated = get_groupby(self.frame, by="A", mutated=True)
expected = g_mutated.B.apply(lambda x: x.rolling(2).count())
result = r.B.count()
tm.assert_series_equal(result, expected)
result = r.B.count()
tm.assert_series_equal(result, expected)
def test_rolling(self):
g = self.frame.groupby("A")
r = g.rolling(window=4)
for f in ["sum", "mean", "min", "max", "count", "kurt", "skew"]:
result = getattr(r, f)()
expected = g.apply(lambda x: getattr(x.rolling(4), f)())
tm.assert_frame_equal(result, expected)
for f in ["std", "var"]:
result = getattr(r, f)(ddof=1)
expected = g.apply(lambda x: getattr(x.rolling(4), f)(ddof=1))
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"interpolation", ["linear", "lower", "higher", "midpoint", "nearest"]
)
def test_rolling_quantile(self, interpolation):
g = self.frame.groupby("A")
r = g.rolling(window=4)
result = r.quantile(0.4, interpolation=interpolation)
expected = g.apply(
lambda x: x.rolling(4).quantile(0.4, interpolation=interpolation)
)
tm.assert_frame_equal(result, expected)
def test_rolling_corr_cov(self):
g = self.frame.groupby("A")
r = g.rolling(window=4)
for f in ["corr", "cov"]:
result = getattr(r, f)(self.frame)
def func(x):
return getattr(x.rolling(4), f)(self.frame)
expected = g.apply(func)
tm.assert_frame_equal(result, expected)
result = getattr(r.B, f)(pairwise=True)
def func(x):
return getattr(x.B.rolling(4), f)(pairwise=True)
expected = g.apply(func)
tm.assert_series_equal(result, expected)
def test_rolling_apply(self, raw):
g = self.frame.groupby("A")
r = g.rolling(window=4)
# reduction
result = r.apply(lambda x: x.sum(), raw=raw)
expected = g.apply(lambda x: x.rolling(4).apply(lambda y: y.sum(), raw=raw))
tm.assert_frame_equal(result, expected)
def test_rolling_apply_mutability(self):
# GH 14013
df = pd.DataFrame({"A": ["foo"] * 3 + ["bar"] * 3, "B": [1] * 6})
g = df.groupby("A")
mi = pd.MultiIndex.from_tuples(
[("bar", 3), ("bar", 4), ("bar", 5), ("foo", 0), ("foo", 1), ("foo", 2)]
)
mi.names = ["A", None]
# Grouped column should not be a part of the output
expected = pd.DataFrame([np.nan, 2.0, 2.0] * 2, columns=["B"], index=mi)
result = g.rolling(window=2).sum()
tm.assert_frame_equal(result, expected)
# Call an arbitrary function on the groupby
g.sum()
# Make sure nothing has been mutated
result = g.rolling(window=2).sum()
tm.assert_frame_equal(result, expected)
def test_expanding(self):
g = self.frame.groupby("A")
r = g.expanding()
for f in ["sum", "mean", "min", "max", "count", "kurt", "skew"]:
result = getattr(r, f)()
expected = g.apply(lambda x: getattr(x.expanding(), f)())
tm.assert_frame_equal(result, expected)
for f in ["std", "var"]:
result = getattr(r, f)(ddof=0)
expected = g.apply(lambda x: getattr(x.expanding(), f)(ddof=0))
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"interpolation", ["linear", "lower", "higher", "midpoint", "nearest"]
)
def test_expanding_quantile(self, interpolation):
g = self.frame.groupby("A")
r = g.expanding()
result = r.quantile(0.4, interpolation=interpolation)
expected = g.apply(
lambda x: x.expanding().quantile(0.4, interpolation=interpolation)
)
tm.assert_frame_equal(result, expected)
def test_expanding_corr_cov(self):
g = self.frame.groupby("A")
r = g.expanding()
for f in ["corr", "cov"]:
result = getattr(r, f)(self.frame)
def func(x):
return getattr(x.expanding(), f)(self.frame)
expected = g.apply(func)
tm.assert_frame_equal(result, expected)
result = getattr(r.B, f)(pairwise=True)
def func(x):
return getattr(x.B.expanding(), f)(pairwise=True)
expected = g.apply(func)
tm.assert_series_equal(result, expected)
def test_expanding_apply(self, raw):
g = self.frame.groupby("A")
r = g.expanding()
# reduction
result = r.apply(lambda x: x.sum(), raw=raw)
expected = g.apply(lambda x: x.expanding().apply(lambda y: y.sum(), raw=raw))
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("expected_value,raw_value", [[1.0, True], [0.0, False]])
def test_groupby_rolling(self, expected_value, raw_value):
# GH 31754
def foo(x):
return int(isinstance(x, np.ndarray))
df = pd.DataFrame({"id": [1, 1, 1], "value": [1, 2, 3]})
result = df.groupby("id").value.rolling(1).apply(foo, raw=raw_value)
expected = Series(
[expected_value] * 3,
index=pd.MultiIndex.from_tuples(
((1, 0), (1, 1), (1, 2)), names=["id", None]
),
name="value",
)
tm.assert_series_equal(result, expected)
| bsd-3-clause |
ZenDevelopmentSystems/scikit-learn | examples/neighbors/plot_digits_kde_sampling.py | 251 | 2022 | """
=========================
Kernel Density Estimation
=========================
This example shows how kernel density estimation (KDE), a powerful
non-parametric density estimation technique, can be used to learn
a generative model for a dataset. With this generative model in place,
new samples can be drawn. These new samples reflect the underlying model
of the data.
"""
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import load_digits
from sklearn.neighbors import KernelDensity
from sklearn.decomposition import PCA
from sklearn.grid_search import GridSearchCV
# load the data
digits = load_digits()
data = digits.data
# project the 64-dimensional data to a lower dimension
pca = PCA(n_components=15, whiten=False)
data = pca.fit_transform(digits.data)
# use grid search cross-validation to optimize the bandwidth
params = {'bandwidth': np.logspace(-1, 1, 20)}
grid = GridSearchCV(KernelDensity(), params)
grid.fit(data)
print("best bandwidth: {0}".format(grid.best_estimator_.bandwidth))
# use the best estimator to compute the kernel density estimate
kde = grid.best_estimator_
# sample 44 new points from the data
new_data = kde.sample(44, random_state=0)
new_data = pca.inverse_transform(new_data)
# turn data into a 4x11 grid
new_data = new_data.reshape((4, 11, -1))
real_data = digits.data[:44].reshape((4, 11, -1))
# plot real digits and resampled digits
fig, ax = plt.subplots(9, 11, subplot_kw=dict(xticks=[], yticks=[]))
for j in range(11):
ax[4, j].set_visible(False)
for i in range(4):
im = ax[i, j].imshow(real_data[i, j].reshape((8, 8)),
cmap=plt.cm.binary, interpolation='nearest')
im.set_clim(0, 16)
im = ax[i + 5, j].imshow(new_data[i, j].reshape((8, 8)),
cmap=plt.cm.binary, interpolation='nearest')
im.set_clim(0, 16)
ax[0, 5].set_title('Selection from the input data')
ax[5, 5].set_title('"New" digits drawn from the kernel density model')
plt.show()
| bsd-3-clause |
codefordc/housing-insights | back_end/ETL/subsidy.py | 1 | 1356 | '''
subsidy.py
----------
This file collects and cleans the subsidy data from the
preservation catalog.
The resulting dataset from this file looks like:
subsidy_id nlihc_id poa_start poa_end portfolio
--------------------------------------------------------------
1 NL000001 2004-12-31 2034-12-31 DC Dept of H...
2 NL000001 2004-11-01 2024-10-31 US Dept of H...
3 NL000001 2004-10-01 2024-09-30 US Dept of H...
4 NL000001 2005-03-01 2020-03-01 DC
5 NL000001 2005-03-01 2045-03-01 DC
'''
import numpy as np
import pandas as pd
from . import utils
def load_preservation_catalog_subsidies():
'''
Loads the raw data from the preservation catalog.
It is located in 'preservation_catalog' on the S3.
'''
df = pd.read_csv(utils.S3+'preservation_catalog/Subsidy.csv')
df.columns = df.columns.str.lower()
df['poa_start'] = pd.to_datetime(df.poa_start.replace('N', np.NaN))
df['poa_end'] = pd.to_datetime(df.poa_end.replace('N', np.NaN))
return df[['subsidy_id', 'nlihc_id', 'portfolio', 'poa_start', 'poa_end']]
def load_subsidy_data(engine):
'''Adds subsidy table to database (docker or production).'''
df = load_preservation_catalog_subsidies()
return utils.write_table(df, 'new_subsidy', engine)
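# Illustrative usage sketch, not part of the original file: how the loader could be
# driven stand-alone. The connection string below is a placeholder assumption; in the
# project the SQLAlchemy engine is supplied by the ETL runner.
if __name__ == '__main__':
    from sqlalchemy import create_engine
    engine = create_engine('postgresql://user:password@localhost:5432/housing')  # placeholder
    load_subsidy_data(engine)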
| mit |
scubamut/trading-with-python | lib/classes.py | 76 | 7847 | """
worker classes
@author: Jev Kuznetsov
Licence: GPL v2
"""
__docformat__ = 'restructuredtext'
import os
import logger as logger
import yahooFinance as yahoo
from functions import returns, rank
from datetime import date
from pandas import DataFrame, Series
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
class Symbol(object):
'''
    Symbol class, the foundation of the Trading With Python library.
    This class acts as an interface to Yahoo data, Interactive Brokers, etc.
'''
def __init__(self,name):
self.name = name
self.log = logger.getLogger(self.name)
self.log.debug('class created.')
self.dataDir = os.getenv("USERPROFILE")+'\\twpData\\symbols\\'+self.name
self.log.debug('Data dir:'+self.dataDir)
self.ohlc = None # historic OHLC data
def downloadHistData(self, startDate=(2010,1,1),endDate=date.today().timetuple()[:3],\
source = 'yahoo'):
'''
get historical OHLC data from a data source (yahoo is default)
startDate and endDate are tuples in form (d,m,y)
'''
self.log.debug('Getting OHLC data')
self.ohlc = yahoo.getHistoricData(self.name,startDate,endDate)
def histData(self,column='adj_close'):
'''
Return a column of historic data.
Returns
-------------
df : DataFrame
'''
s = self.ohlc[column]
return DataFrame(s.values,s.index,[self.name])
@property
def dayReturns(self):
''' close-close returns '''
return (self.ohlc['adj_close']/self.ohlc['adj_close'].shift(1)-1)
#return DataFrame(s.values,s.index,[self.name])
class Portfolio(object):
def __init__(self,histPrice,name=''):
"""
Constructor
Parameters
----------
histPrice : historic price
"""
self.histPrice = histPrice
self.params = DataFrame(index=self.symbols)
self.params['capital'] = 100*np.ones(self.histPrice.shape[1],dtype=np.float)
self.params['last'] = self.histPrice.tail(1).T.ix[:,0]
self.params['shares'] = self.params['capital']/self.params['last']
self.name= name
def setHistPrice(self,histPrice):
self.histPrice = histPrice
def setShares(self,shares):
""" set number of shares, adjust capital
shares: list, np array or Series
"""
if len(shares) != self.histPrice.shape[1]:
raise AttributeError('Wrong size of shares vector.')
self.params['shares'] = shares
self.params['capital'] = self.params['shares']*self.params['last']
def setCapital(self,capital):
""" Set target captial, adjust number of shares """
if len(capital) != self.histPrice.shape[1]:
raise AttributeError('Wrong size of shares vector.')
self.params['capital'] = capital
self.params['shares'] = self.params['capital']/self.params['last']
def calculateStatistics(self,other=None):
''' calculate spread statistics, save internally '''
res = {}
res['micro'] = rank(self.returns[-1],self.returns)
res['macro'] = rank(self.value[-1], self.value)
res['last'] = self.value[-1]
if other is not None:
res['corr'] = self.returns.corr(returns(other))
return Series(res,name=self.name)
@property
def symbols(self):
return self.histPrice.columns.tolist()
@property
def returns(self):
return (returns(self.histPrice)*self.params['capital']).sum(axis=1)
@property
def value(self):
return (self.histPrice*self.params['shares']).sum(axis=1)
def __repr__(self):
return ("Portfolio %s \n" % self.name ) + str(self.params)
#return ('Spread %s :' % self.name ) + str.join(',',
# ['%s*%.2f' % t for t in zip(self.symbols,self.capital)])
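# Illustrative usage sketch, not part of the original library: build a two-symbol
# portfolio from a small historic price frame and allocate equal capital per leg.
# The price data below is an assumption made only for this example.
def _example_portfolio():
    idx = pd.date_range('2012-01-02', periods=3)
    hist = pd.DataFrame({'SPY': [100.0, 101.0, 102.0],
                         'IWM': [50.0, 50.5, 51.0]}, index=idx)
    p = Portfolio(hist, name='demo')
    p.setCapital([100.0, 100.0])
    return p.value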
class Spread(object):
'''
Spread class, used to build a spread out of two symbols.
'''
def __init__(self,stock,hedge,beta=None):
''' init with symbols or price series '''
if isinstance(stock,str) and isinstance(hedge,str):
self.symbols = [stock,hedge]
self._getYahooData()
elif isinstance(stock,pd.Series) and isinstance(hedge,pd.Series):
self.symbols = [stock.name,hedge.name]
self.price = pd.DataFrame(dict(zip(self.symbols,[stock,hedge]))).dropna()
else:
raise ValueError('Both stock and hedge should be of the same type, symbol string or Series')
# calculate returns
self.returns = self.price.pct_change()
if beta is not None:
self.beta = beta
else:
self.estimateBeta()
# set data
self.data = pd.DataFrame(index = self.symbols)
self.data['beta'] = pd.Series({self.symbols[0]:1., self.symbols[1]:-self.beta})
def calculateShares(self,bet):
''' set number of shares based on last quote '''
if 'price' not in self.data.columns:
print 'Getting quote...'
self.getQuote()
self.data['shares'] = bet*self.data['beta']/self.data['price']
def estimateBeta(self,plotOn=False):
""" linear estimation of beta """
x = self.returns[self.symbols[1]] # hedge
y = self.returns[self.symbols[0]] # stock
#avoid extremes
low = np.percentile(x,20)
high = np.percentile(x,80)
iValid = (x>low) & (x<high)
x = x[iValid]
y = y[iValid]
if plotOn:
plt.plot(x,y,'o')
plt.grid(True)
iteration = 1
nrOutliers = 1
while iteration < 3 and nrOutliers > 0 :
(a,b) = np.polyfit(x,y,1)
yf = np.polyval([a,b],x)
err = yf-y
idxOutlier = abs(err) > 3*np.std(err)
nrOutliers =sum(idxOutlier)
beta = a
#print 'Iteration: %i beta: %.2f outliers: %i' % (iteration,beta, nrOutliers)
x = x[~idxOutlier]
y = y[~idxOutlier]
iteration += 1
if plotOn:
yf = x*beta
plt.plot(x,yf,'-',color='red')
plt.xlabel(self.symbols[1])
plt.ylabel(self.symbols[0])
self.beta = beta
return beta
@property
def spread(self):
''' return daily returns of the pair '''
return (self.returns*self.data['beta']).sum(1)
def getQuote(self):
''' get current quote from yahoo '''
q = yahoo.getQuote(self.symbols)
self.data['price'] = q['last']
def _getYahooData(self, startDate=(2007,1,1)):
""" fetch historic data """
data = {}
for symbol in self.symbols:
print 'Downloading %s' % symbol
data[symbol]=(yahoo.getHistoricData(symbol,sDate=startDate)['adj_close'] )
self.price = pd.DataFrame(data).dropna()
def __repr__(self):
return 'Spread 1*%s & %.2f*%s ' % (self.symbols[0],-self.beta,self.symbols[1])
@property
def name(self):
return str.join('_',self.symbols)
if __name__=='__main__':
    s = Spread('SPY', 'IWM')
| bsd-3-clause |
jlegendary/orange | Orange/regression/pls.py | 6 | 16929 | """\
##########################################
Partial least squares regression (``PLS``)
##########################################
.. index:: regression
.. _`Parital Least Squares Regression`: http://en.wikipedia.org/wiki/Partial_least_squares_regression
`Partial least squares
<http://en.wikipedia.org/wiki/Partial_least_squares_regression>`_
regression is a statistical method for simultaneous prediction of
multiple response variables. Orange's implementation is
based on `Scikit learn python implementation
<https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/pls.py>`_.
The following code shows how to fit a PLS regression model on a multi-target data set.
.. literalinclude:: code/pls-example.py
:lines: 7,9,13,14
.. autoclass:: PLSRegressionLearner
:members:
.. autoclass:: PLSRegression
:members:
Utility functions
-----------------
.. autofunction:: normalize_matrix
.. autofunction:: nipals_xy
.. autofunction:: svd_xy
========
Examples
========
The following code predicts the values of output variables for the
first two instances in ``data``.
.. literalinclude:: code/pls-example.py
:lines: 16-20
::
Actual [<orange.Value 'Y1'='0.490'>, <orange.Value 'Y2'='1.237'>, <orange.Value 'Y3'='1.808'>, <orange.Value 'Y4'='0.422'>]
Predicted [<orange.Value 'Y1'='0.613'>, <orange.Value 'Y2'='0.826'>, <orange.Value 'Y3'='1.084'>, <orange.Value 'Y4'='0.534'>]
Actual [<orange.Value 'Y1'='0.167'>, <orange.Value 'Y2'='-0.664'>, <orange.Value 'Y3'='-1.378'>, <orange.Value 'Y4'='0.589'>]
Predicted [<orange.Value 'Y1'='0.058'>, <orange.Value 'Y2'='-0.706'>, <orange.Value 'Y3'='-1.420'>, <orange.Value 'Y4'='0.599'>]
To see the coefficient of the model, print the model:
.. literalinclude:: code/pls-example.py
:lines: 22
::
Regression coefficients:
Y1 Y2 Y3 Y4
X1 0.714 2.153 3.590 -0.078
X2 -0.238 -2.500 -4.797 -0.036
X3 0.230 -0.314 -0.880 -0.060
Note that coefficients are stored in a matrix since the model predicts
values of multiple outputs.
"""
import Orange
import numpy
from Orange.regression import base
from numpy import dot, zeros
from numpy import linalg
from numpy.linalg import svd, pinv
from Orange.utils import deprecated_members, deprecated_keywords
def normalize_matrix(X):
"""
Normalize a matrix column-wise: subtract the means and divide by
standard deviations. Returns the standardized matrix, sample mean
and standard deviation
:param X: data matrix
:type X: :class:`numpy.array`
"""
mu_x, sigma_x = numpy.mean(X, axis=0), numpy.std(X, axis=0)
sigma_x[sigma_x == 0] = 1.
return (X - mu_x)/sigma_x, mu_x, sigma_x
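# Illustrative sketch, not part of the original module: standardizing a small data
# matrix with normalize_matrix. The array values are assumptions made only for this
# example.
def _example_normalize_matrix():
    X = numpy.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]])
    Xc, mu, sigma = normalize_matrix(X)
    # Each column of Xc now has zero mean and unit standard deviation.
    return Xc, mu, sigma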
@deprecated_keywords({"maxIter": "max_iter"})
def nipals_xy(X, Y, mode="PLS", max_iter=500, tol=1e-06):
"""
NIPALS algorithm; returns the first left and rigth singular
vectors of X'Y.
:param X, Y: data matrix
:type X, Y: :class:`numpy.array`
:param mode: possible values "PLS" (default) or "CCA"
:type mode: string
:param max_iter: maximal number of iterations (default: 500)
:type max_iter: int
:param tol: tolerance parameter; if norm of difference
between two successive left singular vectors is less than tol,
iteration is stopped
:type tol: a not negative float
"""
yScore, uOld, ite = Y[:, [0]], 0, 1
Xpinv = Ypinv = None
# Inner loop of the Wold algo.
    while ite < max_iter:
# Update u: the X weights
if mode == "CCA":
if Xpinv is None:
Xpinv = linalg.pinv(X) # compute once pinv(X)
u = dot(Xpinv, yScore)
else: # mode PLS
# Mode PLS regress each X column on yScore
u = dot(X.T, yScore) / dot(yScore.T, yScore)
# Normalize u
u /= numpy.sqrt(dot(u.T, u))
# Update xScore: the X latent scores
xScore = dot(X, u)
# Update v: the Y weights
if mode == "CCA":
if Ypinv is None:
Ypinv = linalg.pinv(Y) # compute once pinv(Y)
v = dot(Ypinv, xScore)
else:
# Mode PLS regress each X column on yScore
v = dot(Y.T, xScore) / dot(xScore.T, xScore)
# Normalize v
v /= numpy.sqrt(dot(v.T, v))
# Update yScore: the Y latent scores
yScore = dot(Y, v)
uDiff = u - uOld
if dot(uDiff.T, uDiff) < tol or Y.shape[1] == 1:
break
uOld = u
ite += 1
return u, v
def svd_xy(X, Y):
""" Return the first left and right singular
vectors of X'Y.
:param X, Y: data matrix
:type X, Y: :class:`numpy.array`
"""
U, s, V = svd(dot(X.T, Y), full_matrices=False)
u = U[:, [0]]
v = V.T[:, [0]]
return u, v
def select_attrs(table, attributes, class_var=None, metas=None):
""" Select ``attributes`` from the ``table`` and return a new data table.
"""
domain = Orange.data.Domain(attributes, class_var)
if metas:
domain.add_metas(metas)
return Orange.data.Table(domain, table)
@deprecated_members(
{"nComp": "n_comp",
"deflationMode": "deflation_mode",
"maxIter": "max_iter"},
wrap_methods=["__init__"])
class PLSRegressionLearner(base.BaseRegressionLearner):
"""
Fit the partial least squares regression model, i.e. learn the
regression parameters. The implementation is based on `Scikit
learn python implementation`_
The class is derived from
:class:`Orange.regression.base.BaseRegressionLearner` that is
used for preprocessing the data (continuization and imputation)
before fitting the regression parameters
"""
def __init__(self, n_comp=2, deflation_mode="regression", mode="PLS",
algorithm="nipals", max_iter=500,
imputer=None, continuizer=None,
**kwds):
"""
.. attribute:: n_comp
number of components to keep (default: 2)
.. attribute:: deflation_mode
"canonical" or "regression" (default)
.. attribute:: mode
"CCA" or "PLS" (default)
.. attribute:: algorithm
The algorithm for estimating the weights:
"nipals" or "svd" (default)
"""
self.n_comp = n_comp
self.deflation_mode = deflation_mode
self.mode = mode
self.algorithm = algorithm
self.max_iter = max_iter
self.set_imputer(imputer=imputer)
self.set_continuizer(continuizer=continuizer)
self.__dict__.update(kwds)
@deprecated_keywords({"xVars": "x_vars", "yVars": "y_vars"})
def __call__(self, table, weight_id=None, x_vars=None, y_vars=None):
"""
:param table: data instances.
:type table: :class:`Orange.data.Table`
:param x_vars, y_vars: List of input and response variables
(:obj:`Orange.feature.Continuous` or
:obj:`Orange.feature.Discrete`). If ``None`` (default) it is
            assumed that the data domain provides information about which
            variables are responses and which are not. If data has
:obj:`~Orange.data.Domain.class_var` defined in its domain, a
single-target regression learner is constructed. Otherwise a
multi-target learner predicting response variables defined by
:obj:`~Orange.data.Domain.class_vars` is constructed.
:type x_vars, y_vars: list
"""
domain = table.domain
multitarget = False
if x_vars is None and y_vars is None:
# Response variables are defined in the table.
x_vars = domain.features
if domain.class_var:
y_vars = [domain.class_var]
elif domain.class_vars:
y_vars = domain.class_vars
multitarget = True
else:
raise TypeError('Class-less domain (x-vars and y-vars needed).')
elif not (x_vars and y_vars):
raise ValueError("Both x_vars and y_vars must be defined.")
else:
multitarget = True
x_table = select_attrs(table, x_vars)
y_table = select_attrs(table, y_vars)
        # discrete values are continuized
x_table = self.continuize_table(x_table)
y_table = self.continuize_table(y_table)
# missing values are imputed
x_table = self.impute_table(x_table)
y_table = self.impute_table(y_table)
# Collect the new transformed x_vars/y_vars
x_vars = list(x_table.domain.variables)
y_vars = list(y_table.domain.variables)
domain = Orange.data.Domain(x_vars + y_vars, False)
x = x_table.to_numpy()[0]
y = y_table.to_numpy()[0]
kwargs = self.fit(x, y)
return PLSRegression(domain=domain, x_vars=x_vars, y_vars=y_vars,
multitarget=multitarget, **kwargs)
def fit(self, X, Y):
""" Fit all unknown parameters, i.e.
weights, scores, loadings (for x and y) and regression coefficients.
Return a dict with all of the parameters.
"""
# copy since this will contain the residuals (deflated) matrices
X, Y = X.copy(), Y.copy()
if Y.ndim == 1:
Y = Y.reshape((Y.size, 1))
n, p = X.shape
q = Y.shape[1]
# normalization of data matrices
X, muX, sigmaX = normalize_matrix(X)
Y, muY, sigmaY = normalize_matrix(Y)
# Residuals (deflated) matrices
Xk, Yk = X, Y
# Results matrices
T, U = zeros((n, self.n_comp)), zeros((n, self.n_comp))
W, C = zeros((p, self.n_comp)), zeros((q, self.n_comp))
P, Q = zeros((p, self.n_comp)), zeros((q, self.n_comp))
# NIPALS over components
for k in xrange(self.n_comp):
# Weights estimation (inner loop)
if self.algorithm == "nipals":
u, v = nipals_xy(X=Xk, Y=Yk, mode=self.mode,
max_iter=self.max_iter)
elif self.algorithm == "svd":
u, v = svd_xy(X=Xk, Y=Yk)
# compute scores
xScore, yScore = dot(Xk, u), dot(Yk, v)
# Deflation (in place)
# - regress Xk's on xScore
xLoadings = dot(Xk.T, xScore) / dot(xScore.T, xScore)
            # - subtract rank-one approximations to obtain remainder matrix
Xk -= dot(xScore, xLoadings.T)
if self.deflation_mode == "canonical":
                # - regress Yk's on yScore, then subtract rank-one approx.
yLoadings = dot(Yk.T, yScore) / dot(yScore.T, yScore)
Yk -= dot(yScore, yLoadings.T)
if self.deflation_mode == "regression":
                # - regress Yk's on xScore, then subtract rank-one approx.
yLoadings = dot(Yk.T, xScore) / dot(xScore.T, xScore)
Yk -= dot(xScore, yLoadings.T)
# Store weights, scores and loadings
T[:, k] = xScore.ravel() # x-scores
U[:, k] = yScore.ravel() # y-scores
W[:, k] = u.ravel() # x-weights
C[:, k] = v.ravel() # y-weights
P[:, k] = xLoadings.ravel() # x-loadings
Q[:, k] = yLoadings.ravel() # y-loadings
# X = TP' + E and Y = UQ' + E
# Rotations from input space to transformed space (scores)
# T = X W(P'W)^-1 = XW* (W* : p x k matrix)
# U = Y C(Q'C)^-1 = YC* (W* : q x k matrix)
xRotations = dot(W, pinv(dot(P.T, W)))
if Y.shape[1] > 1:
yRotations = dot(C, pinv(dot(Q.T, C)))
else:
yRotations = numpy.ones(1)
if True or self.deflation_mode == "regression":
# Estimate regression coefficient
# Y = TQ' + E = X W(P'W)^-1Q' + E = XB + E
# => B = W*Q' (p x q)
coefs = dot(xRotations, Q.T)
coefs = 1. / sigmaX.reshape((p, 1)) * \
coefs * sigmaY
return {"mu_x": muX, "mu_y": muY, "sigma_x": sigmaX,
"sigma_y": sigmaY, "T": T, "U":U, "W":U,
"C": C, "P":P, "Q":Q, "x_rotations": xRotations,
"y_rotations": yRotations, "coefs": coefs}
@deprecated_members(
{"xVars": "x_vars",
"yVars": "y_vars",
"muX": "mu_x",
"muY": "mu_y",
"sigmaX": "sigma_x",
"sigmaY": "sigma_y"},
wrap_methods=["__init__"])
class PLSRegression(Orange.classification.Classifier):
""" Predict values of the response variables
based on the values of independent variables.
Basic notations:
n - number of data instances
p - number of independent variables
    q - number of response variables
.. attribute:: T
A n x n_comp numpy array of x-scores
.. attribute:: U
A n x n_comp numpy array of y-scores
.. attribute:: W
A p x n_comp numpy array of x-weights
.. attribute:: C
A q x n_comp numpy array of y-weights
.. attribute:: P
A p x n_comp numpy array of x-loadings
.. attribute:: Q
A q x n_comp numpy array of y-loading
.. attribute:: coefs
A p x q numpy array coefficients
of the linear model: Y = X coefs + E
.. attribute:: x_vars
Predictor variables
.. attribute:: y_vars
Response variables
"""
def __init__(self, domain=None, multitarget=False, coefs=None, sigma_x=None, sigma_y=None,
mu_x=None, mu_y=None, x_vars=None, y_vars=None, **kwargs):
self.domain = domain
self.multitarget = multitarget
if multitarget and y_vars:
self.class_vars = y_vars
elif y_vars:
self.class_var = y_vars[0]
self.coefs = coefs
self.mu_x, self.mu_y = mu_x, mu_y
self.sigma_x, self.sigma_y = sigma_x, sigma_y
self.x_vars, self.y_vars = x_vars, y_vars
for name, val in kwargs.items():
setattr(self, name, val)
def __call__(self, instance, result_type=Orange.core.GetValue):
"""
:param instance: data instance for which the value of the response
variable will be predicted
:type instance: :class:`Orange.data.Instance`
"""
instance = Orange.data.Instance(self.domain, instance)
ins = [instance[v].native() for v in self.x_vars]
if "?" in ins: # missing value -> corresponding coefficient omitted
def miss_2_0(x): return x if x != "?" else 0
ins = map(miss_2_0, ins)
ins = numpy.array(ins)
xc = (ins - self.mu_x)
predicted = dot(xc, self.coefs) + self.mu_y
y_hat = [var(val) for var, val in zip(self.y_vars, predicted)]
if result_type == Orange.core.GetValue:
return y_hat if self.multitarget else y_hat[0]
else:
from Orange.statistics.distribution import Distribution
probs = []
for var, val in zip(self.y_vars, y_hat):
dist = Distribution(var)
dist[val] = 1.0
probs.append(dist)
if result_type == Orange.core.GetBoth:
return (y_hat, probs) if self.multitarget else (y_hat[0], probs[0])
else:
return probs if self.multitarget else probs[0]
def to_string(self):
""" Pretty-prints the coefficient of the PLS regression model.
"""
x_vars, y_vars = [x.name for x in self.x_vars], [y.name for y in self.y_vars]
fmt = "%8s " + "%12.3f " * len(y_vars)
first = [" " * 8 + "%13s" * len(y_vars) % tuple(y_vars)]
lines = [fmt % tuple([x_vars[i]] + list(coef))
for i, coef in enumerate(self.coefs)]
return '\n'.join(first + lines)
def __str__(self):
return self.to_string()
"""
def transform(self, X, Y=None):
# Normalize
Xc = (X - self.muX) / self.sigmaX
if Y is not None:
Yc = (Y - self.muY) / self.sigmaY
# Apply rotation
xScores = dot(Xc, self.xRotations)
if Y is not None:
yScores = dot(Yc, self.yRotations)
return xScores, yScores
return xScores
"""
if __name__ == "__main__":
import Orange
from Orange.regression import pls
data = Orange.data.Table("multitarget-synthetic")
l = pls.PLSRegressionLearner()
x = data.domain.features
y = data.domain.class_vars
print x, y
# c = l(data, x_vars=x, y_vars=y)
c = l(data)
print c
| gpl-3.0 |
ZuckermanLab/NMpathAnalysis | test/tools_for_notebook0.py | 1 | 3495 | import numpy as np
from math import pi,sin,exp,sqrt
import matplotlib.pyplot as plt
import scipy.interpolate
import matplotlib.cm as cm
import networkx as nx
from interval import Interval
from auxfunctions import *
from ensembles import *
# global variable: number of partitions per dimension in the 2D toy model
N = 6
def energy(x,y):
if (x > 6*pi) or (x < 0) or (y > 6*pi) or (y < 0):
return 10**10
else:
ener = 1.5*(1 - sin(x) * sin(y)) + 0.0009*(((x - (9 * sin(y/3) + y))**2) * (y - (9*sin(x/3) + x))**2)
return ener
def plot_traj(list_of_trajs, discrete=[False], line_width=0.5, std = 0.3, color = None, alpha = 0.5 ,title = '', figsize=(8,6.5)):
length = 6*pi
#----------
xlist = np.array([i*pi/17 for i in range(17*6+1)])
ylist = np.array([i*pi/17 for i in range(17*6+1)])
X,Y = np.meshgrid(xlist, ylist)
Z = np.array([[energy(X[i,j],Y[i,j]) for i in range(len(X))] for j in range(len(X))])
#plt.figure(figsize=(8,6.5))
plt.figure(figsize=figsize)
#im = plt.imshow(Z,interpolation='bilinear',vmin = -5,vmax =5,cmap=cm.Spectral,alpha=0.5)
#plt.colorbar(im)
levels = list(np.arange(0, 10, 0.2))
plt.contourf(X, Y, Z,levels,linestyles = 'solid',cmap=cm.jet, alpha = alpha)
plt.fill_between(xlist, 0, pi, where = ylist <= pi, facecolor='green', alpha = 0.4)
plt.fill_between(xlist, 5*pi, 6*pi, where = xlist >= 5*pi, facecolor='green', alpha = 0.4)
plt.title(title, fontsize = 17)
my_colors = ['red', 'blue','green','black','brown'] + [np.random.rand(3,) for i in range(len(list_of_trajs))]
for i,element in enumerate(list_of_trajs):
if type(line_width) == list:
lw = line_width[i]
else:
lw = line_width
if not discrete[i]:
if color is None:
plt.plot(element[0],element[1], color=my_colors[i], linewidth=lw)
else: plt.plot(element[0],element[1], color=color, linewidth=lw)
else:
xi = np.array(element[0])
x_values = [(int(index/N) + 0.5)*length/N + np.random.normal(0, std) for index in xi ]
y_values = [((length/N)*(index % N + 0.5) + np.random.normal(0, std)) for index in xi ]
if color is None:
plt.plot(x_values, y_values, color=my_colors[i], linewidth=lw)
else: plt.plot(x_values, y_values, color=color, linewidth=lw)
plt.axis([0, length, 0, length])
plt.yticks([i*pi for i in range(7)],[' ','$\pi$','$2\pi$','$3\pi$','$4\pi$','$5\pi$','$6\pi$'],fontsize = 15)
plt.xticks([i*pi for i in range(7)],['0','$\pi$','$2\pi$','$3\pi$','$4\pi$','$5\pi$','$6\pi$'],fontsize = 15)
plt.xlabel('X', fontsize = 13)
plt.ylabel('Y', fontsize = 13)
plt.grid(linewidth = 1,linestyle='--',alpha=0.6)
plt.annotate('A', xy=(pi/8, pi/8), fontsize = 35, color = 'tomato')
plt.annotate('B', xy=(5*pi+4*pi/8, 5*pi+3*pi/8), fontsize = 35, color = 'aqua')
plt.colorbar()
plt.show()
def mc_simulation2D(numsteps):
x = 1; y = 1
mc_traj = []
for i in range(numsteps):
dx = np.random.uniform(-pi,pi)
dy = np.random.uniform(-pi,pi)
if (np.random.random() < exp(-(energy(x+dx,y+dy)-energy(x,y))) ):
x = x + dx; y = y + dy
mc_traj += [[x,y]]
return np.array(mc_traj)
def mapping_function2D(vector2D):
length = 6*pi
#----------
x = vector2D[0]
y = vector2D[1]
return N*int(x*N/length)+int(y*N/length)
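# Illustrative usage sketch, not part of the original notebook helpers: run a short
# Metropolis simulation and map every 2D sample onto its discrete cell index. The
# number of steps is an assumption chosen only for this example.
def example_discretized_trajectory(numsteps=1000):
    continuous_traj = mc_simulation2D(numsteps)
    return [mapping_function2D(point) for point in continuous_traj]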
| gpl-3.0 |
chugunovyar/factoryForBuild | env/lib/python2.7/site-packages/matplotlib/tests/test_delaunay.py | 14 | 7090 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from six.moves import xrange
import warnings
import numpy as np
from matplotlib.testing.decorators import image_comparison, knownfailureif
from matplotlib.cbook import MatplotlibDeprecationWarning
with warnings.catch_warnings():
# the module is deprecated. The tests should be removed when the module is.
warnings.simplefilter('ignore', MatplotlibDeprecationWarning)
from matplotlib.delaunay.triangulate import Triangulation
from matplotlib import pyplot as plt
import matplotlib as mpl
def constant(x, y):
return np.ones(x.shape, x.dtype)
constant.title = 'Constant'
def xramp(x, y):
return x
xramp.title = 'X Ramp'
def yramp(x, y):
return y
yramp.title = 'Y Ramp'
def exponential(x, y):
x = x*9
y = y*9
x1 = x+1.0
x2 = x-2.0
x4 = x-4.0
x7 = x-7.0
y1 = x+1.0
y2 = y-2.0
y3 = y-3.0
y7 = y-7.0
f = (0.75 * np.exp(-(x2*x2+y2*y2)/4.0) +
0.75 * np.exp(-x1*x1/49.0 - y1/10.0) +
0.5 * np.exp(-(x7*x7 + y3*y3)/4.0) -
0.2 * np.exp(-x4*x4 -y7*y7))
return f
exponential.title = 'Exponential and Some Gaussians'
def cliff(x, y):
f = np.tanh(9.0*(y-x) + 1.0)/9.0
return f
cliff.title = 'Cliff'
def saddle(x, y):
f = (1.25 + np.cos(5.4*y))/(6.0 + 6.0*(3*x-1.0)**2)
return f
saddle.title = 'Saddle'
def gentle(x, y):
f = np.exp(-5.0625*((x-0.5)**2+(y-0.5)**2))/3.0
return f
gentle.title = 'Gentle Peak'
def steep(x, y):
f = np.exp(-20.25*((x-0.5)**2+(y-0.5)**2))/3.0
return f
steep.title = 'Steep Peak'
def sphere(x, y):
circle = 64-81*((x-0.5)**2 + (y-0.5)**2)
f = np.where(circle >= 0, np.sqrt(np.clip(circle,0,100)) - 0.5, 0.0)
return f
sphere.title = 'Sphere'
def trig(x, y):
f = 2.0*np.cos(10.0*x)*np.sin(10.0*y) + np.sin(10.0*x*y)
return f
trig.title = 'Cosines and Sines'
def gauss(x, y):
x = 5.0-10.0*x
y = 5.0-10.0*y
g1 = np.exp(-x*x/2)
g2 = np.exp(-y*y/2)
f = g1 + 0.75*g2*(1 + g1)
return f
gauss.title = 'Gaussian Peak and Gaussian Ridges'
def cloverleaf(x, y):
ex = np.exp((10.0-20.0*x)/3.0)
ey = np.exp((10.0-20.0*y)/3.0)
logitx = 1.0/(1.0+ex)
logity = 1.0/(1.0+ey)
f = (((20.0/3.0)**3 * ex*ey)**2 * (logitx*logity)**5 *
(ex-2.0*logitx)*(ey-2.0*logity))
return f
cloverleaf.title = 'Cloverleaf'
def cosine_peak(x, y):
circle = np.hypot(80*x-40.0, 90*y-45.)
f = np.exp(-0.04*circle) * np.cos(0.15*circle)
return f
cosine_peak.title = 'Cosine Peak'
allfuncs = [exponential, cliff, saddle, gentle, steep, sphere, trig, gauss, cloverleaf, cosine_peak]
class LinearTester(object):
name = 'Linear'
def __init__(self, xrange=(0.0, 1.0), yrange=(0.0, 1.0), nrange=101, npoints=250):
self.xrange = xrange
self.yrange = yrange
self.nrange = nrange
self.npoints = npoints
rng = np.random.RandomState(1234567890)
self.x = rng.uniform(xrange[0], xrange[1], size=npoints)
self.y = rng.uniform(yrange[0], yrange[1], size=npoints)
self.tri = Triangulation(self.x, self.y)
def replace_data(self, dataset):
self.x = dataset.x
self.y = dataset.y
self.tri = Triangulation(self.x, self.y)
def interpolator(self, func):
z = func(self.x, self.y)
return self.tri.linear_extrapolator(z, bbox=self.xrange+self.yrange)
def plot(self, func, interp=True, plotter='imshow'):
if interp:
lpi = self.interpolator(func)
z = lpi[self.yrange[0]:self.yrange[1]:complex(0,self.nrange),
self.xrange[0]:self.xrange[1]:complex(0,self.nrange)]
else:
y, x = np.mgrid[self.yrange[0]:self.yrange[1]:complex(0,self.nrange),
self.xrange[0]:self.xrange[1]:complex(0,self.nrange)]
z = func(x, y)
z = np.where(np.isinf(z), 0.0, z)
extent = (self.xrange[0], self.xrange[1],
self.yrange[0], self.yrange[1])
fig = plt.figure()
plt.hot() # Some like it hot
if plotter == 'imshow':
plt.imshow(np.nan_to_num(z), interpolation='nearest', extent=extent, origin='lower')
elif plotter == 'contour':
Y, X = np.ogrid[self.yrange[0]:self.yrange[1]:complex(0,self.nrange),
self.xrange[0]:self.xrange[1]:complex(0,self.nrange)]
plt.contour(np.ravel(X), np.ravel(Y), z, 20)
x = self.x
y = self.y
lc = mpl.collections.LineCollection(np.array([((x[i], y[i]), (x[j], y[j]))
for i, j in self.tri.edge_db]), colors=[(0,0,0,0.2)])
ax = plt.gca()
ax.add_collection(lc)
if interp:
title = '%s Interpolant' % self.name
else:
title = 'Reference'
if hasattr(func, 'title'):
plt.title('%s: %s' % (func.title, title))
else:
plt.title(title)
class NNTester(LinearTester):
name = 'Natural Neighbors'
def interpolator(self, func):
z = func(self.x, self.y)
return self.tri.nn_extrapolator(z, bbox=self.xrange+self.yrange)
def make_all_2d_testfuncs(allfuncs=allfuncs):
def make_test(func):
filenames = [
'%s-%s' % (func.__name__, x) for x in
['ref-img', 'nn-img', 'lin-img', 'ref-con', 'nn-con', 'lin-con']]
# We only generate PNGs to save disk space -- we just assume
# that any backend differences are caught by other tests.
@image_comparison(filenames, extensions=['png'],
freetype_version=('2.4.5', '2.4.9'),
remove_text=True)
def reference_test():
nnt.plot(func, interp=False, plotter='imshow')
nnt.plot(func, interp=True, plotter='imshow')
lpt.plot(func, interp=True, plotter='imshow')
nnt.plot(func, interp=False, plotter='contour')
nnt.plot(func, interp=True, plotter='contour')
lpt.plot(func, interp=True, plotter='contour')
tester = reference_test
tester.__name__ = str('test_%s' % func.__name__)
return tester
nnt = NNTester(npoints=1000)
lpt = LinearTester(npoints=1000)
for func in allfuncs:
globals()['test_%s' % func.__name__] = make_test(func)
make_all_2d_testfuncs()
# 1d and 0d grid tests
ref_interpolator = Triangulation([0,10,10,0],
[0,0,10,10]).linear_interpolator([1,10,5,2.0])
def test_1d_grid():
res = ref_interpolator[3:6:2j,1:1:1j]
assert np.allclose(res, [[1.6],[1.9]], rtol=0)
def test_0d_grid():
res = ref_interpolator[3:3:1j,1:1:1j]
assert np.allclose(res, [[1.6]], rtol=0)
@image_comparison(baseline_images=['delaunay-1d-interp'], extensions=['png'])
def test_1d_plots():
x_range = slice(0.25,9.75,20j)
x = np.mgrid[x_range]
ax = plt.gca()
for y in xrange(2,10,2):
plt.plot(x, ref_interpolator[x_range,y:y:1j])
ax.set_xticks([])
ax.set_yticks([])
| gpl-3.0 |
heli522/scikit-learn | sklearn/ensemble/tests/test_gradient_boosting_loss_functions.py | 221 | 5517 | """
Testing for the gradient boosting loss functions and initial estimators.
"""
import numpy as np
from numpy.testing import assert_array_equal
from numpy.testing import assert_almost_equal
from numpy.testing import assert_equal
from nose.tools import assert_raises
from sklearn.utils import check_random_state
from sklearn.ensemble.gradient_boosting import BinomialDeviance
from sklearn.ensemble.gradient_boosting import LogOddsEstimator
from sklearn.ensemble.gradient_boosting import LeastSquaresError
from sklearn.ensemble.gradient_boosting import RegressionLossFunction
from sklearn.ensemble.gradient_boosting import LOSS_FUNCTIONS
from sklearn.ensemble.gradient_boosting import _weighted_percentile
def test_binomial_deviance():
# Check binomial deviance loss.
# Check against alternative definitions in ESLII.
bd = BinomialDeviance(2)
# pred has the same BD for y in {0, 1}
assert_equal(bd(np.array([0.0]), np.array([0.0])),
bd(np.array([1.0]), np.array([0.0])))
assert_almost_equal(bd(np.array([1.0, 1.0, 1.0]),
np.array([100.0, 100.0, 100.0])),
0.0)
assert_almost_equal(bd(np.array([1.0, 0.0, 0.0]),
np.array([100.0, -100.0, -100.0])), 0)
# check if same results as alternative definition of deviance (from ESLII)
alt_dev = lambda y, pred: np.mean(np.logaddexp(0.0, -2.0 *
(2.0 * y - 1) * pred))
test_data = [(np.array([1.0, 1.0, 1.0]), np.array([100.0, 100.0, 100.0])),
(np.array([0.0, 0.0, 0.0]), np.array([100.0, 100.0, 100.0])),
(np.array([0.0, 0.0, 0.0]),
np.array([-100.0, -100.0, -100.0])),
(np.array([1.0, 1.0, 1.0]),
np.array([-100.0, -100.0, -100.0]))]
for datum in test_data:
assert_almost_equal(bd(*datum), alt_dev(*datum))
    # check the negative gradient against the alternative definition (from ESLII)
alt_ng = lambda y, pred: (2 * y - 1) / (1 + np.exp(2 * (2 * y - 1) * pred))
for datum in test_data:
assert_almost_equal(bd.negative_gradient(*datum), alt_ng(*datum))
def test_log_odds_estimator():
# Check log odds estimator.
est = LogOddsEstimator()
assert_raises(ValueError, est.fit, None, np.array([1]))
est.fit(None, np.array([1.0, 0.0]))
assert_equal(est.prior, 0.0)
assert_array_equal(est.predict(np.array([[1.0], [1.0]])),
np.array([[0.0], [0.0]]))
def test_sample_weight_smoke():
rng = check_random_state(13)
y = rng.rand(100)
pred = rng.rand(100)
# least squares
loss = LeastSquaresError(1)
loss_wo_sw = loss(y, pred)
loss_w_sw = loss(y, pred, np.ones(pred.shape[0], dtype=np.float32))
assert_almost_equal(loss_wo_sw, loss_w_sw)
def test_sample_weight_init_estimators():
# Smoke test for init estimators with sample weights.
rng = check_random_state(13)
X = rng.rand(100, 2)
sample_weight = np.ones(100)
reg_y = rng.rand(100)
clf_y = rng.randint(0, 2, size=100)
for Loss in LOSS_FUNCTIONS.values():
if Loss is None:
continue
if issubclass(Loss, RegressionLossFunction):
k = 1
y = reg_y
else:
k = 2
y = clf_y
if Loss.is_multi_class:
# skip multiclass
continue
loss = Loss(k)
init_est = loss.init_estimator()
init_est.fit(X, y)
out = init_est.predict(X)
assert_equal(out.shape, (y.shape[0], 1))
sw_init_est = loss.init_estimator()
sw_init_est.fit(X, y, sample_weight=sample_weight)
sw_out = init_est.predict(X)
assert_equal(sw_out.shape, (y.shape[0], 1))
# check if predictions match
assert_array_equal(out, sw_out)
def test_weighted_percentile():
y = np.empty(102, dtype=np.float)
y[:50] = 0
y[-51:] = 2
y[-1] = 100000
y[50] = 1
sw = np.ones(102, dtype=np.float)
sw[-1] = 0.0
score = _weighted_percentile(y, sw, 50)
assert score == 1
def test_weighted_percentile_equal():
y = np.empty(102, dtype=np.float)
y.fill(0.0)
sw = np.ones(102, dtype=np.float)
sw[-1] = 0.0
score = _weighted_percentile(y, sw, 50)
assert score == 0
def test_weighted_percentile_zero_weight():
y = np.empty(102, dtype=np.float)
y.fill(1.0)
sw = np.ones(102, dtype=np.float)
sw.fill(0.0)
score = _weighted_percentile(y, sw, 50)
assert score == 1.0
def test_sample_weight_deviance():
# Test if deviance supports sample weights.
rng = check_random_state(13)
X = rng.rand(100, 2)
sample_weight = np.ones(100)
reg_y = rng.rand(100)
clf_y = rng.randint(0, 2, size=100)
mclf_y = rng.randint(0, 3, size=100)
for Loss in LOSS_FUNCTIONS.values():
if Loss is None:
continue
if issubclass(Loss, RegressionLossFunction):
k = 1
y = reg_y
p = reg_y
else:
k = 2
y = clf_y
p = clf_y
if Loss.is_multi_class:
k = 3
y = mclf_y
# one-hot encoding
p = np.zeros((y.shape[0], k), dtype=np.float64)
for i in range(k):
p[:, i] = y == i
loss = Loss(k)
deviance_w_w = loss(y, p, sample_weight)
deviance_wo_w = loss(y, p)
assert deviance_wo_w == deviance_w_w
| bsd-3-clause |
demisto/content | Packs/Phishing/Scripts/PhishingDedupPreprocessingRule/PhishingDedupPreprocessingRule.py | 1 | 14388 | import dateutil # type: ignore
import demistomock as demisto
from CommonServerPython import *
from CommonServerUserPython import *
import pandas as pd
from bs4 import BeautifulSoup
from sklearn.feature_extraction.text import CountVectorizer
from numpy import dot
from numpy.linalg import norm
from email.utils import parseaddr
import tldextract
from urllib.parse import urlparse
import re
no_fetch_extract = tldextract.TLDExtract(suffix_list_urls=None)
pd.options.mode.chained_assignment = None # default='warn'
SIMILARITY_THRESHOLD = float(demisto.args().get('threshold', 0.97))
CLOSE_TO_SIMILAR_DISTANCE = 0.2
EMAIL_BODY_FIELD = 'emailbody'
EMAIL_SUBJECT_FIELD = 'emailsubject'
EMAIL_HTML_FIELD = 'emailbodyhtml'
FROM_FIELD = 'emailfrom'
FROM_DOMAIN_FIELD = 'fromdomain'
MERGED_TEXT_FIELD = 'mereged_text'
MIN_TEXT_LENGTH = 50
DEFAULT_ARGS = {
'limit': '1000',
'incidentTypes': 'Phishing',
'exsitingIncidentsLookback': '100 days ago',
}
FROM_POLICY_TEXT_ONLY = 'TextOnly'
FROM_POLICY_EXACT = 'Exact'
FROM_POLICY_DOMAIN = 'Domain'
FROM_POLICY = FROM_POLICY_TEXT_ONLY
URL_REGEX = r'(?:(?:https?|ftp|hxxps?):\/\/|www\[?\.\]?|ftp\[?\.\]?)(?:[-\w\d]+\[?\.\]?)+[-\w\d]+(?::\d+)?' \
r'(?:(?:\/|\?)[-\w\d+&@#\/%=~_$?!\-:,.\(\);]*[\w\d+&@#\/%=~_$\(\);])?'
IGNORE_INCIDENT_TYPE_VALUE = 'None'
def get_existing_incidents(input_args, current_incident_type):
global DEFAULT_ARGS
get_incidents_args = {}
get_incidents_args['limit'] = input_args.get('limit', DEFAULT_ARGS['limit'])
if 'exsitingIncidentsLookback' in input_args:
get_incidents_args['fromDate'] = input_args['exsitingIncidentsLookback']
elif 'exsitingIncidentsLookback' in DEFAULT_ARGS:
get_incidents_args['fromDate'] = DEFAULT_ARGS['exsitingIncidentsLookback']
status_scope = input_args.get('statusScope', 'All')
query_components = []
if 'query' in input_args:
query_components.append(input_args['query'])
if status_scope == 'ClosedOnly':
query_components.append('status:closed')
elif status_scope == 'NonClosedOnly':
query_components.append('-status:closed')
elif status_scope == 'All':
pass
else:
return_error('Unsupported statusScope: {}'.format(status_scope))
type_values = input_args.get('incidentTypes', current_incident_type)
if type_values != IGNORE_INCIDENT_TYPE_VALUE:
type_field = input_args.get('incidentTypeFieldName', 'type')
type_query = generate_incident_type_query_component(type_field, type_values)
query_components.append(type_query)
if len(query_components) > 0:
get_incidents_args['query'] = ' and '.join('({})'.format(c) for c in query_components)
incidents_query_res = demisto.executeCommand('GetIncidentsByQuery', get_incidents_args)
if is_error(incidents_query_res):
return_error(get_error(incidents_query_res))
incidents = json.loads(incidents_query_res[-1]['Contents'])
return incidents
def generate_incident_type_query_component(type_field_arg, type_values_arg):
type_field = type_field_arg.strip()
type_values = [x.strip() for x in type_values_arg.split(',')]
types_unions = ' '.join(f'"{t}"' for t in type_values)
return f'{type_field}:({types_unions})'
def extract_domain(address):
global no_fetch_extract
if address == '':
return ''
email_address = parseaddr(address)[1]
ext = no_fetch_extract(email_address)
return ext.domain
def get_text_from_html(html):
soup = BeautifulSoup(html, features="html.parser")
# kill all script and style elements
for script in soup(["script", "style"]):
script.extract() # rip it out
# get text
text = soup.get_text()
# break into lines and remove leading and trailing space on each
lines = (line.strip() for line in text.splitlines())
# break multi-headlines into a line each
    chunks = (phrase.strip() for line in lines for phrase in line.split("  "))
# drop blank lines
text = '\n'.join(chunk for chunk in chunks if chunk)
return text
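# Keep only scheme://host/ for every URL, likely so that per-message unique
# paths or tracking parameters do not lower the text similarity between
# otherwise identical phishing emails (rationale assumed from context).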
def eliminate_urls_extensions(text):
urls_list = re.findall(URL_REGEX, text)
for url in urls_list:
parsed_uri = urlparse(url)
url_with_no_path = '{uri.scheme}://{uri.netloc}/'.format(uri=parsed_uri)
text = text.replace(url, url_with_no_path)
return text
def preprocess_text_fields(incident):
email_body = email_subject = email_html = ''
if EMAIL_BODY_FIELD in incident:
email_body = incident[EMAIL_BODY_FIELD]
if EMAIL_HTML_FIELD in incident:
email_html = incident[EMAIL_HTML_FIELD]
if EMAIL_SUBJECT_FIELD in incident:
email_subject = incident[EMAIL_SUBJECT_FIELD]
if isinstance(email_html, float):
email_html = ''
if email_body is None or isinstance(email_body, float) or email_body.strip() == '':
email_body = get_text_from_html(email_html)
if isinstance(email_subject, float):
email_subject = ''
text = eliminate_urls_extensions(email_subject + ' ' + email_body)
return text
def preprocess_incidents_df(existing_incidents):
global MERGED_TEXT_FIELD, FROM_FIELD, FROM_DOMAIN_FIELD
incidents_df = pd.DataFrame(existing_incidents)
incidents_df['CustomFields'] = incidents_df['CustomFields'].fillna(value={})
custom_fields_df = incidents_df['CustomFields'].apply(pd.Series)
unique_keys = [k for k in custom_fields_df if k not in incidents_df]
custom_fields_df = custom_fields_df[unique_keys]
incidents_df = pd.concat([incidents_df.drop('CustomFields', axis=1),
custom_fields_df], axis=1).reset_index()
incidents_df[MERGED_TEXT_FIELD] = incidents_df.apply(lambda x: preprocess_text_fields(x), axis=1)
incidents_df = incidents_df[incidents_df[MERGED_TEXT_FIELD].str.len() >= MIN_TEXT_LENGTH]
incidents_df.reset_index(inplace=True)
if FROM_FIELD in incidents_df:
incidents_df[FROM_FIELD] = incidents_df[FROM_FIELD].fillna(value='')
else:
incidents_df[FROM_FIELD] = ''
incidents_df[FROM_FIELD] = incidents_df[FROM_FIELD].apply(lambda x: x.strip())
incidents_df[FROM_DOMAIN_FIELD] = incidents_df[FROM_FIELD].apply(lambda address: extract_domain(address))
incidents_df['created'] = incidents_df['created'].apply(lambda x: dateutil.parser.parse(x)) # type: ignore
return incidents_df
def incident_has_text_fields(incident):
text_fields = [EMAIL_SUBJECT_FIELD, EMAIL_HTML_FIELD, EMAIL_BODY_FIELD]
custom_fields = incident.get('CustomFields', []) or []
if any(field in incident for field in text_fields):
return True
elif 'CustomFields' in incident and any(field in custom_fields for field in text_fields):
return True
return False
def filter_out_same_incident(existing_incidents_df, new_incident):
same_id_mask = existing_incidents_df['id'] == new_incident['id']
existing_incidents_df = existing_incidents_df[~same_id_mask]
return existing_incidents_df
def filter_newer_incidents(existing_incidents_df, new_incident):
new_incident_datetime = dateutil.parser.parse(new_incident['created']) # type: ignore
earlier_incidents_mask = existing_incidents_df['created'] < new_incident_datetime
return existing_incidents_df[earlier_incidents_mask]
def vectorize(text, vectorizer):
return vectorizer.transform([text]).toarray()[0]
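# Cosine similarity between two bag-of-words count vectors: 1.0 means identical
# token distributions, 0.0 means no tokens in common.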
def cosine_sim(a, b):
return dot(a, b) / (norm(a) * norm(b))
def find_duplicate_incidents(new_incident, existing_incidents_df):
global MERGED_TEXT_FIELD, FROM_POLICY
new_incident_text = new_incident[MERGED_TEXT_FIELD]
text = [new_incident_text] + existing_incidents_df[MERGED_TEXT_FIELD].tolist()
vectorizer = CountVectorizer(token_pattern=r"(?u)\b\w\w+\b|!|\?|\"|\'").fit(text)
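    # The token pattern keeps words of two or more characters plus '!', '?' and
    # quote characters, so punctuation style also contributes to the similarity.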
new_incident_vector = vectorize(new_incident_text, vectorizer)
existing_incidents_df['vector'] = existing_incidents_df[MERGED_TEXT_FIELD].apply(lambda x: vectorize(x, vectorizer))
existing_incidents_df['similarity'] = existing_incidents_df['vector'].apply(
lambda x: cosine_sim(x, new_incident_vector))
if FROM_POLICY == FROM_POLICY_DOMAIN:
mask = (existing_incidents_df[FROM_DOMAIN_FIELD] != '') & \
(existing_incidents_df[FROM_DOMAIN_FIELD] == new_incident[FROM_DOMAIN_FIELD])
existing_incidents_df = existing_incidents_df[mask]
elif FROM_POLICY == FROM_POLICY_EXACT:
mask = (existing_incidents_df[FROM_FIELD] != '') & \
(existing_incidents_df[FROM_FIELD] == new_incident[FROM_FIELD])
existing_incidents_df = existing_incidents_df[mask]
existing_incidents_df['distance'] = existing_incidents_df['similarity'].apply(lambda x: 1 - x)
tie_breaker_col = 'id'
try:
existing_incidents_df['int_id'] = existing_incidents_df['id'].astype(int)
tie_breaker_col = 'int_id'
except Exception:
pass
existing_incidents_df.sort_values(by=['distance', 'created', tie_breaker_col], inplace=True)
if len(existing_incidents_df) > 0:
return existing_incidents_df.iloc[0], existing_incidents_df.iloc[0]['similarity']
else:
return None, None
def return_entry(message, existing_incident=None, similarity=0):
if existing_incident is None:
similar_incident = {}
else:
similar_incident = {
'rawId': existing_incident['id'],
'id': existing_incident['id'],
'name': existing_incident.get('name'),
'similarity': similarity
}
outputs = {
'similarIncident': similar_incident,
'isSimilarIncidentFound': existing_incident is not None
}
return_outputs(message, outputs)
def close_new_incident_and_link_to_existing(new_incident, existing_incident, similarity):
formatted_incident = format_similar_incident(existing_incident, similarity)
message = tableToMarkdown("Duplicate incident found with similarity {:.1f}%".format(similarity * 100),
formatted_incident)
if demisto.args().get('closeAsDuplicate', 'true') == 'true':
res = demisto.executeCommand("CloseInvestigationAsDuplicate", {
'duplicateId': existing_incident['id']})
if is_error(res):
return_error(res)
message += 'This incident (#{}) will be closed and linked to #{}.'.format(new_incident['id'],
existing_incident['id'])
return_entry(message, existing_incident.to_dict(), similarity)
def create_new_incident():
return_entry('This incident is not a duplicate of an existing incident.')
def format_similar_incident(incident, similarity):
return {'Id': "[%s](#/Details/%s)" % (incident['id'], incident['id']),
'Name': incident['name'],
'Closed': incident.get('closed') != "0001-01-01T00:00:00Z",
'Time': str(incident['created']),
'Email from': incident.get(demisto.args().get('emailFrom')),
            'Text Similarity': "{:.1f}%".format(similarity * 100),
}
def create_new_incident_low_similarity(existing_incident, similarity):
message = '## This incident is not a duplicate of an existing incident.\n'
if similarity > SIMILARITY_THRESHOLD - CLOSE_TO_SIMILAR_DISTANCE:
formatted_incident = format_similar_incident(existing_incident, similarity)
message += tableToMarkdown("Most similar incident found", formatted_incident)
message += 'The threshold for considering 2 incidents as duplicate is a similarity ' \
'of {:.1f}%.\n'.format(SIMILARITY_THRESHOLD * 100)
message += 'Therefore these 2 incidents will not be considered as duplicate and the current incident ' \
'will remain active.\n'
return_entry(message)
def create_new_incident_no_text_fields():
text_fields = [EMAIL_BODY_FIELD, EMAIL_HTML_FIELD, EMAIL_SUBJECT_FIELD]
message = 'No text fields were found within this incident: {}.\n'.format(','.join(text_fields))
message += 'Incident will remain active.'
return_entry(message)
def create_new_incident_too_short():
return_entry('Incident text after preprocessing is too short for deduplication. Incident will remain active.')
def main():
global EMAIL_BODY_FIELD, EMAIL_SUBJECT_FIELD, EMAIL_HTML_FIELD, FROM_FIELD, MIN_TEXT_LENGTH, FROM_POLICY
input_args = demisto.args()
EMAIL_BODY_FIELD = input_args.get('emailBody', EMAIL_BODY_FIELD)
EMAIL_SUBJECT_FIELD = input_args.get('emailSubject', EMAIL_SUBJECT_FIELD)
EMAIL_HTML_FIELD = input_args.get('emailBodyHTML', EMAIL_HTML_FIELD)
FROM_FIELD = input_args.get('emailFrom', FROM_FIELD)
FROM_POLICY = input_args.get('fromPolicy', FROM_POLICY)
new_incident = demisto.incidents()[0]
existing_incidents = get_existing_incidents(input_args, new_incident.get('type', IGNORE_INCIDENT_TYPE_VALUE))
demisto.debug('found {} incidents by query'.format(len(existing_incidents)))
if len(existing_incidents) == 0:
create_new_incident()
return
if not incident_has_text_fields(new_incident):
create_new_incident_no_text_fields()
return
new_incident_df = preprocess_incidents_df([new_incident])
if len(new_incident_df) == 0: # len(new_incident_df)==0 means new incident is too short
create_new_incident_too_short()
return
existing_incidents_df = preprocess_incidents_df(existing_incidents)
existing_incidents_df = filter_out_same_incident(existing_incidents_df, new_incident)
existing_incidents_df = filter_newer_incidents(existing_incidents_df, new_incident)
if len(existing_incidents_df) == 0:
create_new_incident()
return
new_incident_preprocessed = new_incident_df.iloc[0].to_dict()
duplicate_incident_row, similarity = find_duplicate_incidents(new_incident_preprocessed,
existing_incidents_df)
if duplicate_incident_row is None:
create_new_incident()
return
if similarity < SIMILARITY_THRESHOLD:
create_new_incident_low_similarity(duplicate_incident_row, similarity)
else:
return close_new_incident_and_link_to_existing(new_incident_df.iloc[0], duplicate_incident_row, similarity)
if __name__ in ['__main__', '__builtin__', 'builtins']:
main()
| mit |
ntbrewer/Pyspectr | other/spectrum_profiler.py | 2 | 2296 | #!/usr/bin/env python3
"""
K. Miernik 2012
[email protected]
Performs a fit to the decay part for all channels in an E vs. time spectrum
"""
import sys
import argparse
import math
import numpy
from lmfit import minimize, Parameters, report_errors
import matplotlib.pyplot as plt
import Pyspectr.hisfile as hisfile
class GeneralError(Exception):
"""General error class
"""
def __init__(self, msg = ''):
self.msg = msg
def __str__(self):
return repr(self.msg)
def decay(params, data_x):
T1 = params['T1'].value
A = params['A'].value
tau = params['tau'].value
return A * numpy.exp(-(data_x - T1) / tau)
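# Weighted residual vector; lmfit's minimize() squares and sums these entries,
# so this is a standard chi-square fit with per-bin uncertainties data_dy.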
def residual(params, data_x, data_y, data_dy):
model = fitfunc(params, data_x)
return (data_y - model) / data_dy
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='')
parser.add_argument('in_file', help='Input file')
args = parser.parse_args()
hisId = 2681
T1 = 200
T2 = 300
his = hisfile.HisFile(args.in_file)
dim, xaxis, yaxis, data = his.load_histogram(hisId)
#data = data.transpose()
fitfunc = decay
params = Parameters()
params.add('T1', value=T1, vary=False)
params.add('A', value=100.0, min=0.0)
params.add('tau', value=100.0, min=0.0)
sys.stderr.write('.')
symbol = 0
for E in range(2, data.shape[0] - 1, 3):
symbol += 1
data_slice = sum(data[E-1:E+2])[T1:T2]
dy = numpy.sqrt(numpy.abs(data_slice))
for i, v in enumerate(dy):
if dy[i] == 0:
dy[i] = 1.0
data_sum_err = math.sqrt(dy.sum())
if data_slice.sum() - data_sum_err <= 0:
continue
params['A'].value = 100.0
params['tau'].value = 100.0
result = minimize(residual, params,
args=(yaxis[T1:T2], data_slice, dy))
scale = 0.01
print(E, result.params['tau'].value * scale * math.log(2),
result.params['tau'].stderr * scale * math.log(2))
sys.stderr.write('\r')
if symbol % 3 == 0:
sys.stderr.write('.')
elif symbol % 3 == 1:
sys.stderr.write('o')
else:
sys.stderr.write('*')
sys.stderr.write('\n')
| gpl-3.0 |
heplesser/nest-simulator | pynest/examples/clopath_synapse_small_network.py | 8 | 7493 | # -*- coding: utf-8 -*-
#
# clopath_synapse_small_network.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
"""
Clopath Rule: Bidirectional connections
---------------------------------------
This script simulates a small network of ten excitatory and three
inhibitory ``aeif_psc_delta_clopath`` neurons. The neurons are randomly connected
and driven by 500 Poisson generators. The synapses from the Poisson generators
to the excitatory population and those among the neurons of the network
are Clopath synapses. The rate of the Poisson generators is modulated with
a Gaussian profile whose center shifts randomly each 100 ms between ten
equally spaced positions.
This setup demonstrates that the Clopath synapse is able to establish
bidirectional connections. The example is adapted from [1]_ (cf. fig. 5).
References
~~~~~~~~~~
.. [1] Clopath C, Büsing L, Vasilaki E, Gerstner W (2010). Connectivity reflects coding:
a model of voltage-based STDP with homeostasis.
Nature Neuroscience 13:3, 344--352
"""
import nest
import numpy as np
import matplotlib.pyplot as plt
import random
##############################################################################
# Set the parameters
simulation_time = 1.0e4
resolution = 0.1
delay = resolution
# Poisson_generator parameters
pg_A = 30. # amplitude of Gaussian
pg_sigma = 10. # std deviation
nest.ResetKernel()
nest.SetKernelStatus({'resolution': resolution})
# Create neurons and devices
nrn_model = 'aeif_psc_delta_clopath'
nrn_params = {'V_m': -30.6,
'g_L': 30.0,
'w': 0.0,
'tau_plus': 7.0,
'tau_minus': 10.0,
'tau_w': 144.0,
'a': 4.0,
'C_m': 281.0,
'Delta_T': 2.0,
'V_peak': 20.0,
't_clamp': 2.0,
'A_LTP': 8.0e-6,
'A_LTD': 14.0e-6,
'A_LTD_const': False,
'b': 0.0805,
'u_ref_squared': 60.0**2}
pop_exc = nest.Create(nrn_model, 10, nrn_params)
pop_inh = nest.Create(nrn_model, 3, nrn_params)
##############################################################################
# We need parrot neurons since Poisson generators can only be connected
# with static connections
pop_input = nest.Create('parrot_neuron', 500) # helper neurons
pg = nest.Create('poisson_generator', 500)
wr = nest.Create('weight_recorder')
##############################################################################
# First connect Poisson generators to helper neurons
nest.Connect(pg, pop_input, 'one_to_one', {'synapse_model': 'static_synapse',
'weight': 1.0, 'delay': delay})
##############################################################################
# Create all the connections
nest.CopyModel('clopath_synapse', 'clopath_input_to_exc',
{'Wmax': 3.0})
conn_dict_input_to_exc = {'rule': 'all_to_all'}
syn_dict_input_to_exc = {'synapse_model': 'clopath_input_to_exc',
'weight': nest.random.uniform(0.5, 2.0),
'delay': delay}
nest.Connect(pop_input, pop_exc, conn_dict_input_to_exc,
syn_dict_input_to_exc)
# Create input->inh connections
conn_dict_input_to_inh = {'rule': 'all_to_all'}
syn_dict_input_to_inh = {'synapse_model': 'static_synapse',
'weight': nest.random.uniform(0.0, 0.5),
'delay': delay}
nest.Connect(pop_input, pop_inh, conn_dict_input_to_inh, syn_dict_input_to_inh)
# Create exc->exc connections
nest.CopyModel('clopath_synapse', 'clopath_exc_to_exc',
{'Wmax': 0.75, 'weight_recorder': wr})
syn_dict_exc_to_exc = {'synapse_model': 'clopath_exc_to_exc', 'weight': 0.25,
'delay': delay}
conn_dict_exc_to_exc = {'rule': 'all_to_all', 'allow_autapses': False}
nest.Connect(pop_exc, pop_exc, conn_dict_exc_to_exc, syn_dict_exc_to_exc)
# Create exc->inh connections
syn_dict_exc_to_inh = {'synapse_model': 'static_synapse',
'weight': 1.0, 'delay': delay}
conn_dict_exc_to_inh = {'rule': 'fixed_indegree', 'indegree': 8}
nest.Connect(pop_exc, pop_inh, conn_dict_exc_to_inh, syn_dict_exc_to_inh)
# Create inh->exc connections
syn_dict_inh_to_exc = {'synapse_model': 'static_synapse',
'weight': 1.0, 'delay': delay}
conn_dict_inh_to_exc = {'rule': 'fixed_outdegree', 'outdegree': 6}
nest.Connect(pop_inh, pop_exc, conn_dict_inh_to_exc, syn_dict_inh_to_exc)
##############################################################################
# Randomize the initial membrane potential
pop_exc.V_m = nest.random.normal(-60., 25.)
pop_inh.V_m = nest.random.normal(-60., 25.)
##############################################################################
# Simulation divided into intervals of 100ms for shifting the Gaussian
sim_interval = 100.
for i in range(int(simulation_time/sim_interval)):
# set rates of poisson generators
rates = np.empty(500)
# pg_mu will be randomly chosen out of 25,75,125,...,425,475
pg_mu = 25 + random.randint(0, 9) * 50
for j in range(500):
rates[j] = pg_A * np.exp((-1 * (j - pg_mu)**2) / (2 * pg_sigma**2))
pg[j].rate = rates[j]*1.75
nest.Simulate(sim_interval)
##############################################################################
# Plot results
fig, ax = plt.subplots(1, sharex=False)
# Plot synapse weights of the synapses within the excitatory population
# Sort weights according to sender and reshape
exc_conns = nest.GetConnections(pop_exc, pop_exc)
exc_conns_senders = np.array(exc_conns.source)
exc_conns_targets = np.array(exc_conns.target)
exc_conns_weights = np.array(exc_conns.weight)
idx_array = np.argsort(exc_conns_senders)
targets = np.reshape(exc_conns_targets[idx_array], (10, 10 - 1))
weights = np.reshape(exc_conns_weights[idx_array], (10, 10 - 1))
# Sort according to target
for i, (trgs, ws) in enumerate(zip(targets, weights)):
idx_array = np.argsort(trgs)
weights[i] = ws[idx_array]
weight_matrix = np.zeros((10, 10))
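# Scatter the 10x9 weight array (one row per presynaptic neuron, no
# self-connections) into a full 10x10 matrix with an empty diagonal,
# using upper/lower-triangle index bookkeeping.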
tu9 = np.triu_indices_from(weights)
tl9 = np.tril_indices_from(weights, -1)
tu10 = np.triu_indices_from(weight_matrix, 1)
tl10 = np.tril_indices_from(weight_matrix, -1)
weight_matrix[tu10[0], tu10[1]] = weights[tu9[0], tu9[1]]
weight_matrix[tl10[0], tl10[1]] = weights[tl9[0], tl9[1]]
# Difference between initial and final value
init_w_matrix = np.ones((10, 10))*0.25
init_w_matrix -= np.identity(10)*0.25
cax = ax.imshow(weight_matrix - init_w_matrix)
cbarB = fig.colorbar(cax, ax=ax)
ax.set_xticks([0, 2, 4, 6, 8])
ax.set_xticklabels(['1', '3', '5', '7', '9'])
ax.set_yticks([0, 2, 4, 6, 8])
ax.set_yticklabels(['1', '3', '5', '7', '9'])
ax.set_xlabel("to neuron")
ax.set_ylabel("from neuron")
ax.set_title("Change of syn weights before and after simulation")
plt.show()
| gpl-2.0 |
soylentdeen/Graffity | src/LoopAnalysis/IRIS.py | 1 | 5783 | import scipy
import numpy
from matplotlib import pyplot
import Graffity
import glob
import time
import astropy.io.fits as pyfits
from scipy import ndimage
#IRIS_Data = pyfits.getdata('/home/cdeen/Data/CIAO/JanComm/IRIS20170112/hd_22468_low_DIT.fits') - \
# numpy.mean(Background, axis=0)
#IRIS_Data = pyfits.getdata('/home/cdeen/Data/CIAO/JanComm/IRIS20170112/v368_pup_low_DIT.fits') - \
# numpy.mean(Background, axis=0)
#IRIS_Data = pyfits.getdata('/home/cdeen/Data/CIAO/JanComm/IRIS20170112/wds_j03575_DIT.fits') - \
# numpy.mean(Background, axis=0)
def calcStrehl(base='', sens='', airy='', blah=False):
if blah:
base_dir = '/home/cdeen/Data/CIAO/JanComm/IRIS20170108/'
Background = pyfits.getdata(base_dir+'HD41_BACKGROUND_1_DIT.fits')
if sens == 'high':
IRIS_Data = pyfits.getdata(base_dir+'HD41_AVC_ON_1_DIT.fits')-numpy.mean(Background, axis=0)
IRIS_Header = pyfits.getheader(base_dir+'HD41_AVC_ON_1_DIT.fits')
else:
IRIS_Data = pyfits.getdata(base_dir+'HD41_faint_AVC_ON_1_DIT.fits')-numpy.mean(Background, axis=0)
IRIS_Header = pyfits.getheader(base_dir+'HD41_faint_AVC_ON_1_DIT.fits')
        print(IRIS_Header.get("DATE-OBS"))
else:
base_dir = '/home/cdeen/Data/CIAO/JanComm/IRIS20170112/'
Background=pyfits.getdata(base_dir+'iris_background_5ms_DIT.fits')
IRIS_Data = pyfits.getdata(base_dir + base + '_' + sens + '_DIT.fits') - numpy.mean(Background, axis=0)
UT1 = IRIS_Data[:, :32, :]
UT2 = IRIS_Data[:, 32:64, :]
UT3 = IRIS_Data[:, 64:96, :]
UT4 = IRIS_Data[:, 96:, :]
#UT1 = numpy.mean(UT1, axis=0)
#UT2 = numpy.mean(UT2, axis=0)
#UT3 = numpy.mean(UT3, axis=0)
#UT4 = numpy.mean(UT4, axis=0)
SR = {}
centroid = {}
for UT, stack in zip([1, 2, 3, 4], [UT1, UT2, UT3, UT4]):
SR[UT] = []
centroid[UT] = []
for frame in stack:
frame = frame / numpy.max(frame)
frame[frame < 0] = 0.0
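            # Strehl estimate: peak of the measured PSF after matching its total
            # flux to the model Airy PSF (implicitly assumes the Airy peak is ~1).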
SR[UT].append(numpy.max(frame/numpy.sum(frame)*numpy.sum(airy)))
centroid[UT].append(ndimage.measurements.center_of_mass(frame))
SR[UT] = numpy.array(SR[UT])
centroid[UT] = numpy.std(numpy.array(centroid[UT]), axis=0)*31.5
return SR, centroid
fig0 = pyplot.figure(0)
fig0.clear()
ax0 = fig0.add_axes([0.1, 0.1, 0.8, 0.8])
fig1 = pyplot.figure(1)
fig1.clear()
ax1 = fig1.add_axes([0.1, 0.1, 0.8, 0.8])
PSF = Graffity.PSF(sizeInPix=32, lam=2.2, pscale = 31.5)
PSF.generateOTF()
airy = PSF.getPSF()
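# Model diffraction-limited PSF at 2.2 microns on the IRIS plate scale
# (pscale=31.5, presumably mas/pixel); used as the Strehl reference below.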
#stars = ['v368_pup', 'hd_22468', 'wds_j03575']
stars = ['v368_pup', 'wds_j03575']
sensitivities = ['low', 'medium', 'high']
#separations = [11.8, 6.7, 11.1]
separations = [11.8, 11.1]
SR = {}
TTJit = {}
for star, sep in zip(stars, separations):
SR[star] = {}
SR[star]["separation"] = sep
TTJit[star] = {}
TTJit[star] = {}
for sens in sensitivities:
blah = calcStrehl(base=star, sens=sens, airy=airy)
SR[star][sens] = blah[0]
TTJit[star][sens] = blah[1]
print("%s %s" % (star, sens))
print("CIAO 1: %.3f" % (numpy.mean(SR[star][sens][1])))
print("CIAO 2: %.3f" % (numpy.mean(SR[star][sens][2])))
print("CIAO 3: %.3f" % (numpy.mean(SR[star][sens][3])))
print("CIAO 4: %.3f" % (numpy.mean(SR[star][sens][4])))
low, ttj_low = calcStrehl(sens='low', airy=airy, blah = True)
high, ttj_high = calcStrehl(sens='high', airy=airy, blah = True)
SR["HD41"] = {}
SR["HD41"]["separation"] = 5.58
SR["HD41"]["medium"] = low
SR["HD41"]["high"] = high
SR["HD41"]["low"] = low
TTJit["HD41"] = {}
TTJit["HD41"]["separation"] = 5.58
TTJit["HD41"]["medium"] = ttj_low
TTJit["HD41"]["high"] = ttj_high
TTJit["HD41"]["low"] = ttj_low
separations = []
strehl_1 = []
strehl_2 = []
strehl_3 = []
strehl_4 = []
ttx_1 = []
tty_1 = []
ttx_2 = []
tty_2 = []
ttx_3 = []
tty_3 = []
ttx_4 = []
tty_4 = []
sens = []
for star in SR.keys():
for s in sensitivities:
separations.append(SR[star]["separation"])
strehl_1.append(numpy.mean(SR[star][s][1]))
strehl_2.append(numpy.mean(SR[star][s][2]))
strehl_3.append(numpy.mean(SR[star][s][3]))
strehl_4.append(numpy.mean(SR[star][s][4]))
ttx_1.append(TTJit[star][s][1][0])
tty_1.append(TTJit[star][s][1][1])
ttx_2.append(TTJit[star][s][2][0])
tty_2.append(TTJit[star][s][2][1])
ttx_3.append(TTJit[star][s][3][0])
tty_3.append(TTJit[star][s][3][1])
ttx_4.append(TTJit[star][s][4][0])
tty_4.append(TTJit[star][s][4][1])
ax0.scatter(separations, ttx_1, color = 'b', marker='x', s=40.0)
ax0.scatter(numpy.array(separations)+0.1, tty_1, color = 'b', marker='+', s=40.0)
ax0.scatter(separations, ttx_2, color = 'g', marker='x', s=40.0)
ax0.scatter(numpy.array(separations)+0.1, tty_2, color = 'g', marker='+', s=40.0)
ax0.scatter(separations, ttx_3, color = 'r', marker='x', s=40.0)
ax0.scatter(numpy.array(separations)+0.1, tty_3, color = 'r', marker='+', s=40.0)
ax0.scatter(separations, ttx_4, color = 'm', marker='x', s=40.0)
ax0.scatter(numpy.array(separations)+0.1, tty_4, color = 'm', marker='+', s=40.0)
ax0.set_title("UT1 - Tip/Tilt Jitter vs Separation")
ax0.set_ylabel("Tip/Tilt Jitter (mas)")
#ax0.scatter(separations, strehl_1, color='b')
#ax0.scatter(separations, strehl_2, color='g')
#ax0.scatter(separations, strehl_3, color='r')
#ax0.scatter(separations, strehl_4, color='m')
#ax0.set_title("UT1 - Strehl vs Separation")
ax0.set_xlabel("Separation - arcsec")
#ax0.set_ylabel("Strehl Ratio (IRIS)")
#ax0.matshow(airy)
#ax1.matshow(numpy.mean(UT4, axis=0))
fig0.show()
fig0.savefig("TTJ_v_Separation.png")
#fig0.savefig("SR_v_Separation.png")
#fig1.show()
| mit |
alphaBenj/zipline | tests/test_panel_bar_reader.py | 6 | 3677 | #
# Copyright 2016 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from itertools import permutations, product
import numpy as np
import pandas as pd
from zipline.data.us_equity_pricing import PanelBarReader
from zipline.testing import ExplodingObject
from zipline.testing.fixtures import (
WithAssetFinder,
ZiplineTestCase,
)
from zipline.utils.calendars import get_calendar
class WithPanelBarReader(WithAssetFinder):
@classmethod
def init_class_fixtures(cls):
super(WithPanelBarReader, cls).init_class_fixtures()
finder = cls.asset_finder
trading_calendar = get_calendar('NYSE')
items = finder.retrieve_all(finder.sids)
major_axis = (
trading_calendar.sessions_in_range if cls.FREQUENCY == 'daily'
else trading_calendar.minutes_for_sessions_in_range
)(cls.START_DATE, cls.END_DATE)
minor_axis = ['open', 'high', 'low', 'close', 'volume']
shape = tuple(map(len, [items, major_axis, minor_axis]))
raw_data = np.arange(shape[0] * shape[1] * shape[2]).reshape(shape)
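        # Deterministic, strictly increasing values make it trivial to verify in
        # test_get_value that the reader returns exactly the stored panel entry.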
cls.panel = pd.Panel(
raw_data,
items=items,
major_axis=major_axis,
minor_axis=minor_axis,
)
cls.reader = PanelBarReader(trading_calendar, cls.panel, cls.FREQUENCY)
def test_get_value(self):
panel = self.panel
reader = self.reader
for asset, date, field in product(*panel.axes):
self.assertEqual(
panel.loc[asset, date, field],
reader.get_value(asset, date, field),
)
def test_duplicate_values(self):
UNIMPORTANT_VALUE = 57
panel = pd.Panel(
UNIMPORTANT_VALUE,
items=['a', 'b', 'b', 'a'],
major_axis=['c'],
minor_axis=['d'],
)
unused = ExplodingObject()
axis_names = ['items', 'major_axis', 'minor_axis']
for axis_order in permutations((0, 1, 2)):
transposed = panel.transpose(*axis_order)
with self.assertRaises(ValueError) as e:
PanelBarReader(unused, transposed, 'daily')
expected = (
"Duplicate entries in Panel.{name}: ['a', 'b'].".format(
name=axis_names[axis_order.index(0)],
)
)
self.assertEqual(str(e.exception), expected)
def test_sessions(self):
sessions = self.reader.sessions
self.assertEqual(self.NUM_SESSIONS, len(sessions))
self.assertEqual(self.START_DATE, sessions[0])
self.assertEqual(self.END_DATE, sessions[-1])
class TestPanelDailyBarReader(WithPanelBarReader,
ZiplineTestCase):
FREQUENCY = 'daily'
START_DATE = pd.Timestamp('2006-01-03', tz='utc')
END_DATE = pd.Timestamp('2006-02-01', tz='utc')
NUM_SESSIONS = 21
class TestPanelMinuteBarReader(WithPanelBarReader,
ZiplineTestCase):
FREQUENCY = 'minute'
START_DATE = pd.Timestamp('2015-12-23', tz='utc')
END_DATE = pd.Timestamp('2015-12-24', tz='utc')
NUM_SESSIONS = 2
| apache-2.0 |
yavalvas/yav_com | build/matplotlib/lib/matplotlib/tests/test_rcparams.py | 1 | 4807 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import os
import sys
import matplotlib as mpl
from matplotlib.tests import assert_str_equal
from nose.tools import assert_true, assert_raises
import nose
mpl.rc('text', usetex=False)
mpl.rc('lines', linewidth=22)
fname = os.path.join(os.path.dirname(__file__), 'test_rcparams.rc')
def test_rcparams():
usetex = mpl.rcParams['text.usetex']
linewidth = mpl.rcParams['lines.linewidth']
# test context given dictionary
with mpl.rc_context(rc={'text.usetex': not usetex}):
assert mpl.rcParams['text.usetex'] == (not usetex)
assert mpl.rcParams['text.usetex'] == usetex
# test context given filename (mpl.rc sets linewdith to 33)
with mpl.rc_context(fname=fname):
assert mpl.rcParams['lines.linewidth'] == 33
assert mpl.rcParams['lines.linewidth'] == linewidth
# test context given filename and dictionary
with mpl.rc_context(fname=fname, rc={'lines.linewidth': 44}):
assert mpl.rcParams['lines.linewidth'] == 44
assert mpl.rcParams['lines.linewidth'] == linewidth
# test rc_file
try:
mpl.rc_file(fname)
assert mpl.rcParams['lines.linewidth'] == 33
finally:
mpl.rcParams['lines.linewidth'] = linewidth
def test_RcParams_class():
rc = mpl.RcParams({'font.cursive': ['Apple Chancery',
'Textile',
'Zapf Chancery',
'cursive'],
'font.family': 'sans-serif',
'font.weight': 'normal',
'font.size': 12})
if six.PY3:
expected_repr = """
RcParams({'font.cursive': ['Apple Chancery',
'Textile',
'Zapf Chancery',
'cursive'],
'font.family': 'sans-serif',
'font.size': 12,
'font.weight': 'normal'})""".lstrip()
else:
expected_repr = """
RcParams({u'font.cursive': [u'Apple Chancery',
u'Textile',
u'Zapf Chancery',
u'cursive'],
u'font.family': u'sans-serif',
u'font.size': 12,
u'font.weight': u'normal'})""".lstrip()
assert_str_equal(expected_repr, repr(rc))
if six.PY3:
expected_str = """
font.cursive: ['Apple Chancery', 'Textile', 'Zapf Chancery', 'cursive']
font.family: sans-serif
font.size: 12
font.weight: normal""".lstrip()
else:
expected_str = """
font.cursive: [u'Apple Chancery', u'Textile', u'Zapf Chancery', u'cursive']
font.family: sans-serif
font.size: 12
font.weight: normal""".lstrip()
assert_str_equal(expected_str, str(rc))
# test the find_all functionality
assert ['font.cursive', 'font.size'] == sorted(rc.find_all('i[vz]').keys())
assert ['font.family'] == list(six.iterkeys(rc.find_all('family')))
def test_Bug_2543():
    # Test that it is possible to add all values to itself / deepcopy
# This was not possible because validate_bool_maybe_none did not
# accept None as an argument.
# https://github.com/matplotlib/matplotlib/issues/2543
with mpl.rc_context():
_copy = mpl.rcParams.copy()
for key in six.iterkeys(_copy):
mpl.rcParams[key] = _copy[key]
mpl.rcParams['text.dvipnghack'] = None
with mpl.rc_context():
from copy import deepcopy
_deep_copy = deepcopy(mpl.rcParams)
from matplotlib.rcsetup import validate_bool_maybe_none, validate_bool
# real test is that this does not raise
assert_true(validate_bool_maybe_none(None) is None)
assert_true(validate_bool_maybe_none("none") is None)
_fonttype = mpl.rcParams['svg.fonttype']
assert_true(_fonttype == mpl.rcParams['svg.embed_char_paths'])
with mpl.rc_context():
mpl.rcParams['svg.embed_char_paths'] = False
assert_true(mpl.rcParams['svg.fonttype'] == "none")
def test_Bug_2543_newer_python():
# only split from above because of the usage of assert_raises
# as a context manager, which only works in 2.7 and above
if sys.version_info[:2] < (2, 7):
raise nose.SkipTest("assert_raises as context manager not supported with Python < 2.7")
from matplotlib.rcsetup import validate_bool_maybe_none, validate_bool
with assert_raises(ValueError):
validate_bool_maybe_none("blah")
with assert_raises(ValueError):
validate_bool(None)
with assert_raises(ValueError):
with mpl.rc_context():
mpl.rcParams['svg.fonttype'] = True
if __name__ == '__main__':
import nose
nose.runmodule(argv=['-s', '--with-doctest'], exit=False)
| mit |
ngoix/OCRF | sklearn/utils/tests/test_class_weight.py | 50 | 13151 | import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.datasets import make_blobs
from sklearn.utils.class_weight import compute_class_weight
from sklearn.utils.class_weight import compute_sample_weight
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_warns
def test_compute_class_weight():
# Test (and demo) compute_class_weight.
y = np.asarray([2, 2, 2, 3, 3, 4])
classes = np.unique(y)
cw = assert_warns(DeprecationWarning,
compute_class_weight, "auto", classes, y)
assert_almost_equal(cw.sum(), classes.shape)
assert_true(cw[0] < cw[1] < cw[2])
cw = compute_class_weight("balanced", classes, y)
# total effect of samples is preserved
class_counts = np.bincount(y)[2:]
assert_almost_equal(np.dot(cw, class_counts), y.shape[0])
assert_true(cw[0] < cw[1] < cw[2])
def test_compute_class_weight_not_present():
# Raise error when y does not contain all class labels
classes = np.arange(4)
y = np.asarray([0, 0, 0, 1, 1, 2])
assert_raises(ValueError, compute_class_weight, "auto", classes, y)
assert_raises(ValueError, compute_class_weight, "balanced", classes, y)
# Raise error when y has items not in classes
classes = np.arange(2)
assert_raises(ValueError, compute_class_weight, "auto", classes, y)
assert_raises(ValueError, compute_class_weight, "balanced", classes, y)
assert_raises(ValueError, compute_class_weight, {0: 1., 1: 2.}, classes, y)
def test_compute_class_weight_dict():
classes = np.arange(3)
class_weights = {0: 1.0, 1: 2.0, 2: 3.0}
y = np.asarray([0, 0, 1, 2])
cw = compute_class_weight(class_weights, classes, y)
# When the user specifies class weights, compute_class_weights should just
# return them.
assert_array_almost_equal(np.asarray([1.0, 2.0, 3.0]), cw)
# When a class weight is specified that isn't in classes, a ValueError
# should get raised
msg = 'Class label 4 not present.'
class_weights = {0: 1.0, 1: 2.0, 2: 3.0, 4: 1.5}
assert_raise_message(ValueError, msg, compute_class_weight, class_weights,
classes, y)
msg = 'Class label -1 not present.'
class_weights = {-1: 5.0, 0: 1.0, 1: 2.0, 2: 3.0}
assert_raise_message(ValueError, msg, compute_class_weight, class_weights,
classes, y)
def test_compute_class_weight_invariance():
# Test that results with class_weight="balanced" is invariant wrt
# class imbalance if the number of samples is identical.
# The test uses a balanced two class dataset with 100 datapoints.
# It creates three versions, one where class 1 is duplicated
# resulting in 150 points of class 1 and 50 of class 0,
# one where there are 50 points in class 1 and 150 in class 0,
# and one where there are 100 points of each class (this one is balanced
# again).
# With balancing class weights, all three should give the same model.
X, y = make_blobs(centers=2, random_state=0)
# create dataset where class 1 is duplicated twice
X_1 = np.vstack([X] + [X[y == 1]] * 2)
y_1 = np.hstack([y] + [y[y == 1]] * 2)
# create dataset where class 0 is duplicated twice
X_0 = np.vstack([X] + [X[y == 0]] * 2)
y_0 = np.hstack([y] + [y[y == 0]] * 2)
# duplicate everything
X_ = np.vstack([X] * 2)
y_ = np.hstack([y] * 2)
# results should be identical
logreg1 = LogisticRegression(class_weight="balanced").fit(X_1, y_1)
logreg0 = LogisticRegression(class_weight="balanced").fit(X_0, y_0)
logreg = LogisticRegression(class_weight="balanced").fit(X_, y_)
assert_array_almost_equal(logreg1.coef_, logreg0.coef_)
assert_array_almost_equal(logreg.coef_, logreg0.coef_)
def test_compute_class_weight_auto_negative():
# Test compute_class_weight when labels are negative
# Test with balanced class labels.
classes = np.array([-2, -1, 0])
y = np.asarray([-1, -1, 0, 0, -2, -2])
cw = assert_warns(DeprecationWarning, compute_class_weight, "auto",
classes, y)
assert_almost_equal(cw.sum(), classes.shape)
assert_equal(len(cw), len(classes))
assert_array_almost_equal(cw, np.array([1., 1., 1.]))
cw = compute_class_weight("balanced", classes, y)
assert_equal(len(cw), len(classes))
assert_array_almost_equal(cw, np.array([1., 1., 1.]))
# Test with unbalanced class labels.
y = np.asarray([-1, 0, 0, -2, -2, -2])
cw = assert_warns(DeprecationWarning, compute_class_weight, "auto",
classes, y)
assert_almost_equal(cw.sum(), classes.shape)
assert_equal(len(cw), len(classes))
assert_array_almost_equal(cw, np.array([0.545, 1.636, 0.818]), decimal=3)
cw = compute_class_weight("balanced", classes, y)
assert_equal(len(cw), len(classes))
class_counts = np.bincount(y + 2)
assert_almost_equal(np.dot(cw, class_counts), y.shape[0])
assert_array_almost_equal(cw, [2. / 3, 2., 1.])
def test_compute_class_weight_auto_unordered():
# Test compute_class_weight when classes are unordered
classes = np.array([1, 0, 3])
y = np.asarray([1, 0, 0, 3, 3, 3])
cw = assert_warns(DeprecationWarning, compute_class_weight, "auto",
classes, y)
assert_almost_equal(cw.sum(), classes.shape)
assert_equal(len(cw), len(classes))
assert_array_almost_equal(cw, np.array([1.636, 0.818, 0.545]), decimal=3)
cw = compute_class_weight("balanced", classes, y)
class_counts = np.bincount(y)[classes]
assert_almost_equal(np.dot(cw, class_counts), y.shape[0])
assert_array_almost_equal(cw, [2., 1., 2. / 3])
def test_compute_sample_weight():
# Test (and demo) compute_sample_weight.
# Test with balanced classes
y = np.asarray([1, 1, 1, 2, 2, 2])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
sample_weight = compute_sample_weight("balanced", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
# Test with user-defined weights
sample_weight = compute_sample_weight({1: 2, 2: 1}, y)
assert_array_almost_equal(sample_weight, [2., 2., 2., 1., 1., 1.])
# Test with column vector of balanced classes
y = np.asarray([[1], [1], [1], [2], [2], [2]])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
sample_weight = compute_sample_weight("balanced", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
# Test with unbalanced classes
y = np.asarray([1, 1, 1, 2, 2, 2, 3])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y)
expected_auto = np.asarray([.6, .6, .6, .6, .6, .6, 1.8])
assert_array_almost_equal(sample_weight, expected_auto)
sample_weight = compute_sample_weight("balanced", y)
expected_balanced = np.array([0.7777, 0.7777, 0.7777, 0.7777, 0.7777, 0.7777, 2.3333])
assert_array_almost_equal(sample_weight, expected_balanced, decimal=4)
# Test with `None` weights
sample_weight = compute_sample_weight(None, y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1., 1.])
# Test with multi-output of balanced classes
y = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1]])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
sample_weight = compute_sample_weight("balanced", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
# Test with multi-output with user-defined weights
y = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1]])
sample_weight = compute_sample_weight([{1: 2, 2: 1}, {0: 1, 1: 2}], y)
assert_array_almost_equal(sample_weight, [2., 2., 2., 2., 2., 2.])
# Test with multi-output of unbalanced classes
y = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1], [3, -1]])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y)
assert_array_almost_equal(sample_weight, expected_auto ** 2)
sample_weight = compute_sample_weight("balanced", y)
assert_array_almost_equal(sample_weight, expected_balanced ** 2, decimal=3)
def test_compute_sample_weight_with_subsample():
# Test compute_sample_weight with subsamples specified.
# Test with balanced classes and all samples present
y = np.asarray([1, 1, 1, 2, 2, 2])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
sample_weight = compute_sample_weight("balanced", y, range(6))
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
# Test with column vector of balanced classes and all samples present
y = np.asarray([[1], [1], [1], [2], [2], [2]])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
sample_weight = compute_sample_weight("balanced", y, range(6))
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
# Test with a subsample
y = np.asarray([1, 1, 1, 2, 2, 2])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y, range(4))
assert_array_almost_equal(sample_weight, [.5, .5, .5, 1.5, 1.5, 1.5])
sample_weight = compute_sample_weight("balanced", y, range(4))
assert_array_almost_equal(sample_weight, [2. / 3, 2. / 3,
2. / 3, 2., 2., 2.])
# Test with a bootstrap subsample
y = np.asarray([1, 1, 1, 2, 2, 2])
sample_weight = assert_warns(DeprecationWarning, compute_sample_weight,
"auto", y, [0, 1, 1, 2, 2, 3])
expected_auto = np.asarray([1 / 3., 1 / 3., 1 / 3., 5 / 3., 5 / 3., 5 / 3.])
assert_array_almost_equal(sample_weight, expected_auto)
sample_weight = compute_sample_weight("balanced", y, [0, 1, 1, 2, 2, 3])
expected_balanced = np.asarray([0.6, 0.6, 0.6, 3., 3., 3.])
assert_array_almost_equal(sample_weight, expected_balanced)
# Test with a bootstrap subsample for multi-output
y = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1]])
sample_weight = assert_warns(DeprecationWarning, compute_sample_weight,
"auto", y, [0, 1, 1, 2, 2, 3])
assert_array_almost_equal(sample_weight, expected_auto ** 2)
sample_weight = compute_sample_weight("balanced", y, [0, 1, 1, 2, 2, 3])
assert_array_almost_equal(sample_weight, expected_balanced ** 2)
# Test with a missing class
y = np.asarray([1, 1, 1, 2, 2, 2, 3])
sample_weight = assert_warns(DeprecationWarning, compute_sample_weight,
"auto", y, range(6))
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1., 0.])
sample_weight = compute_sample_weight("balanced", y, range(6))
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1., 0.])
# Test with a missing class for multi-output
y = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1], [2, 2]])
sample_weight = assert_warns(DeprecationWarning, compute_sample_weight,
"auto", y, range(6))
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1., 0.])
sample_weight = compute_sample_weight("balanced", y, range(6))
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1., 0.])
def test_compute_sample_weight_errors():
# Test compute_sample_weight raises errors expected.
# Invalid preset string
y = np.asarray([1, 1, 1, 2, 2, 2])
y_ = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1]])
assert_raises(ValueError, compute_sample_weight, "ni", y)
assert_raises(ValueError, compute_sample_weight, "ni", y, range(4))
assert_raises(ValueError, compute_sample_weight, "ni", y_)
assert_raises(ValueError, compute_sample_weight, "ni", y_, range(4))
# Not "auto" for subsample
assert_raises(ValueError,
compute_sample_weight, {1: 2, 2: 1}, y, range(4))
# Not a list or preset for multi-output
assert_raises(ValueError, compute_sample_weight, {1: 2, 2: 1}, y_)
# Incorrect length list for multi-output
assert_raises(ValueError, compute_sample_weight, [{1: 2, 2: 1}], y_)
| bsd-3-clause |
vermouthmjl/scikit-learn | sklearn/utils/tests/test_extmath.py | 12 | 23419 | # Authors: Olivier Grisel <[email protected]>
# Mathieu Blondel <[email protected]>
# Denis Engemann <[email protected]>
#
# License: BSD 3 clause
import numpy as np
from scipy import sparse
from scipy import linalg
from scipy import stats
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import skip_if_32bit
from sklearn.utils.extmath import density
from sklearn.utils.extmath import logsumexp
from sklearn.utils.extmath import norm, squared_norm
from sklearn.utils.extmath import randomized_svd
from sklearn.utils.extmath import row_norms
from sklearn.utils.extmath import weighted_mode
from sklearn.utils.extmath import cartesian
from sklearn.utils.extmath import log_logistic
from sklearn.utils.extmath import fast_dot, _fast_dot
from sklearn.utils.extmath import svd_flip
from sklearn.utils.extmath import _incremental_mean_and_var
from sklearn.utils.extmath import _deterministic_vector_sign_flip
from sklearn.utils.extmath import softmax
from sklearn.datasets.samples_generator import make_low_rank_matrix
def test_density():
rng = np.random.RandomState(0)
X = rng.randint(10, size=(10, 5))
X[1, 2] = 0
X[5, 3] = 0
X_csr = sparse.csr_matrix(X)
X_csc = sparse.csc_matrix(X)
X_coo = sparse.coo_matrix(X)
X_lil = sparse.lil_matrix(X)
for X_ in (X_csr, X_csc, X_coo, X_lil):
assert_equal(density(X_), density(X))
def test_uniform_weights():
# with uniform weights, results should be identical to stats.mode
rng = np.random.RandomState(0)
x = rng.randint(10, size=(10, 5))
weights = np.ones(x.shape)
for axis in (None, 0, 1):
mode, score = stats.mode(x, axis)
mode2, score2 = weighted_mode(x, weights, axis)
assert_true(np.all(mode == mode2))
assert_true(np.all(score == score2))
def test_random_weights():
# set this up so that each row should have a weighted mode of 6,
# with a score that is easily reproduced
mode_result = 6
rng = np.random.RandomState(0)
x = rng.randint(mode_result, size=(100, 10))
w = rng.random_sample(x.shape)
x[:, :5] = mode_result
w[:, :5] += 1
mode, score = weighted_mode(x, w, axis=1)
assert_array_equal(mode, mode_result)
assert_array_almost_equal(score.ravel(), w[:, :5].sum(1))
def test_logsumexp():
# Try to add some smallish numbers in logspace
x = np.array([1e-40] * 1000000)
logx = np.log(x)
assert_almost_equal(np.exp(logsumexp(logx)), x.sum())
X = np.vstack([x, x])
logX = np.vstack([logx, logx])
assert_array_almost_equal(np.exp(logsumexp(logX, axis=0)), X.sum(axis=0))
assert_array_almost_equal(np.exp(logsumexp(logX, axis=1)), X.sum(axis=1))
def test_randomized_svd_low_rank():
# Check that extmath.randomized_svd is consistent with linalg.svd
n_samples = 100
n_features = 500
rank = 5
k = 10
# generate a matrix X of approximate effective rank `rank` and no noise
# component (very structured signal):
X = make_low_rank_matrix(n_samples=n_samples, n_features=n_features,
effective_rank=rank, tail_strength=0.0,
random_state=0)
assert_equal(X.shape, (n_samples, n_features))
# compute the singular values of X using the slow exact method
U, s, V = linalg.svd(X, full_matrices=False)
for normalizer in ['auto', 'LU', 'QR']: # 'none' would not be stable
# compute the singular values of X using the fast approximate method
Ua, sa, Va = \
randomized_svd(X, k, power_iteration_normalizer=normalizer,
random_state=0)
assert_equal(Ua.shape, (n_samples, k))
assert_equal(sa.shape, (k,))
assert_equal(Va.shape, (k, n_features))
# ensure that the singular values of both methods are equal up to the
# real rank of the matrix
assert_almost_equal(s[:k], sa)
# check the singular vectors too (while not checking the sign)
assert_almost_equal(np.dot(U[:, :k], V[:k, :]), np.dot(Ua, Va))
# check the sparse matrix representation
X = sparse.csr_matrix(X)
# compute the singular values of X using the fast approximate method
Ua, sa, Va = \
randomized_svd(X, k, power_iteration_normalizer=normalizer,
random_state=0)
assert_almost_equal(s[:rank], sa[:rank])
def test_norm_squared_norm():
X = np.random.RandomState(42).randn(50, 63)
X *= 100 # check stability
X += 200
assert_almost_equal(np.linalg.norm(X.ravel()), norm(X))
assert_almost_equal(norm(X) ** 2, squared_norm(X), decimal=6)
assert_almost_equal(np.linalg.norm(X), np.sqrt(squared_norm(X)), decimal=6)
def test_row_norms():
X = np.random.RandomState(42).randn(100, 100)
sq_norm = (X ** 2).sum(axis=1)
assert_array_almost_equal(sq_norm, row_norms(X, squared=True), 5)
assert_array_almost_equal(np.sqrt(sq_norm), row_norms(X))
Xcsr = sparse.csr_matrix(X, dtype=np.float32)
assert_array_almost_equal(sq_norm, row_norms(Xcsr, squared=True), 5)
assert_array_almost_equal(np.sqrt(sq_norm), row_norms(Xcsr))
def test_randomized_svd_low_rank_with_noise():
# Check that extmath.randomized_svd can handle noisy matrices
n_samples = 100
n_features = 500
rank = 5
k = 10
# generate a matrix X of approximate effective rank `rank` and an
# important noisy component
X = make_low_rank_matrix(n_samples=n_samples, n_features=n_features,
effective_rank=rank, tail_strength=0.1,
random_state=0)
assert_equal(X.shape, (n_samples, n_features))
# compute the singular values of X using the slow exact method
_, s, _ = linalg.svd(X, full_matrices=False)
for normalizer in ['auto', 'none', 'LU', 'QR']:
# compute the singular values of X using the fast approximate
# method without the iterated power method
_, sa, _ = randomized_svd(X, k, n_iter=0,
power_iteration_normalizer=normalizer,
random_state=0)
# the approximation does not tolerate the noise:
assert_greater(np.abs(s[:k] - sa).max(), 0.01)
# compute the singular values of X using the fast approximate
# method with iterated power method
_, sap, _ = randomized_svd(X, k,
power_iteration_normalizer=normalizer,
random_state=0)
# the iterated power method is helping getting rid of the noise:
assert_almost_equal(s[:k], sap, decimal=3)
def test_randomized_svd_infinite_rank():
# Check that extmath.randomized_svd can handle noisy matrices
n_samples = 100
n_features = 500
rank = 5
k = 10
# let us try again without 'low_rank component': just regularly but slowly
# decreasing singular values: the rank of the data matrix is infinite
X = make_low_rank_matrix(n_samples=n_samples, n_features=n_features,
effective_rank=rank, tail_strength=1.0,
random_state=0)
assert_equal(X.shape, (n_samples, n_features))
# compute the singular values of X using the slow exact method
_, s, _ = linalg.svd(X, full_matrices=False)
for normalizer in ['auto', 'none', 'LU', 'QR']:
# compute the singular values of X using the fast approximate method
# without the iterated power method
_, sa, _ = randomized_svd(X, k, n_iter=0,
power_iteration_normalizer=normalizer)
# the approximation does not tolerate the noise:
assert_greater(np.abs(s[:k] - sa).max(), 0.1)
# compute the singular values of X using the fast approximate method
# with iterated power method
_, sap, _ = randomized_svd(X, k, n_iter=5,
power_iteration_normalizer=normalizer)
# the iterated power method is still managing to get most of the
# structure at the requested rank
assert_almost_equal(s[:k], sap, decimal=3)
def test_randomized_svd_transpose_consistency():
# Check that transposing the design matrix has limited impact
n_samples = 100
n_features = 500
rank = 4
k = 10
X = make_low_rank_matrix(n_samples=n_samples, n_features=n_features,
effective_rank=rank, tail_strength=0.5,
random_state=0)
assert_equal(X.shape, (n_samples, n_features))
U1, s1, V1 = randomized_svd(X, k, n_iter=3, transpose=False,
random_state=0)
U2, s2, V2 = randomized_svd(X, k, n_iter=3, transpose=True,
random_state=0)
U3, s3, V3 = randomized_svd(X, k, n_iter=3, transpose='auto',
random_state=0)
U4, s4, V4 = linalg.svd(X, full_matrices=False)
assert_almost_equal(s1, s4[:k], decimal=3)
assert_almost_equal(s2, s4[:k], decimal=3)
assert_almost_equal(s3, s4[:k], decimal=3)
assert_almost_equal(np.dot(U1, V1), np.dot(U4[:, :k], V4[:k, :]),
decimal=2)
assert_almost_equal(np.dot(U2, V2), np.dot(U4[:, :k], V4[:k, :]),
decimal=2)
# in this case 'auto' is equivalent to transpose
assert_almost_equal(s2, s3)
def test_randomized_svd_power_iteration_normalizer():
# randomized_svd with power_iteration_normalized='none' diverges for
# large number of power iterations on this dataset
rng = np.random.RandomState(42)
X = make_low_rank_matrix(100, 500, effective_rank=50, random_state=rng)
X += 3 * rng.randint(0, 2, size=X.shape)
n_components = 50
# Check that it diverges with many (non-normalized) power iterations
U, s, V = randomized_svd(X, n_components, n_iter=2,
power_iteration_normalizer='none')
A = X - U.dot(np.diag(s).dot(V))
error_2 = linalg.norm(A, ord='fro')
U, s, V = randomized_svd(X, n_components, n_iter=20,
power_iteration_normalizer='none')
A = X - U.dot(np.diag(s).dot(V))
error_20 = linalg.norm(A, ord='fro')
assert_greater(np.abs(error_2 - error_20), 100)
for normalizer in ['LU', 'QR', 'auto']:
U, s, V = randomized_svd(X, n_components, n_iter=2,
power_iteration_normalizer=normalizer,
random_state=0)
A = X - U.dot(np.diag(s).dot(V))
error_2 = linalg.norm(A, ord='fro')
for i in [5, 10, 50]:
U, s, V = randomized_svd(X, n_components, n_iter=i,
power_iteration_normalizer=normalizer,
random_state=0)
A = X - U.dot(np.diag(s).dot(V))
error = linalg.norm(A, ord='fro')
assert_greater(15, np.abs(error_2 - error))
def test_svd_flip():
# Check that svd_flip works in both situations, and reconstructs input.
rs = np.random.RandomState(1999)
n_samples = 20
n_features = 10
X = rs.randn(n_samples, n_features)
# Check matrix reconstruction
U, S, V = linalg.svd(X, full_matrices=False)
U1, V1 = svd_flip(U, V, u_based_decision=False)
assert_almost_equal(np.dot(U1 * S, V1), X, decimal=6)
# Check transposed matrix reconstruction
XT = X.T
U, S, V = linalg.svd(XT, full_matrices=False)
U2, V2 = svd_flip(U, V, u_based_decision=True)
assert_almost_equal(np.dot(U2 * S, V2), XT, decimal=6)
# Check that different flip methods are equivalent under reconstruction
U_flip1, V_flip1 = svd_flip(U, V, u_based_decision=True)
assert_almost_equal(np.dot(U_flip1 * S, V_flip1), XT, decimal=6)
U_flip2, V_flip2 = svd_flip(U, V, u_based_decision=False)
assert_almost_equal(np.dot(U_flip2 * S, V_flip2), XT, decimal=6)
def test_randomized_svd_sign_flip():
a = np.array([[2.0, 0.0], [0.0, 1.0]])
u1, s1, v1 = randomized_svd(a, 2, flip_sign=True, random_state=41)
for seed in range(10):
u2, s2, v2 = randomized_svd(a, 2, flip_sign=True, random_state=seed)
assert_almost_equal(u1, u2)
assert_almost_equal(v1, v2)
assert_almost_equal(np.dot(u2 * s2, v2), a)
assert_almost_equal(np.dot(u2.T, u2), np.eye(2))
assert_almost_equal(np.dot(v2.T, v2), np.eye(2))
def test_randomized_svd_sign_flip_with_transpose():
# Check if the randomized_svd sign flipping is always done based on u
# irrespective of transpose.
# See https://github.com/scikit-learn/scikit-learn/issues/5608
# for more details.
def max_loading_is_positive(u, v):
"""
returns bool tuple indicating if the values maximising np.abs
are positive across all rows for u and across all columns for v.
"""
u_based = (np.abs(u).max(axis=0) == u.max(axis=0)).all()
v_based = (np.abs(v).max(axis=1) == v.max(axis=1)).all()
return u_based, v_based
mat = np.arange(10 * 8).reshape(10, -1)
# Without transpose
u_flipped, _, v_flipped = randomized_svd(mat, 3, flip_sign=True)
u_based, v_based = max_loading_is_positive(u_flipped, v_flipped)
assert_true(u_based)
assert_false(v_based)
# With transpose
u_flipped_with_transpose, _, v_flipped_with_transpose = randomized_svd(
mat, 3, flip_sign=True, transpose=True)
u_based, v_based = max_loading_is_positive(
u_flipped_with_transpose, v_flipped_with_transpose)
assert_true(u_based)
assert_false(v_based)
def test_cartesian():
# Check if cartesian product delivers the right results
axes = (np.array([1, 2, 3]), np.array([4, 5]), np.array([6, 7]))
true_out = np.array([[1, 4, 6],
[1, 4, 7],
[1, 5, 6],
[1, 5, 7],
[2, 4, 6],
[2, 4, 7],
[2, 5, 6],
[2, 5, 7],
[3, 4, 6],
[3, 4, 7],
[3, 5, 6],
[3, 5, 7]])
out = cartesian(axes)
assert_array_equal(true_out, out)
# check single axis
x = np.arange(3)
assert_array_equal(x[:, np.newaxis], cartesian((x,)))
def test_logistic_sigmoid():
# Check correctness and robustness of logistic sigmoid implementation
def naive_log_logistic(x):
return np.log(1 / (1 + np.exp(-x)))
x = np.linspace(-2, 2, 50)
assert_array_almost_equal(log_logistic(x), naive_log_logistic(x))
extreme_x = np.array([-100., 100.])
assert_array_almost_equal(log_logistic(extreme_x), [-100, 0])
def test_fast_dot():
# Check fast dot blas wrapper function
if fast_dot is np.dot:
return
rng = np.random.RandomState(42)
A = rng.random_sample([2, 10])
B = rng.random_sample([2, 10])
try:
linalg.get_blas_funcs(['gemm'])[0]
has_blas = True
except (AttributeError, ValueError):
has_blas = False
if has_blas:
# Test _fast_dot for invalid input.
# Mismatched or unsupported dtypes.
for dt1, dt2 in [['f8', 'f4'], ['i4', 'i4']]:
assert_raises(ValueError, _fast_dot, A.astype(dt1),
B.astype(dt2).T)
# Malformed data.
# ndim == 0
E = np.empty(0)
assert_raises(ValueError, _fast_dot, E, E)
# ndim == 1
assert_raises(ValueError, _fast_dot, A, A[0])
# ndim > 2
assert_raises(ValueError, _fast_dot, A.T, np.array([A, A]))
# min(shape) == 1
assert_raises(ValueError, _fast_dot, A, A[0, :][None, :])
# test for matrix mismatch error
assert_raises(ValueError, _fast_dot, A, A)
# Test cov-like use case + dtypes.
for dtype in ['f8', 'f4']:
A = A.astype(dtype)
B = B.astype(dtype)
# col < row
C = np.dot(A.T, A)
C_ = fast_dot(A.T, A)
assert_almost_equal(C, C_, decimal=5)
C = np.dot(A.T, B)
C_ = fast_dot(A.T, B)
assert_almost_equal(C, C_, decimal=5)
C = np.dot(A, B.T)
C_ = fast_dot(A, B.T)
assert_almost_equal(C, C_, decimal=5)
# Test square matrix * rectangular use case.
A = rng.random_sample([2, 2])
for dtype in ['f8', 'f4']:
A = A.astype(dtype)
B = B.astype(dtype)
C = np.dot(A, B)
C_ = fast_dot(A, B)
assert_almost_equal(C, C_, decimal=5)
C = np.dot(A.T, B)
C_ = fast_dot(A.T, B)
assert_almost_equal(C, C_, decimal=5)
if has_blas:
for x in [np.array([[d] * 10] * 2) for d in [np.inf, np.nan]]:
assert_raises(ValueError, _fast_dot, x, x.T)
def test_incremental_variance_update_formulas():
# Test Youngs and Cramer incremental variance formulas.
# Doggie data from http://www.mathsisfun.com/data/standard-deviation.html
A = np.array([[600, 470, 170, 430, 300],
[600, 470, 170, 430, 300],
[600, 470, 170, 430, 300],
[600, 470, 170, 430, 300]]).T
idx = 2
X1 = A[:idx, :]
X2 = A[idx:, :]
old_means = X1.mean(axis=0)
old_variances = X1.var(axis=0)
old_sample_count = X1.shape[0]
final_means, final_variances, final_count = \
_incremental_mean_and_var(X2, old_means, old_variances,
old_sample_count)
assert_almost_equal(final_means, A.mean(axis=0), 6)
assert_almost_equal(final_variances, A.var(axis=0), 6)
assert_almost_equal(final_count, A.shape[0])
@skip_if_32bit
def test_incremental_variance_numerical_stability():
# Test Youngs and Cramer incremental variance formulas.
def np_var(A):
return A.var(axis=0)
# Naive one pass variance computation - not numerically stable
# https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance
def one_pass_var(X):
n = X.shape[0]
exp_x2 = (X ** 2).sum(axis=0) / n
expx_2 = (X.sum(axis=0) / n) ** 2
return exp_x2 - expx_2
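# Note: the shortcut E[X^2] - (E[X])^2 above suffers catastrophic cancellation whenever the
# mean is much larger than the standard deviation, which is exactly the regime constructed
# further down by adding a large constant offset to the data.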
# Two-pass algorithm, stable.
# We use it as a benchmark. It is not an online algorithm
# https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Two-pass_algorithm
def two_pass_var(X):
mean = X.mean(axis=0)
Y = X.copy()
return np.mean((Y - mean)**2, axis=0)
# Naive online implementation
# https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Online_algorithm
# This works only for chunks of size 1
def naive_mean_variance_update(x, last_mean, last_variance,
last_sample_count):
updated_sample_count = (last_sample_count + 1)
samples_ratio = last_sample_count / float(updated_sample_count)
updated_mean = x / updated_sample_count + last_mean * samples_ratio
updated_variance = last_variance * samples_ratio + \
(x - last_mean) * (x - updated_mean) / updated_sample_count
return updated_mean, updated_variance, updated_sample_count
# We want to show a case when one_pass_var has error > 1e-3 while
# _batch_mean_variance_update has less.
tol = 200
n_features = 2
n_samples = 10000
x1 = np.array(1e8, dtype=np.float64)
x2 = np.log(1e-5, dtype=np.float64)
A0 = x1 * np.ones((n_samples // 2, n_features), dtype=np.float64)
A1 = x2 * np.ones((n_samples // 2, n_features), dtype=np.float64)
A = np.vstack((A0, A1))
# Older versions of numpy have different precision
# In some old version, np.var is not stable
if np.abs(np_var(A) - two_pass_var(A)).max() < 1e-6:
stable_var = np_var
else:
stable_var = two_pass_var
# Naive one pass var: >tol (=1063)
assert_greater(np.abs(stable_var(A) - one_pass_var(A)).max(), tol)
# Starting point for online algorithms: after A0
# Naive implementation: >tol (436)
mean, var, n = A0[0, :], np.zeros(n_features), n_samples // 2
for i in range(A1.shape[0]):
mean, var, n = \
naive_mean_variance_update(A1[i, :], mean, var, n)
assert_equal(n, A.shape[0])
# the mean is also slightly unstable
assert_greater(np.abs(A.mean(axis=0) - mean).max(), 1e-6)
assert_greater(np.abs(stable_var(A) - var).max(), tol)
# Robust implementation: <tol (177)
mean, var, n = A0[0, :], np.zeros(n_features), n_samples // 2
for i in range(A1.shape[0]):
mean, var, n = \
_incremental_mean_and_var(A1[i, :].reshape((1, A1.shape[1])),
mean, var, n)
assert_equal(n, A.shape[0])
assert_array_almost_equal(A.mean(axis=0), mean)
assert_greater(tol, np.abs(stable_var(A) - var).max())
def test_incremental_variance_ddof():
# Test that degrees of freedom parameter for calculations are correct.
rng = np.random.RandomState(1999)
X = rng.randn(50, 10)
n_samples, n_features = X.shape
for batch_size in [11, 20, 37]:
steps = np.arange(0, X.shape[0], batch_size)
if steps[-1] != X.shape[0]:
steps = np.hstack([steps, n_samples])
for i, j in zip(steps[:-1], steps[1:]):
batch = X[i:j, :]
if i == 0:
incremental_means = batch.mean(axis=0)
incremental_variances = batch.var(axis=0)
# Assign this twice so that the test logic is consistent
incremental_count = batch.shape[0]
sample_count = batch.shape[0]
else:
result = _incremental_mean_and_var(
batch, incremental_means, incremental_variances,
sample_count)
(incremental_means, incremental_variances,
incremental_count) = result
sample_count += batch.shape[0]
calculated_means = np.mean(X[:j], axis=0)
calculated_variances = np.var(X[:j], axis=0)
assert_almost_equal(incremental_means, calculated_means, 6)
assert_almost_equal(incremental_variances,
calculated_variances, 6)
assert_equal(incremental_count, sample_count)
def test_vector_sign_flip():
# Testing that sign flip is working & largest value has positive sign
data = np.random.RandomState(36).randn(5, 5)
max_abs_rows = np.argmax(np.abs(data), axis=1)
data_flipped = _deterministic_vector_sign_flip(data)
max_rows = np.argmax(data_flipped, axis=1)
assert_array_equal(max_abs_rows, max_rows)
signs = np.sign(data[range(data.shape[0]), max_abs_rows])
assert_array_equal(data, data_flipped * signs[:, np.newaxis])
def test_softmax():
rng = np.random.RandomState(0)
X = rng.randn(3, 5)
exp_X = np.exp(X)
sum_exp_X = np.sum(exp_X, axis=1).reshape((-1, 1))
assert_array_almost_equal(softmax(X), exp_X / sum_exp_X)
| bsd-3-clause |
gscarella/pyOptFEM | pyOptFEM/common.py | 1 | 5804 | from scipy import sparse
import matplotlib.pyplot as plt
import os, errno, ctypes
from numpy import log
def NormInf(A):
"""This function returns the norm Inf of a *Scipy* sparse Matrix
:param A: A *Scipy* sparse matrix
:returns: norm Inf of A given by :math:`\| A\|_\infty=\max_{i,j}(|A_{i,j}|)`.
"""
if (A.data.shape[0]==0):
return 0
else:
return max(abs(A.data))
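# Note: this returns the entry-wise maximum absolute value of the stored entries (the "max"
# norm), not the induced infinity norm (maximum absolute row sum). For instance,
# NormInf(sparse.csr_matrix([[1., -3.], [0., 2.]])) evaluates to 3.0.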
def showSparsity(M):
# matplotlib.pyplot is imported at module level as plt
plt.spy(M, precision=1e-8, marker='.', markersize=3)
plt.show()
def mkdir_p(path):
try:
os.makedirs(path)
except OSError as exc: # Python >2.5
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else: raise
def BasesChoice(Num):
if Num==0:
ext='BaBa'
cbase='global alternate numbering with local alternate numbering'
elif Num==1:
ext='BaBb'
cbase='global block numbering with local alternate numbering'
elif Num==2:
ext='BbBa'
cbase='global alternate numbering with local block numbering'
elif Num==3:
ext='BbBb'
cbase='global block numbering with local block numbering'
return ext,cbase
def PrintResultsSphinx(versions,LN,Lnq,Lndof,T):
nV=len(versions)
nN=len(LN)
Sep1='+{:-^8}'.format("")*3 + '+{:-^14}'.format("")*nV+'+'
Sep2='+{:=^8}'.format("")*3 + '+{:=^14}'.format("")*nV+'+'
Sep3='|{:^8}'.format("")*3 + '+{:-^14}'.format("")*nV+'+'
Tit='|{:^8}'.format('N')+'|{:^8}'.format('nq')+'|{:^8}'.format('ndof')
for i in range(0,nV):
Tit+='|{:^14}'.format(versions[i])
Tit+='|'
print(Sep1)
print(Tit)
print(Sep2)
for n in range(0,nN):
S1='|{:^8}'.format('%d' % LN[n])+'|{:^8}'.format('%d' % Lnq[n])+'|{:^8}'.format('%d' % Lndof[n])
S2='|{:^8}'.format("")*3
for v in range(0,nV):
S1+='|{:^14}'.format('%.4f(s)' % T[n,v])
if (T[n,0]<1e-6):
S2+='|{:^14}'.format('x%s' % ('NaN'))
else:
S2+='|{:^14}'.format('x%4.2f' % (T[n,v]/T[n,0]))
S1+='|'
S2+='|'
print(S1)
print(Sep1)
print(S2)
print(Sep1)
def PrintResultsLatexTabular(FileName,versions,LN,Lnq,Lndof,T):
nV=len(versions)
nN=len(LN)
fp = open(FileName, 'wt')
fp.write(format('\\begin{tabular}{@{}|r|r||*{%d}{@{}c@{}|}@{}}\n' % nV))
fp.write(' \\hline\n')
fp.write(' $n_q$ & $n_{dof}$')
for v in range(0,nV):
fp.write(' & '+versions[v])
fp.write(' \\\\ \\hline \\hline\n')
for n in range(0,nN):
fp.write(format(' $%d$ & $%d$ ' % (Lnq[n],Lndof[n])))
for v in range(0,nV):
if T[n,0]<1e-8:
fp.write(format('& \\begin{tabular}{c} %.3f (s) \\\\ \\texttt{x %s} \\end{tabular} ' %(T[n,v],'NaN')))
else:
fp.write(format('& \\begin{tabular}{c} %.3f (s) \\\\ \\texttt{x %.3f} \\end{tabular} ' %(T[n,v],T[n,v]/T[n,0])))
fp.write('\\\\ \\hline\n')
fp.write('\\end{tabular}')
def checkVersions(versions,VersionList):
for i in range(0,len(versions)):
if versions[i] not in VersionList:
return False
return True
def plotBench(versions,Lndof,T):
import matplotlib.pyplot as plt
nV=len(versions)
if T.min()<1e-8:
return 0
plt.loglog(Lndof,T[0,0]*Lndof/Lndof[0],'k--',label="$O(n)$")
plt.loglog(Lndof,T[0,0]*Lndof*log(Lndof)/(Lndof[0]*log(Lndof[0])),'k.-',label="$O(nlog(n))$")
for i in range(1,nV):
plt.loglog(Lndof,T[0,i]*Lndof/Lndof[0],'k--')
plt.loglog(Lndof,T[0,i]*Lndof*log(Lndof)/(Lndof[0]*log(Lndof[0])),'k.-')
for i in range(0,nV):
plt.loglog(Lndof,T[:,i],label=versions[i])
#plt.legend(loc='lower right')
#plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
plt.grid()
plt.xlabel('$n=n_{dof}$')
plt.ylabel('cputime(s)')
if nV<=3:
plt.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3,
ncol=nV+2, mode="expand", borderaxespad=0.)
else:
plt.legend(loc='upper left')
return plt
def printReport(FileName,assembly,Release):
fp = open(FileName+'_report.tex', 'wt')
basename=os.path.basename(FileName)
PWD=os.path.realpath('.')
fp.write('\\documentclass{article}\n');
fp.write(format('\\input{%s/report.sty}\n' % PWD));
fp.write(format('\\title{Automatic bench report : \\texttt{%s} functions under Python (%s) }\n' % (assembly,Release)))
fp.write('\\begin{document}\n');
fp.write('\\maketitle\n');
fp.write(format('\\inputtabular{%s}\n{cputimes and speedup}\n\n' % (basename+'.tex')))  # parenthesized so '.tex' lands inside the braces
fp.write(format('\\imageps{%s}{0.5}\n' % (basename+'.eps')))  # likewise for the EPS figure name
fp.write('\\end{document}\n')
class memoryCheck():
"""Checks memory of a given system"""
def __init__(self):
if os.name == "posix":
self.value = self.linuxRam()
elif os.name == "nt":
self.value = self.windowsRam()
else:
print("I only work with Win or Linux :P")
def windowsRam(self):
"""Uses Windows API to check RAM in this OS"""
kernel32 = ctypes.windll.kernel32
c_ulong = ctypes.c_ulong
class MEMORYSTATUS(ctypes.Structure):
_fields_ = [
("dwLength", c_ulong),
("dwMemoryLoad", c_ulong),
("dwTotalPhys", c_ulong),
("dwAvailPhys", c_ulong),
("dwTotalPageFile", c_ulong),
("dwAvailPageFile", c_ulong),
("dwTotalVirtual", c_ulong),
("dwAvailVirtual", c_ulong)
]
memoryStatus = MEMORYSTATUS()
memoryStatus.dwLength = ctypes.sizeof(MEMORYSTATUS)
kernel32.GlobalMemoryStatus(ctypes.byref(memoryStatus))
return int(memoryStatus.dwTotalPhys/1024**2)
def linuxRam(self):
"""Returns the RAM of a linux system"""
totalMemory = os.popen("free -m").readlines()[1].split()[1]
return int(totalMemory)
| gpl-3.0 |
guilhermeprates/ML-Sandbox | tensorflow/mnist.py | 1 | 1281 | import numpy as np
import matplotlib.pyplot as plt
import os
import tensorflow as tf
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
def display(i):
img = test_data[i]
plt.title('Example %d. Label: %d' % (i, test_labels[i]))
plt.imshow(img.reshape((28, 28)), cmap=plt.cm.gray_r)
plt.show()
learn = tf.contrib.learn
tf.logging.set_verbosity(tf.logging.ERROR)
mnist = learn.datasets.load_dataset('mnist')
data = mnist.train.images
labels = np.asarray(mnist.train.labels, dtype=np.int32)
test_data = mnist.test.images
test_labels = np.asarray(mnist.test.labels, dtype=np.int32)
max_examples = 10000
data = data[:max_examples]
labels = labels[:max_examples]
# display(0)
# display(1)
# display(8)
# print(len(data[0]))
feature_columns = learn.infer_real_valued_columns_from_input(data)
classifier = learn.LinearClassifier(
feature_columns=feature_columns,
n_classes=10
)
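# A plain linear (softmax) classifier on the raw 784-pixel vectors; with settings like these it
# typically reaches roughly 91-92% test accuracy on MNIST (approximate figure stated as an
# assumption, not taken from this script's output).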
classifier.fit(data, labels, batch_size=100, steps=1000)
accuracy = classifier.evaluate(test_data, test_labels)["accuracy"]
print(accuracy)
samples = np.array([test_data[1]], dtype=float)
y = list(classifier.predict(samples, as_iterable=True))
print('Predictions: {}'.format(str(y)))
print ("Predicted %s, Label: %d" % (str(y), test_labels[1]))
display(1)
| mit |
byuflowlab/gaussian-wake | doc/compare_all_sowfa.py | 1 | 44719 | import numpy as np
import matplotlib.pyplot as plt
from scipy.io import loadmat
import cPickle as pickle
from openmdao.api import Problem
from wakeexchange.OptimizationGroups import OptAEP
from wakeexchange.gauss import gauss_wrapper, add_gauss_params_IndepVarComps
from wakeexchange.floris import floris_wrapper, add_floris_params_IndepVarComps
from wakeexchange.utilities import sunflower_points
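# plot_data_vs_model overlays SOWFA reference data (circles: upstream turbine, triangles:
# downstream turbine, squares: their sum) against the corresponding model curves, scaling the
# x axis by 1/rotor-diameter and power by 1e-3 by default.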
def plot_data_vs_model(ax=None, datax=np.zeros(0), datay=np.zeros(0), modelx=np.zeros(0),
modely=np.zeros(0), title='', xlabel='', ylabel='', datalabel='',
modellabel='', modelcolor='r', modelline='--', xscalar=1./126.4, yscalar=1E-3,
sum=True, front=True, second=True, legend=True):
if ax is None:
fig = plt.figure()
ax = fig.gca()
# plot data
if datax.size > 0:
if front:
ax.plot(datax*xscalar, datay[:, 0]*yscalar, 'o', mec='k', mfc='none', label=datalabel)
if second:
ax.plot(datax*xscalar, datay[:, 1]*yscalar, '^', mec='k', mfc='none')
if sum:
ax.plot(datax*xscalar, datay[:, 0]*yscalar+datay[:, 1]*yscalar, 'ks', mec='k', mfc='none')
# plot model
if modelx.size > 0:
# plot model
if front:
ax.plot(modelx*xscalar, modely[:, 0]*yscalar, modelline+modelcolor, label=modellabel)
if second:
ax.plot(modelx*xscalar, modely[:, 1]*yscalar, modelline+modelcolor)
if sum:
ax.plot(modelx*xscalar, modely[:, 0]*yscalar+modely[:, 1]*yscalar, modelline+'k')
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
ax.set_title(title)
if legend:
ax.legend()
return ax
def setup_probs():
nTurbines = 2
nDirections = 1
rotorDiameter = 126.4
rotorArea = np.pi*rotorDiameter*rotorDiameter/4.0
axialInduction = 1.0/3.0
CP = 0.7737/0.944 * 4.0 * 1.0/3.0 * np.power((1 - 1.0/3.0), 2)
# CP =0.768 * 4.0 * 1.0/3.0 * np.power((1 - 1.0/3.0), 2)
CT = 4.0*axialInduction*(1.0-axialInduction)
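# Actuator-disc relations at axial induction a = 1/3: CT = 4a(1-a) = 8/9 and the ideal
# CP = 4a(1-a)^2 = 16/27; the CP line above additionally rescales the ideal value by
# 0.7737/0.944, presumably an empirical correction for this rotor (assumed from context).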
generator_efficiency = 0.944
# Define turbine characteristics
axialInduction = np.array([axialInduction, axialInduction])
rotorDiameter = np.array([rotorDiameter, rotorDiameter])
generatorEfficiency = np.array([generator_efficiency, generator_efficiency])
yaw = np.array([0., 0.])
hubHeight = np.array([90.0, 90.0])
# Define site measurements
wind_direction = 270.-0.523599*180./np.pi
wind_speed = 8. # m/s
air_density = 1.1716
Ct = np.array([CT, CT])
Cp = np.array([CP, CP])
gauss_prob = Problem(root=OptAEP(nTurbines=nTurbines, nDirections=nDirections, use_rotor_components=False,
wake_model=gauss_wrapper, datasize=0, minSpacing=2.0,
params_IdepVar_func=add_gauss_params_IndepVarComps,
params_IndepVar_args={}))
floris_options = {'differentiable': True, 'nSamples': 0, 'use_rotor_components': False}
floris_prob_orig = Problem(root=OptAEP(nTurbines=nTurbines, nDirections=nDirections, use_rotor_components=False,
wake_model=floris_wrapper, wake_model_options=floris_options, datasize=0,
params_IdepVar_func=add_floris_params_IndepVarComps,
params_IndepVar_args={}))
floris_prob_tuned = Problem(root=OptAEP(nTurbines=nTurbines, nDirections=nDirections, use_rotor_components=False,
wake_model=floris_wrapper, wake_model_options=floris_options, datasize=0,
params_IdepVar_func=add_floris_params_IndepVarComps,
params_IndepVar_args={}))
probs = [gauss_prob, floris_prob_orig, floris_prob_tuned]
for prob in probs:
prob.setup()
if prob is floris_prob_orig or prob is floris_prob_tuned:
prob['model_params:useWakeAngle'] = True
turbineX = np.array([1118.1, 1881.9])
turbineY = np.array([1279.5, 1720.5])
# prob['gen_params:CTcorrected'] = False
# prob['gen_params:CPcorrected'] = False
prob['turbineX'] = turbineX
prob['turbineY'] = turbineY
prob['rotorDiameter'] = rotorDiameter
prob['axialInduction'] = axialInduction
prob['generatorEfficiency'] = generatorEfficiency
prob['air_density'] = air_density
prob['Cp_in'] = Cp
prob['Ct_in'] = Ct
prob['windSpeeds'] = np.array([wind_speed])
prob['windDirections'] = np.array([wind_direction])
prob['hubHeight'] = hubHeight
if prob is gauss_prob:
sort_turbs = True
wake_combination_method = 1 # can be [0:Linear freestreem superposition,
# 1:Linear upstream velocity superposition,
# 2:Sum of squares freestream superposition,
# 3:Sum of squares upstream velocity superposition]
ti_calculation_method = 0 # can be [0:No added TI calculations,
# 1:TI by Niayifar and Porte Agel altered by Annoni and Thomas,
# 2:TI by Niayifar and Porte Agel 2016,
# 3:no yet implemented]
calc_k_star = True
z_ref = 90.0
z_0 = 0.001
k_calc = 0.065
# tuned with 1 rotor point: error_turbine2: 380593.475508 ky: 0.0147484983033 kz: 0.0365360001244 I: 1.0 shear_exp: 0.0804912726779
# tuned with 500 rotor points: error_turbine2: 505958.824163 ky: 0.010239469297 kz: 0.0187826477801 I: 0.5 shear_exp: 0.115698347406
# tuned with 1000 rotor points: error_turbine2: 440240.45048 ky: 0.0132947699754 kz: 0.0267832386866 I: 0.149427342515 shear_exp: 0.107996557048
# tuned with k_star and 1000 rotor points: error_turbine2: 759565.303289 ky: 0.065 kz: 0.065 I: 0.0765060707278 shear_exp: 0.104381464423
# using NPA to calculate initial spreading, but then letting BPA adjust it with TI after that. 1000 rotor points
# error_turbine2: 759565.279351 ky: 0.0330333796913 kz: 0.0330333796913 I: 0.0765060716478 shear_exp: 0.104381467026
# using NPA to calculate initial spreading, but then letting BPA adjust it with TI after that. 16 rotor points
# error_turbine2: 642639.730582 ky: 0.0307280539404 kz: 0.0307280539404 I: 0.0704979253074 shear_exp: 0.108435318499
# tuning only shear_exp with 16 rotor points: error_turbine2: 779216.077341 ky: 0.0267 kz: 0.0267 I: 0.06 shear_exp: 0.161084449732
I = .063 # + 0.04
# I = .06
ky = 0.3837*I + 0.003678
# ky = 0.022
kz = 0.3837*I + 0.003678
# kz = 0.022
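# The ky/kz expressions above follow the linear wake-growth-rate fit k* = 0.3837*TI + 0.003678
# (attributed to Niayifar and Porte-Agel, 2016), applied identically in the lateral and
# vertical directions here.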
# shear_exp = 0.161084449732
shear_exp = 0.11
nRotorPoints = 1
prob['model_params:wake_combination_method'] = wake_combination_method
prob['model_params:ti_calculation_method'] = ti_calculation_method
prob['model_params:calc_k_star'] = calc_k_star
prob['model_params:sort'] = sort_turbs
prob['model_params:z_ref'] = z_ref
prob['model_params:z_0'] = z_0
prob['model_params:ky'] = ky
prob['model_params:kz'] = kz
prob['model_params:I'] = I
prob['model_params:shear_exp'] = shear_exp
print "in gauss setup"
if nRotorPoints > 1:
prob['model_params:RotorPointsY'], prob['model_params:RotorPointsZ'] = sunflower_points(nRotorPoints)
print "setting rotor points"
return probs
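# The list order is [gauss_prob, floris_prob_orig, floris_prob_tuned]; the comparison code in
# __main__ below relies on this, reading probs[0] for the Gaussian model and probs[1]/probs[2]
# for the two FLORIS configurations.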
# def set_params(probs):
# floris params
# probs[2]['model_params:kd'] = 0.224109
# probs[2]['model_params:initialWakeAngle'] = 3.384485
# probs[2]['model_params:initialWakeDisplacement'] = 8.407578
# probs[2]['model_params:bd'] = -0.010000
# probs[2]['model_params:ke'] = 0.055072
# probs[2]['model_params:me'] = np.array([-0.000001, 0.181752, 1.0])
# probs[2]['model_params:MU'] = np.array([0.933389, 1.0, 17.558286])
# probs[2]['model_params:aU'] = 6.751072
# probs[2]['model_params:bU'] = 1.681766
# probs[2]['model_params:cos_spread'] = 9.989090
# gauss params
# probs[0]['model_params:ke'] = 0.052
# probs[0]['model_params:spread_angle'] = 6.
# probs[0]['model_params:rotation_offset_angle'] = 2.0
# for axialInd calc only
# probs[0]['model_params:ke'] = 0.050688
# probs[0]['model_params:spread_angle'] = 7.562716
# probs[0]['model_params:rotation_offset_angle'] = 3.336568
# for axialInd and inflow adjust
# probs[0]['model_params:ke'] = 0.052333
# probs[0]['model_params:spread_angle'] = 8.111330
# probs[0]['model_params:rotation_offset_angle'] = 2.770265
# for inflow adjust only
# probs[0]['model_params:ke'] = 0.052230
# probs[0]['model_params:spread_angle'] = 6.368191
# probs[0]['model_params:rotation_offset_angle'] = 1.855112
# for added n_st_dev param #1
# probs[0]['model_params:ke'] = 0.050755
# probs[0]['model_params:spread_angle'] = 11.205766#*0.97
# probs[0]['model_params:rotation_offset_angle'] = 3.651790
# probs[0]['model_params:n_std_dev'] = 9.304371
# for added n_st_dev param #2
# probs[0]['model_params:ke'] = 0.051010
# probs[0]['model_params:spread_angle'] = 11.779591
# probs[0]['model_params:rotation_offset_angle'] = 3.564547
# probs[0]['model_params:n_std_dev'] = 9.575505
# for decoupled ky with n_std_dev = 4
# probs[0]['model_params:ke'] = 0.051145
# probs[0]['model_params:spread_angle'] = 2.617982
# probs[0]['model_params:rotation_offset_angle'] = 3.616082
# probs[0]['model_params:ky'] = 0.211496
# for integrating for decoupled ky with n_std_dev = 4, linear, integrating
# probs[0]['model_params:ke'] = 0.016969
# probs[0]['model_params:spread_angle'] = 0.655430
# probs[0]['model_params:rotation_offset_angle'] = 3.615754
# probs[0]['model_params:ky'] = 0.195392
# for integrating for decoupled ky with n_std_dev = 4, linear, integrating
# probs[0]['model_params:ke'] = 0.008858
# probs[0]['model_params:spread_angle'] = 0.000000
# probs[0]['model_params:rotation_offset_angle'] = 4.035276
# probs[0]['model_params:ky'] = 0.199385
# for decoupled ke with n_std_dev=4, linear, not integrating
# probs[0]['model_params:ke'] = 0.051190
# probs[0]['model_params:spread_angle'] = 2.619202
# probs[0]['model_params:rotation_offset_angle'] = 3.629337
# probs[0]['model_params:ky'] = 0.211567
# for n_std_dev = 4, error = 1332.49, not integrating, power law
# probs[0]['model_params:ke'] = 0.051360
# probs[0]['model_params:rotation_offset_angle'] = 3.197348
# probs[0]['model_params:Dw0'] = 1.804024
# probs[0]['model_params:Dw0'] = 1.63
# probs[0]['model_params:m'] = 0.00
# for n_std_dev = 5.4, error = 1136.21, not integrating, power law
# probs[0]['model_params:ke'] = 0.051147
# probs[0]['model_params:rotation_offset_angle'] = 3.616963
# probs[0]['model_params:Dw0'] = 1.834599
# probs[0]['model_params:m'] = 0.096035
# for decoupled ky with n_std_dev = 4, error = 1630.8, with integrating, power law
# probs[0]['model_params:ke'] = 0.033165
# probs[0]['model_params:rotation_offset_angle'] = 3.328051
# probs[0]['model_params:Dw0'] = 1.708328
# probs[0]['model_params:m'] = 0.0
# for decoupled ky with n_std_dev = 4, error = 1140.59, not integrating, power law for expansion,
# linear for yaw
# probs[0]['model_params:ke'] = 0.050741
# probs[0]['model_params:rotation_offset_angle'] = 3.628737
# probs[0]['model_params:Dw0'] = 0.846582
# probs[0]['model_params:ky'] = 0.207734
# for decoupled ky with n_std_dev = 4, error = 1058.73, integrating, power law for expansion,
# linear for yaw
# probs[0]['model_params:ke'] = 0.016129
# probs[0]['model_params:rotation_offset_angle'] = 3.644356
# probs[0]['model_params:Dw0'] = 0.602132
# probs[0]['model_params:ky'] = 0.191178
# for power law yaw, deficit, and expansion, error = 1759.5
# probs[0]['model_params:rotation_offset_angle'] = 1.393646
# probs[0]['model_params:Dw0'] = 1.254036
# probs[0]['model_params:m'] = 0.166732
# for power law yaw, deficit, and expansion (reccomended values)
# probs[0]['model_params:rotation_offset_angle'] = 1.393646
# probs[0]['model_params:Dw0'] = 1.33
# probs[0]['model_params:m'] = 0.33
# for power law all, Dw0 separate, tuned m
# probs[0]['model_params:rotation_offset_angle'] = 1.454099
# probs[0]['model_params:Dw0'] = np.array([1.305050, 1.401824, 1.420907])
# probs[0]['model_params:m'] = 0.101128
# for power law all, Dw0 separate, constant m
# probs[0]['model_params:rotation_offset_angle'] = 1.454099
# probs[0]['model_params:rotation_offset_angle'] = 1.096865
# probs[0]['model_params:Dw0'] = np.array([1.281240, 0.897360, 0.911161])
# probs[0]['model_params:Dw0'] = np.array([1.3, 1.00005, 1.])
# probs[0]['model_params:m'] = 0.
# for power all but deficit with constant m
# probs[0]['model_params:ke'] = 0.051126
# probs[0]['model_params:rotation_offset_angle'] = 3.603684
# probs[0]['model_params:Dw0'] = np.array([1.794989, 0.863206, 1.])
# probs[0]['model_params:m'] = 0.33
# for power law all with constant m
# probs[0]['model_params:rotation_offset_angle'] = 0.620239
# probs[0]['model_params:Dw0'] = np.array([1.265505, 0.958504, 0.896609])
# probs[0]['model_params:Dw0'] = np.array([1.3, 0.958504, 0.896609])
# probs[0]['model_params:m'] = 0.33
# for power law all with tuned m
# probs[0]['model_params:rotation_offset_angle'] = 0.727846
# probs[0]['model_params:Dw0'] = np.array([1.185009, 1.140757, 1.058244])
# probs[0]['model_params:m'] = 0.230722
# for power law all with tuned m and double weight yaw error
# probs[0]['model_params:rotation_offset_angle'] = 0.802148541875
# probs[0]['model_params:Dw0'] = np.array([1.18307813, 1.16833547, 1.08521648])
# probs[0]['model_params:m'] = 0.210864251457
# for power law all with tuned m and 20x weight yaw error
# probs[0]['model_params:rotation_offset_angle'] = 0.871926
# probs[0]['model_params:Dw0'] = np.array([1.190799, 1.223558, 1.142646])
# probs[0]['model_params:m'] = 0.167548
# for power law all with individually tuned m and Dw0
# probs[0]['model_params:rotation_offset_angle'] = 0.811689835284
# probs[0]['model_params:Dw0'] = np.array([1.22226021, 1.39849858, 0.97207545])
# probs[0]['model_params:m'] = np.array([0.15566507, 0.1, 0.28422703])
# for power law all with individually tuned m and Dw0, yaw weighted by 3
# probs[0]['model_params:rotation_offset_angle'] = 0.884526810188
# probs[0]['model_params:Dw0'] = np.array([1.21546909, 1.37702043, 0.95703538])
# probs[0]['model_params:m'] = np.array([0.17499415, 0.1, 0.28738021])
# for power law all with individually tuned m and Dw0, yaw weighted by 3
# probs[0]['model_params:rotation_offset_angle'] = 0.726281139043
# probs[0]['model_params:Dw0'] = np.array([10.80879724, 1.25208657, 0.62180341])
# probs[0]['model_params:m'] = np.array([0.5014354, 0.1, 0.53332655])
# for individual power law for diam and deficit. Yaw with linear model
# probs[0]['model_params:rotation_offset_angle'] = 0.810644329131
# probs[0]['model_params:Dw0'] = np.array([1.3, 1.64288886, 0.9818137])
# probs[0]['model_params:m'] = np.array([0.33, 0., 0.27860778])
# probs[0]['model_params:ky'] = 0.0679899837662
# for power law all with individually tuned m and Dw0, using 2*a instead of a-1
# probs[0]['model_params:rotation_offset_angle'] = 2.11916457882
# probs[0]['model_params:Dw0'] = np.array([1.86868658, 1.6258426, 0.94648549])
# probs[0]['model_params:m'] = np.array([0., 0., 0.29782246])
# # for power law with individually tuned m and Dw0, linear yaw, including rotor offset, using 2*a instead of a-1
# probs[0]['model_params:rotation_offset_angle'] = 1.482520
# probs[0]['model_params:ky'] = 0.204487
# probs[0]['model_params:Dw0'] = np.array([1.3, 0.607414, 0.107801])
# probs[0]['model_params:m'] = np.array([0.33, 0., 0.964934])
# for power law with individually tuned m and Dw0 including rotor offset for diam and deficit, using 2*a instead of a-1
# probs[0]['model_params:rotation_offset_angle'] = 2.054952
# probs[0]['model_params:Dw0'] = np.array([1.869272, 0.612485, 0.123260])
# probs[0]['model_params:m'] = np.array([0., 0., 0.885561])
# for power law with individually tuned m and Dw0 using Aitken power law for deficit, linear offset
# probs[0]['model_params:rotation_offset_angle'] = 0.921858
# probs[0]['model_params:ky'] = 0.085021
# probs[0]['model_params:Dw0'] = np.array([1.342291, 1.641186, 0.728072])
# probs[0]['model_params:m'] = np.array([0.100775, 0., -0.585698])
# for power law with individually tuned m and Dw0 using Aitken power law for deficit, inflow for Fleming data at 8.3....
# probs[0]['model_params:rotation_offset_angle'] = 1.062842
# probs[0]['model_params:rotation_offset_angle'] = 2.062842
# probs[0]['model_params:Dw0'] = np.array([1.333577, 1.621352, 0.639195])
# probs[0]['model_params:m'] = np.array([0.130396, 0., -0.522295])
# for power law with individually tuned m and Dw0 using Aitken power law for deficit, inflow for Fleming data at 8.3....
# probs[0]['model_params:rotation_offset_angle'] = 0.946076
# probs[0]['model_params:Dw0'] = np.array([1.353735, 1.623139, 0.656002])
# probs[0]['model_params:m'] = np.array([0.236072, 0., -0.541287])
# for power law with suggested m and Dw0 using Aitken power law for deficit, inflow for Fleming data at 8.3....
# probs[0]['model_params:rotation_offset_angle'] = 1.5
# probs[0]['model_params:Dw0'] = np.array([1.3, 1.3, 0.56])
# probs[0]['model_params:m'] = np.array([0.33, 0.33, -0.57])
# linear everything - coupled - tuned to all data - inflow for Fleming data at 8.3....
# probs[0]['model_params:ke'] = 0.052166
# probs[0]['model_params:spread_angle'] = 3.156446
# probs[0]['model_params:rotation_offset_angle'] = 1.124459
# probs[0]['model_params:ky'] = 0.247883
# for n_std_dev = 4, error = 1332.49, not integrating, power law
# probs[0]['model_params:ke'] = 0.051360
# probs[0]['model_params:rotation_offset_angle'] = 3.197348
# probs[0]['model_params:Dw0'] = np.array([1.804024, 1.804024, 1.804024])
# probs[0]['model_params:m'] = np.array([0.0, 0.0, 0.0])
# for n_std_dev = 4, linear all, 2*D
# probs[0]['model_params:ke'] = 0.112334
# probs[0]['model_params:ky'] = 0.468530
# probs[0]['model_params:spread_angle'] = 0.0
# probs[0]['model_params:rotation_offset_angle'] = 1.915430
# rederived yaw with power. Power law all
# probs[0]['model_params:rotation_offset_angle'] = 1.5*0.946076
# probs[0]['model_params:Dw0'] = np.array([1.353735, 1.623139, 0.656002])
# probs[0]['model_params:m'] = np.array([0.236072, 0.0, -0.541287])
# rederived yaw with power. Power law all. Dw0[0]=Dw0[1], m[0]=m[1]
# probs[0]['model_params:rotation_offset_angle'] = 1.02985
# probs[0]['model_params:Dw0'] = np.array([1.388779, 1.388779, 0.642637])
# probs[0]['model_params:m'] = np.array([0.100669, 0.100669, -0.530337])
# rederived yaw with power. Power law all. Dw0[0]=Dw0[1], m[0]=m[1], tuned to all data
# probs[0]['model_params:rotation_offset_angle'] = 1.052238
# probs[0]['model_params:Dw0'] = np.array([1.364724, 1.364724, 0.663934])
# probs[0]['model_params:m'] = np.array([0.092746, 0.092746, -0.542009])
# rederived yaw with power. Power law all. Dw0[0]=Dw0[1], m[0]=m[1], tuned to all data
# rederived deficit using actuator disc and momentum balance
# probs[0]['model_params:rotation_offset_angle'] = 2.089085
# probs[0]['model_params:Dw0'] = np.array([1.488695, 1.488695, 0.560000])
# probs[0]['model_params:m'] = np.array([0.000000, 0.000000, -0.542009])
# probs[0]['model_params:rotation_offset_angle'] = 1.749621
# probs[0]['model_params:Dw0'] = np.array([1.267740, 1.267740, 0.560000])
# probs[0]['model_params:m'] = np.array([0.000000, 0.000000, -0.542009])
# power law as per Aitken et all plus axial induction*2
# this is a pretty reasonable fit, but defines no expansion in the wake
# probs[0]['model_params:rotation_offset_angle'] = 2.229160
# probs[0]['model_params:Dw0'] = np.array([1.889748, 1.603116, 1.037203])
# probs[0]['model_params:m'] = np.array([0.000000, 0.000000, -0.563005])
# power law as per Aitken et all plus axial induction*2, added x shift by 1D
# probs[0]['model_params:rotation_offset_angle'] = 2.078138 + 1.5
# probs[0]['model_params:Dw0'] = np.array([2.040208, 1.596522, 1.474140])
# probs[0]['model_params:m'] = np.array([0.00000, 0.000000, -0.698327])
# power law as per Aitken et all plus axial induction*2, added x shift by 1D except for deficit
# also a reasonable fit, but no wake expansion
# probs[0]['model_params:rotation_offset_angle'] = 2.038664
# probs[0]['model_params:Dw0'] = np.array([2.038664, 1.601559, 1.055975])
# probs[0]['model_params:m'] = np.array([0.00000, 0.000000, -0.574079])
# power law as per Aitken et all plus axial induction*2, added y shift tunable
# excellent fit, but no wake expansion and uses linear yaw offset
# probs[0]['model_params:rotation_offset_angle'] = 8.466369
# probs[0]['model_params:Dw0'] = np.array([1.893739, 1.586107, 0.987548])
# probs[0]['model_params:m'] = np.array([0.00000, 0.000000, -0.524822])
# probs[0]['model_params:yshift'] = -21.775754
# probs[0]['model_params:rotation_offset_angle'] = 10.762858
# probs[0]['model_params:Dw0'] = np.array([1.748372, 1.345945, 1.045982])
# probs[0]['model_params:m'] = np.array([0.100000, 0.100000, -0.556969])
# probs[0]['model_params:yshift'] = -30.551647
# using Bastankhah with linear yaw
# probs[0]['model_params:ke'] = 0.077491
# probs[0]['model_params:ky'] = 0.159944
# probs[0]['model_params:yshift'] = -4.614311
# Bastankhah with Bastankhah yaw
# probs[0]['model_params:ke'] = 0.07747
# probs[0]['model_params:ky'] = 0.159944
# probs[0]['model_params:yshift'] = -4.614311
# probs[0]['model_params:ke'] = 0.078413
# probs[0]['model_params:ky'] = 0.641951
# probs[0]['model_params:yshift'] = -3.870224
# probs[0]['model_params:ke'] = 0.038558
# probs[0]['model_params:ky'] = 0.078129
# probs[0]['model_params:yshift'] = -19.948941
# probs[0]['model_params:rotation_offset_angle'] = -4.0
# probs[0]['model_params:ke'] = 0.038993
# probs[0]['model_params:ky'] = 0.087260
# probs[0]['model_params:yshift'] = -4.614311
# probs[0]['model_params:ke'] = 0.0390487790134
# probs[0]['model_params:ky'] = 0.039
# probs[0]['model_params:rotation_offset_angle'] = 0.72681975016
# ke = ky tuned to all
# probs[0]['model_params:ke'] = 0.039166
# probs[0]['model_params:ky'] = 0.039166
# probs[0]['model_params:rotation_offset_angle'] = 1.044754
# ke != ky tuned to all
# probs[0]['model_params:ke'] = 0.039200
# probs[0]['model_params:ky'] = 0.048369
# probs[0]['model_params:rotation_offset_angle'] = 1.175184
# ke != ky tuned to 7D
# probs[0]['model_params:ke'] = 0.035706
# probs[0]['model_params:ky'] = 0.046970
# probs[0]['model_params:rotation_offset_angle'] = 2.342700
# ke = ky tuned to 7D
# probs[0]['model_params:ke'] = 0.036002
# probs[0]['model_params:ky'] = 0.036002
# probs[0]['model_params:rotation_offset_angle'] = 1.5
# Bastankhah with power yaw
# probs[0]['model_params:ke'] = 0.07747
# probs[0]['model_params:Dw0'] = np.array([1.49752, 1.3, 1.3])
# probs[0]['model_params:m'] = np.array([0.23975, 0.33, 0.33])
# probs[0]['model_params:yshift'] = -4.63626
# linear everything - coupled - tuned to all data - inflow for Fleming data at 8.3....
# probs[0]['model_params:ke'] = 0.051690
# probs[0]['model_params:spread_angle'] = 3.115443
# probs[0]['model_params:rotation_offset_angle'] = 1.235173
# probs[0]['model_params:ky'] = 0.205729
# probs[0]['model_params:integrate'] = False
# probs[0]['model_params:spread_mode'] = 'power'
# probs[0]['model_params:yaw_mode'] = 'power'
# probs[0]['model_params:n_std_dev'] = 4.
if __name__ == "__main__":
probs = setup_probs()
# set_params(probs)
# time the models
import time
t1 = time.time()
for i in range(0, 100):
probs[0].run()
t2 = time.time()
for i in range(0, 100):
probs[1].run()
t3 = time.time()
# gauss time: 0.0580031871796
# floris time: 0.10697388649
print 'gauss time: ', t2-t1
print 'floris time: ', t3-t2
print probs[1]['wtVelocity0']
print probs[1]['wtPower0']
print probs[1]['AEP']
# load data
ICOWESdata = loadmat('../data/YawPosResults.mat')
with open('../data/yawPower.p', 'rb') as handle:
yawrange_4D, SOWFApower_yaw_4D, _, _ = pickle.load(handle)
with open('../data/offset4DPower.p', 'rb') as handle:
posrange_cs_4D, SOWFApower_cs_4D = pickle.load(handle)
with open('../data/offset6DPower.p', 'rb') as handle:
posrange_cs_6D, SOWFApower_cs_6D = pickle.load(handle)
with open('../data/spacePower.p', 'rb') as handle:
posrange_ds, SOWFApower_ds = pickle.load(handle)
# set plot params
rotor_diameter = probs[0]['rotorDiameter'][0]
ICOWESvelocity = 8.0
PFvelocity = 8.48673684
PFvelocity = 8.38673684
power_scalar = 1E-3
distance_scalar = 1./rotor_diameter
velocity_scalar = 1.
angle_scalar = 1.
floris_color = 'b'
gauss_color = 'r'
floris_tuned_color = 'g'
floris_line = '-'
floris_tuned_line = '-.'
gauss_line = '--'
FlorisError = 0.0
GaussError = 0.0
FlorisTunedError = 0.0
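# FlorisError, GaussError and FlorisTunedError accumulate the sum of squared differences between
# each model's downstream-turbine power and the SOWFA reference over all yaw and position cases below.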
# ################## compare yaw ######################
YawPowFig, YawPowAx = plt.subplots(ncols=2, nrows=1, sharey=False)
plt.hold(True)
# 4D yaw
yawrange = np.array(list(yawrange_4D))
GaussianPower = list()
FlorisPower = list()
FlorisPowerTuned = list()
# set to 4D positions and inflow velocity
for prob in probs:
prob['turbineX'] = np.array([1118.1, 1556.0])
prob['turbineY'] = np.array([1279.5, 1532.3])
prob['windSpeeds'] = np.array([PFvelocity])
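# With these coordinates the downstream turbine sits about 505.6 m from the upstream one,
# i.e. four rotor diameters (4 * 126.4 m), which is what the '4D' label in the plots refers to.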
for yaw1 in yawrange:
for prob in probs:
prob['yaw0'] = np.array([yaw1, 0.0])
prob.run()
GaussianPower.append(list(probs[0]['wtPower0']))
FlorisPower.append(list(probs[1]['wtPower0']))
FlorisPowerTuned.append(list(probs[2]['wtPower0']))
GaussianPower = np.array(GaussianPower)
FlorisPower = np.array(FlorisPower)
FlorisPowerTuned = np.array(FlorisPowerTuned)
# print FlorisPower
print FlorisPower
print GaussianPower
SOWFApower = SOWFApower_yaw_4D*1E-3
plot_data_vs_model(ax=YawPowAx[0], modelx=yawrange,
modely=FlorisPower,
modellabel='FLORIS', modelcolor=floris_color, modelline=floris_line,
xscalar=angle_scalar, yscalar=power_scalar, legend=True)
plot_data_vs_model(ax=YawPowAx[0], datax=yawrange, datay=SOWFApower, modelx=yawrange,
modely=GaussianPower, title='4D', xlabel='yaw angle (deg.)', ylabel='Power (MW)',
datalabel='SOWFA', modellabel='Gauss', modelcolor=gauss_color, modelline=gauss_line,
xscalar=angle_scalar, yscalar=power_scalar, legend=True)
# plot_data_vs_model(ax=YawPowAx[0], datax=yawrange, datay=SOWFApower, modelx=yawrange,
# modely=FlorisPowerTuned, title='4D', xlabel='yaw angle (deg.)', ylabel='Power (MW)',
# datalabel='SOWFA', modellabel='Floris Re-Tuned', modelcolor=floris_tuned_color,
# modelline=floris_tuned_line, xscalar=angle_scalar, yscalar=power_scalar)
FlorisError += np.sum((SOWFApower[:, 1]-FlorisPower[:, 1])**2)
GaussError += np.sum((SOWFApower[:, 1]-GaussianPower[:, 1])**2)
FlorisTunedError += np.sum((SOWFApower[:, 1]-FlorisPowerTuned[:, 1])**2)
# 7D yaw
yawrange = ICOWESdata['yaw'][0]
GaussianPower = list()
FlorisPower = list()
FlorisPowerTuned = list()
# set to 7D positions
for prob in probs:
prob['turbineX'] = np.array([1118.1, 1881.9])
prob['turbineY'] = np.array([1279.5, 1720.5])
prob['windSpeeds'] = np.array([ICOWESvelocity])
# run analysis
for yaw1 in yawrange:
for prob in probs:
prob['yaw0'] = np.array([yaw1, 0.0])
prob.run()
GaussianPower.append(list(probs[0]['wtPower0']))
FlorisPower.append(list(probs[1]['wtPower0']))
FlorisPowerTuned.append(list(probs[2]['wtPower0']))
GaussianPower = np.array(GaussianPower)
FlorisPower = np.array(FlorisPower)
FlorisPowerTuned = np.array(FlorisPowerTuned)
# plot
SOWFApower = np.array([ICOWESdata['yawPowerT1'][0], ICOWESdata['yawPowerT2'][0]]).transpose()/1000.
plot_data_vs_model(ax=YawPowAx[1], modelx=yawrange,
modely=FlorisPower,
modellabel='FLORIS', modelcolor=floris_color, modelline=floris_line,
xscalar=angle_scalar, yscalar=power_scalar, legend=True)
plot_data_vs_model(ax=YawPowAx[1], datax=yawrange, datay=SOWFApower, modelx=yawrange,
modely=GaussianPower, title='7D', xlabel='yaw angle (deg.)', ylabel='Power (MW)',
datalabel='SOWFA', modellabel='Gauss', modelcolor=gauss_color, modelline=gauss_line,
xscalar=angle_scalar, yscalar=power_scalar, legend=True)
# plot_data_vs_model(ax=YawPowAx[1], datax=yawrange, datay=SOWFApower, modelx=yawrange,
# modely=FlorisPowerTuned, title='7D', xlabel='yaw angle (deg.)', ylabel='Power (MW)',
# datalabel='SOWFA', modellabel='Floris Re-Tuned', modelcolor=floris_tuned_color,
# modelline=floris_tuned_line, xscalar=angle_scalar, yscalar=power_scalar)
FlorisError += np.sum((SOWFApower[:, 1]-FlorisPower[:, 1])**2)
GaussError += np.sum((SOWFApower[:, 1]-GaussianPower[:, 1])**2)
FlorisTunedError += np.sum((SOWFApower[:, 1]-FlorisPowerTuned[:, 1])**2)
# ################## compare position ######################
PosPowFig, PosPowAx = plt.subplots(ncols=2, nrows=2, sharey=False)
for prob in probs:
prob['yaw0'] = np.array([0.0, 0.0])
prob['windSpeeds'] = np.array([PFvelocity])
# position crosswind 4D
posrange = np.array(list(posrange_cs_4D))
GaussianPower = list()
FlorisPower = list()
FlorisPowerTuned = list()
for pos2 in posrange:
# Define turbine locations and orientation (4D)
effUdXY = 0.523599
Xinit = np.array([1118.1, 1556.0])
Yinit = np.array([1279.5, 1532.3])
XY = np.array([Xinit, Yinit]) + np.dot(np.array([[np.cos(effUdXY), -np.sin(effUdXY)],
[np.sin(effUdXY), np.cos(effUdXY)]]),
np.array([[0., 0], [0, pos2]]))
for prob in probs:
prob['turbineX'] = XY[0, :]
prob['turbineY'] = XY[1, :]
prob.run()
GaussianPower.append(list(probs[0]['wtPower0']))
FlorisPower.append(list(probs[1]['wtPower0']))
FlorisPowerTuned.append(list(probs[2]['wtPower0']))
GaussianPower = np.array(GaussianPower)
FlorisPower = np.array(FlorisPower)
FlorisPowerTuned = np.array(FlorisPowerTuned)
SOWFApower = SOWFApower_cs_4D*1E-3
# print error_turbine2
plot_data_vs_model(ax=PosPowAx[0, 0], modelx=posrange,
modely=FlorisPower,
modellabel='FLORIS', modelcolor=floris_color, modelline=floris_line,
xscalar=distance_scalar, yscalar=power_scalar)
plot_data_vs_model(ax=PosPowAx[0, 0], datax=posrange, datay=SOWFApower, modelx=posrange,
modely=GaussianPower, title='4D', xlabel='y/D', ylabel='Power (MW)',
datalabel='SOWFA', modellabel='Gauss', modelcolor=gauss_color, modelline=gauss_line,
xscalar=distance_scalar, yscalar=power_scalar)
# plot_data_vs_model(ax=PosPowAx[0, 0], datax=posrange, datay=SOWFApower, modelx=posrange,
# modely=FlorisPowerTuned, title='4D', xlabel='y/D', ylabel='Power (MW)',
# datalabel='SOWFA', modellabel='Floris Re-Tuned', modelcolor=floris_tuned_color,
# modelline=floris_tuned_line, xscalar=distance_scalar, yscalar=power_scalar)
FlorisError += np.sum((SOWFApower[:, 1]-FlorisPower[:, 1])**2)
GaussError += np.sum((SOWFApower[:, 1]-GaussianPower[:, 1])**2)
FlorisTunedError += np.sum((SOWFApower[:, 1]-FlorisPowerTuned[:, 1])**2)
# position crosswind 6D
posrange = np.array(list(posrange_cs_6D))
GaussianPower = list()
FlorisPower = list()
FlorisPowerTuned = list()
for prob in probs:
prob['windSpeeds'] = np.array([PFvelocity])
for pos2 in posrange:
# Define turbine locations and orientation (4D)
effUdXY = 0.523599
Xinit = np.array([1118.1, 1556.0])
Yinit = np.array([1279.5, 1532.3])
XY = np.array([Xinit, Yinit]) + np.dot(np.array([[np.cos(effUdXY), -np.sin(effUdXY)],
[np.sin(effUdXY), np.cos(effUdXY)]]),
np.array([[0., 0], [0, pos2]]))
for prob in probs:
prob['turbineX'] = XY[0, :]
prob['turbineY'] = XY[1, :]
prob.run()
GaussianPower.append(list(probs[0]['wtPower0']))
FlorisPower.append(list(probs[1]['wtPower0']))
FlorisPowerTuned.append(list(probs[2]['wtPower0']))
GaussianPower = np.array(GaussianPower)
FlorisPower = np.array(FlorisPower)
FlorisPowerTuned = np.array(FlorisPowerTuned)
SOWFApower = SOWFApower_cs_6D*1E-3
# print error_turbine2
plot_data_vs_model(ax=PosPowAx[0, 1], modelx=posrange,
modely=FlorisPower,
modellabel='FLORIS', modelcolor=floris_color, modelline=floris_line,
xscalar=distance_scalar, yscalar=power_scalar)
plot_data_vs_model(ax=PosPowAx[0, 1], datax=posrange, datay=SOWFApower, modelx=posrange,
modely=GaussianPower, title='6D', xlabel='y/D', ylabel='Power (MW)',
datalabel='SOWFA', modellabel='Gauss', modelcolor=gauss_color, modelline=gauss_line,
xscalar=distance_scalar, yscalar=power_scalar)
# plot_data_vs_model(ax=PosPowAx[0, 1], datax=posrange, datay=SOWFApower, modelx=posrange,
# modely=FlorisPowerTuned, title='6D', xlabel='y/D', ylabel='Power (MW)',
# datalabel='SOWFA', modellabel='Floris Re-Tuned', modelcolor=floris_tuned_color,
# modelline=floris_tuned_line, xscalar=distance_scalar, yscalar=power_scalar)
FlorisError += np.sum((SOWFApower[:, 1]-FlorisPower[:, 1])**2)
GaussError += np.sum((SOWFApower[:, 1]-GaussianPower[:, 1])**2)
FlorisTunedError += np.sum((SOWFApower[:, 1]-FlorisPowerTuned[:, 1])**2)
# position crosswind 7D
posrange = ICOWESdata['pos'][0]
GaussianPower = list()
FlorisPower = list()
FlorisPowerTuned = list()
for prob in probs:
prob['windSpeeds'] = np.array([ICOWESvelocity])
for pos2 in posrange:
# Define turbine locations and orientation
effUdXY = 0.523599
Xinit = np.array([1118.1, 1881.9])
Yinit = np.array([1279.5, 1720.5])
XY = np.array([Xinit, Yinit]) + np.dot(np.array([[np.cos(effUdXY), -np.sin(effUdXY)],
[np.sin(effUdXY), np.cos(effUdXY)]]),
np.array([[0., 0], [0, pos2]]))
for prob in probs:
prob['turbineX'] = XY[0, :]
prob['turbineY'] = XY[1, :]
prob.run()
GaussianPower.append(list(probs[0]['wtPower0']))
FlorisPower.append(list(probs[1]['wtPower0']))
FlorisPowerTuned.append(list(probs[2]['wtPower0']))
GaussianPower = np.array(GaussianPower)
FlorisPower = np.array(FlorisPower)
FlorisPowerTuned = np.array(FlorisPowerTuned)
SOWFApower = np.array([ICOWESdata['posPowerT1'][0], ICOWESdata['posPowerT2'][0]]).transpose()/1000.
# print error_turbine2
plot_data_vs_model(ax=PosPowAx[1, 0], modelx=posrange,
modely=FlorisPower,
modellabel='FLORIS', modelcolor=floris_color, modelline=floris_line,
xscalar=distance_scalar, yscalar=power_scalar)
plot_data_vs_model(ax=PosPowAx[1, 0], datax=posrange, datay=SOWFApower, modelx=posrange,
modely=GaussianPower, title='7D', xlabel='y/D', ylabel='Power (MW)',
datalabel='SOWFA', modellabel='Gauss', modelcolor=gauss_color, modelline=gauss_line,
xscalar=distance_scalar, yscalar=power_scalar)
# plot_data_vs_model(ax=PosPowAx[1, 0], datax=posrange, datay=SOWFApower, modelx=posrange,
# modely=FlorisPowerTuned, title='7D', xlabel='y/D', ylabel='Power (MW)',
# datalabel='SOWFA', modellabel='Floris Re-Tuned', modelcolor=floris_tuned_color,
# modelline=floris_tuned_line, xscalar=distance_scalar, yscalar=power_scalar)
FlorisError += np.sum((SOWFApower[:, 1]-FlorisPower[:, 1])**2)
GaussError += np.sum((SOWFApower[:, 1]-GaussianPower[:, 1])**2)
FlorisTunedError += np.sum((SOWFApower[:, 1]-FlorisPowerTuned[:, 1])**2)
# position downstream
posrange = np.array(list(posrange_ds))*rotor_diameter
GaussianPower = list()
FlorisPower = list()
FlorisPowerTuned = list()
for prob in probs:
prob['windSpeeds'] = np.array([PFvelocity])
prob['turbineY'] = np.array([0.0, 0.0])
prob['windDirections'] = np.array([270.0])
for pos2 in posrange:
for prob in probs:
prob['turbineX'] = np.array([0.0, pos2])
prob.run()
GaussianPower.append(list(probs[0]['wtPower0']))
FlorisPower.append(list(probs[1]['wtPower0']))
FlorisPowerTuned.append(list(probs[2]['wtPower0']))
GaussianPower = np.array(GaussianPower)
FlorisPower = np.array(FlorisPower)
FlorisPowerTuned = np.array(FlorisPowerTuned)
SOWFApower = SOWFApower_ds*1E-3
# print error_turbine2
plot_data_vs_model(ax=PosPowAx[1, 1], modelx=posrange,
modely=FlorisPower,
modellabel='FLORIS', modelcolor=floris_color, modelline=floris_line,
xscalar=distance_scalar, yscalar=power_scalar)
plot_data_vs_model(ax=PosPowAx[1, 1], datax=posrange, datay=SOWFApower, modelx=posrange,
modely=GaussianPower, title='Downstream', xlabel='x/D', ylabel='Power (MW)',
datalabel='SOWFA', modellabel='Gauss', modelcolor=gauss_color, modelline=gauss_line,
xscalar=distance_scalar, yscalar=power_scalar)
# plot_data_vs_model(ax=PosPowAx[1, 1], datax=posrange, datay=SOWFApower, modelx=posrange,
# modely=FlorisPowerTuned, title='Downstream', xlabel='x/D', ylabel='Power (MW)',
# datalabel='SOWFA', modellabel='Floris Re-Tuned', modelcolor=floris_tuned_color,
# modelline=floris_tuned_line, xscalar=distance_scalar, yscalar=power_scalar)
FlorisError += np.sum((SOWFApower[:, 1]-FlorisPower[:, 1])**2)
GaussError += np.sum((SOWFApower[:, 1]-GaussianPower[:, 1])**2)
FlorisTunedError += np.sum((SOWFApower[:, 1]-FlorisPowerTuned[:, 1])**2)
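# Accumulated squared error between SOWFA and each model for the downstream (waked) turbine,
# summed over the 4D/6D/7D crosswind sweeps and the downstream sweep above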
print 'Floris error: ', FlorisError, ' Gauss error: ', GaussError, 'Floris Re-Tuned Error: ', FlorisTunedError
# ################## compare velocity ######################
PosVelFig, PosVelAx = plt.subplots(ncols=2, nrows=2, sharey=False)
# velocity crosswind 7D
posrange = np.linspace(-3.*rotor_diameter, 3.*rotor_diameter, num=1000)
for prob in probs:
prob['yaw0'] = np.array([0.0, 0.0])
prob['windDirections'] = np.array([270.])
prob['turbineX'] = np.array([0, 7.*rotor_diameter])
GaussianVelocity = list()
FlorisVelocity = list()
FlorisVelocityTuned = list()
for pos2 in posrange:
for prob in probs:
prob['turbineY'] = np.array([0, pos2])
prob.run()
GaussianVelocity.append(list(probs[0]['wtVelocity0']))
FlorisVelocity.append(list(probs[1]['wtVelocity0']))
FlorisVelocityTuned.append(list(probs[2]['wtVelocity0']))
FlorisVelocity = np.array(FlorisVelocity)
GaussianVelocity = np.array(GaussianVelocity)
FlorisVelocityTuned = np.array(FlorisVelocityTuned)
plot_data_vs_model(ax=PosVelAx[1, 0], modelx=posrange,
modely=FlorisVelocity/PFvelocity,
modellabel='FLORIS', modelcolor=floris_color, modelline=floris_line,
xscalar=distance_scalar, yscalar=velocity_scalar, sum=False)
plot_data_vs_model(ax=PosVelAx[1, 0], modelx=posrange, modely=GaussianVelocity/PFvelocity, title='7D',
xlabel='y/D', ylabel='Velocity (m/s)',
modellabel='Gauss', modelcolor=gauss_color, modelline=gauss_line,
xscalar=distance_scalar, yscalar=velocity_scalar, sum=False)
# plot_data_vs_model(ax=PosVelAx[1, 0], modelx=posrange, modely=FlorisVelocityTuned/PFvelocity, title='7D',
# xlabel='y/D', ylabel='Velocity (m/s)',
# modellabel='Floris Re-Tuned', modelcolor=floris_tuned_color,
# modelline=floris_tuned_line, xscalar=distance_scalar, yscalar=velocity_scalar, sum=False)
# plt.legend()
# plt.show()
# velocity downstream inline
posrange = np.linspace(-1.*rotor_diameter, 30.*rotor_diameter, num=1000)
for prob in probs:
prob['turbineY'] = np.array([0, 0])
GaussianVelocity = list()
FlorisVelocity = list()
FlorisVelocityTuned = list()
for pos2 in posrange:
for prob in probs:
prob['turbineX'] = np.array([0, pos2])
prob.run()
GaussianVelocity.append(list(probs[0]['wtVelocity0']))
FlorisVelocity.append(list(probs[1]['wtVelocity0']))
FlorisVelocityTuned.append(list(probs[2]['wtVelocity0']))
FlorisVelocity = np.array(FlorisVelocity)
GaussianVelocity = np.array(GaussianVelocity)
FlorisVelocityTuned = np.array(FlorisVelocityTuned)
plot_data_vs_model(ax=PosVelAx[1, 1], modelx=posrange,
modely=FlorisVelocity/PFvelocity, modellabel='FLORIS',
modelcolor=floris_color, modelline=floris_line,
xscalar=distance_scalar, yscalar=velocity_scalar, sum=False, front=False, legend=True)
plot_data_vs_model(ax=PosVelAx[1, 1], modelx=posrange, modely=GaussianVelocity/PFvelocity, title='Downstream (inline)',
xlabel='y/D', ylabel='Velocity (m/s)',
modellabel='Gauss', modelcolor=gauss_color, modelline=gauss_line,
xscalar=distance_scalar, yscalar=velocity_scalar, sum=False, front=False, legend=True)
# plot_data_vs_model(ax=PosVelAx[1, 1], modelx=posrange, modely=FlorisVelocityTuned/PFvelocity, title='Downstream (inline)',
# xlabel='y/D', ylabel='Velocity (m/s)',
# modellabel='Floris Re-Tuned', modelcolor=floris_tuned_color,
# modelline=floris_tuned_line, xscalar=distance_scalar, yscalar=velocity_scalar,
# sum=False, front=False, legend=True)
PosVelAx[1, 1].plot(np.array([7.0, 7.0]), np.array([0.0, 1.2]), ':k', label='Tuning Point')
plt.xlabel('x/D')
plt.ylabel('Velocity (m/s)')
# plt.legend(loc=4,labels=['FLORIS, SOWFA, BPA'])
plt.show() | apache-2.0 |
edmonto/dogs-and-cats | CatsDogsConvNet.py | 1 | 5014 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Feb 4 16:36:25 2017
@author: Tiffany
"""
import os, cv2, random
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib import ticker
import seaborn as sns
#matplotlib inline
from keras import backend as K
K.set_image_dim_ordering("th")
from keras.models import Sequential
from keras.layers import Input, Dropout, Flatten, Convolution2D, MaxPooling2D, Dense, Activation
from keras.optimizers import RMSprop
from keras.callbacks import ModelCheckpoint, Callback, EarlyStopping
from keras.utils import np_utils
TRAIN_DIR = './convnet-train/'
TEST_DIR = './convnet-test/'
ROWS = 64
COLS = 64
CHANNELS = 3
## image preprocessing
train_images = [TRAIN_DIR+i for i in os.listdir(TRAIN_DIR)] # use this for full dataset
train_dogs = [TRAIN_DIR+i for i in os.listdir(TRAIN_DIR) if 'dog' in i]
train_cats = [TRAIN_DIR+i for i in os.listdir(TRAIN_DIR) if 'cat' in i]
test_images = [TEST_DIR+i for i in os.listdir(TEST_DIR)]
train_images = train_dogs + train_cats
random.shuffle(train_images)
test_images = test_images
def read_image(file_path):
img = cv2.imread(file_path, cv2.IMREAD_COLOR) #cv2.IMREAD_GRAYSCALE
return cv2.resize(img, (ROWS, COLS), interpolation=cv2.INTER_CUBIC)
def prep_data(images):
count = len(images)
data = np.ndarray((count, CHANNELS, ROWS, COLS), dtype=np.uint8)
for i, image_file in enumerate(images):
image = read_image(image_file)
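        # image is (height, width, channels); transpose to channels-first to match
        # the Theano dim ordering ("th") set above and the shape of `data`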
data[i] = image.T
if i%250 == 0: print('Processed {} of {}'.format(i, count))
return data
train = prep_data(train_images)
test = prep_data(test_images)
print("Train shape: {}".format(train.shape))
print("Test shape: {}".format(test.shape))
## dogs/cats labels
labels = []
for i in train_images:
if 'dog' in i:
labels.append(1)
else:
labels.append(0)
## convnet model
optimizer = RMSprop(lr=1e-4)
objective = 'binary_crossentropy'
def catdog():
model = Sequential()
model.add(Convolution2D(32, 3, 3, border_mode='same', input_shape=(3, ROWS, COLS), activation='relu'))
model.add(Convolution2D(32, 3, 3, border_mode='same', activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Convolution2D(64, 3, 3, border_mode='same', activation='relu'))
model.add(Convolution2D(64, 3, 3, border_mode='same', activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Convolution2D(128, 3, 3, border_mode='same', activation='relu'))
model.add(Convolution2D(128, 3, 3, border_mode='same', activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Convolution2D(256, 3, 3, border_mode='same', activation='relu'))
model.add(Convolution2D(256, 3, 3, border_mode='same', activation='relu'))
# model.add(Convolution2D(256, 3, 3, border_mode='same', activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
# model.add(Convolution2D(256, 3, 3, border_mode='same', activation='relu'))
# model.add(Convolution2D(256, 3, 3, border_mode='same', activation='relu'))
# model.add(Convolution2D(256, 3, 3, border_mode='same', activation='relu'))
# model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(256, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(256, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(1))
model.add(Activation('sigmoid'))
model.compile(loss=objective, optimizer=optimizer, metrics=['accuracy'])
return model
model = catdog()
## train model
nb_epoch = 10
batch_size = 16
## Callback for loss logging per epoch
class LossHistory(Callback):
def on_train_begin(self, logs={}):
self.losses = []
self.val_losses = []
def on_epoch_end(self, batch, logs={}):
self.losses.append(logs.get('loss'))
self.val_losses.append(logs.get('val_loss'))
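# stop training once the validation loss has failed to improve for 3 consecutive epochs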
early_stopping = EarlyStopping(monitor='val_loss', patience=3, verbose=1, mode='auto')
def run_catdog():
history = LossHistory()
model.fit(train, labels, batch_size=batch_size, nb_epoch=nb_epoch,
validation_split=0.25, verbose=0, shuffle=True, callbacks=[history, early_stopping])
predictions = model.predict(test, verbose=0)
return predictions, history
predictions, history = run_catdog()
## Visualize Loss Function
loss = history.losses
val_loss = history.val_losses
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.title('VGG-16 Loss Trend')
plt.plot(loss, 'blue', label='Training Loss')
plt.plot(val_loss, 'green', label='Validation Loss')
plt.xticks(range(0,nb_epoch)[0::2])
plt.legend()
plt.show()
## Visualize Predictions
for i in range(0,10):
if predictions[i, 0] >= 0.5:
print('I am {:.2%} sure this is a Dog'.format(predictions[i][0]))
else:
print('I am {:.2%} sure this is a Cat'.format(1-predictions[i][0]))
plt.imshow(test[i].T)
plt.show() | mit |
gotomypc/scikit-learn | examples/svm/plot_separating_hyperplane_unbalanced.py | 329 | 1850 | """
=================================================
SVM: Separating hyperplane for unbalanced classes
=================================================
Find the optimal separating hyperplane using an SVC for classes that
are unbalanced.
We first find the separating plane with a plain SVC and then plot
(dashed) the separating hyperplane with automatically correction for
unbalanced classes.
.. currentmodule:: sklearn.linear_model
.. note::
This example will also work by replacing ``SVC(kernel="linear")``
with ``SGDClassifier(loss="hinge")``. Setting the ``loss`` parameter
of the :class:`SGDClassifier` equal to ``hinge`` will yield behaviour
such as that of a SVC with a linear kernel.
For example try instead of the ``SVC``::
clf = SGDClassifier(n_iter=100, alpha=0.01)
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
#from sklearn.linear_model import SGDClassifier
# we create 40 separable points
rng = np.random.RandomState(0)
n_samples_1 = 1000
n_samples_2 = 100
X = np.r_[1.5 * rng.randn(n_samples_1, 2),
0.5 * rng.randn(n_samples_2, 2) + [2, 2]]
y = [0] * (n_samples_1) + [1] * (n_samples_2)
# fit the model and get the separating hyperplane
clf = svm.SVC(kernel='linear', C=1.0)
clf.fit(X, y)
w = clf.coef_[0]
a = -w[0] / w[1]
xx = np.linspace(-5, 5)
yy = a * xx - clf.intercept_[0] / w[1]
# get the separating hyperplane using weighted classes
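# class_weight={1: 10} penalises mistakes on the rare class (label 1) ten times more
# heavily, shifting the decision boundary so fewer minority samples are misclassified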
wclf = svm.SVC(kernel='linear', class_weight={1: 10})
wclf.fit(X, y)
ww = wclf.coef_[0]
wa = -ww[0] / ww[1]
wyy = wa * xx - wclf.intercept_[0] / ww[1]
# plot separating hyperplanes and samples
h0 = plt.plot(xx, yy, 'k-', label='no weights')
h1 = plt.plot(xx, wyy, 'k--', label='with weights')
plt.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.Paired)
plt.legend()
plt.axis('tight')
plt.show()
| bsd-3-clause |
huongttlan/bokeh | examples/charts/file/bar.py | 37 | 2221 | from collections import OrderedDict
import numpy as np
import pandas as pd
from bokeh.charts import Bar, output_file, show, vplot, hplot
from bokeh.models import Range1d
from bokeh.sampledata.olympics2014 import data as original_data
width = 700
height = 500
legend_position = "top_right"
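# keep only countries that won at least one medal, sorted by total medal count (descending)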
data = {d['abbr']: d['medals'] for d in original_data['data'] if d['medals']['total'] > 0}
countries = sorted(data.keys(), key=lambda x: data[x]['total'], reverse=True)
gold = np.array([data[abbr]['gold'] for abbr in countries], dtype=np.float)
silver = np.array([data[abbr]['silver'] for abbr in countries], dtype=np.float)
bronze = np.array([data[abbr]['bronze'] for abbr in countries], dtype=np.float)
# dict input
medals = OrderedDict(bronze=bronze, silver=silver, gold=gold)
dict_stacked = Bar(
medals, countries, title="OrderedDict input | Stacked", legend=legend_position,
xlabel="countries", ylabel="medals", width=width, height=height,
stacked=True
)
# data frame input
df = pd.DataFrame(medals, index=countries)
df_grouped = Bar(
df, title="Data Frame input | Grouped", legend=legend_position,
xlabel="countries", ylabel="medals", width=width, height=height
)
# Numpy array input with different data to affect the ranges
random = np.random.rand(3, 8)
mixed = random - np.random.rand(3, 8)
categories = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h']
np_stacked = Bar(
random, cat=categories, title="Numpy Array input | Stacked",
ylabel="Random Number", xlabel="", width=width, height=height,
stacked=True
)
np_negative_grouped = Bar(
random * -1, cat=categories, title="All negative input | Grouped",
ylabel="Random Number", width=width, height=height
)
np_custom = Bar(
mixed, cat=categories, title="Custom range (start=-3, end=0.4)",
ylabel="Random Number", width=width, height=height,
continuous_range=Range1d(start=-3, end=0.4)
)
np_mixed_grouped = Bar(
mixed, cat=categories, title="Mixed-sign input | Grouped",
ylabel="Random Number", width=width, height=height
)
# collect and display
output_file("bar.html")
show(vplot(
hplot(dict_stacked, df_grouped),
hplot(np_stacked, np_negative_grouped),
hplot(np_mixed_grouped, np_custom),
))
| bsd-3-clause |
dmlc/xgboost | tests/python/generate_models.py | 1 | 4996 | import xgboost
import numpy as np
import os
kRounds = 2
kRows = 1000
kCols = 4
kForests = 2
kMaxDepth = 2
kClasses = 3
X = np.random.randn(kRows, kCols)
w = np.random.uniform(size=kRows)
version = xgboost.__version__
np.random.seed(1994)
target_dir = 'models'
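# Each model is saved both as a native Booster and through the scikit-learn wrapper,
# in binary and JSON form -- presumably so that later xgboost releases can test that
# they still load models produced by this version.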
def booster_bin(model):
return os.path.join(target_dir,
'xgboost-' + version + '.' + model + '.bin')
def booster_json(model):
return os.path.join(target_dir,
'xgboost-' + version + '.' + model + '.json')
def skl_bin(model):
return os.path.join(target_dir,
'xgboost_scikit-' + version + '.' + model + '.bin')
def skl_json(model):
return os.path.join(target_dir,
'xgboost_scikit-' + version + '.' + model + '.json')
def generate_regression_model():
print('Regression')
y = np.random.randn(kRows)
data = xgboost.DMatrix(X, label=y, weight=w)
booster = xgboost.train({'tree_method': 'hist',
'num_parallel_tree': kForests,
'max_depth': kMaxDepth},
num_boost_round=kRounds, dtrain=data)
booster.save_model(booster_bin('reg'))
booster.save_model(booster_json('reg'))
reg = xgboost.XGBRegressor(tree_method='hist',
num_parallel_tree=kForests,
max_depth=kMaxDepth,
n_estimators=kRounds)
reg.fit(X, y, w)
reg.save_model(skl_bin('reg'))
reg.save_model(skl_json('reg'))
def generate_logistic_model():
print('Logistic')
y = np.random.randint(0, 2, size=kRows)
assert y.max() == 1 and y.min() == 0
for objective, name in [('binary:logistic', 'logit'), ('binary:logitraw', 'logitraw')]:
data = xgboost.DMatrix(X, label=y, weight=w)
booster = xgboost.train({'tree_method': 'hist',
'num_parallel_tree': kForests,
'max_depth': kMaxDepth,
'objective': objective},
num_boost_round=kRounds, dtrain=data)
booster.save_model(booster_bin(name))
booster.save_model(booster_json(name))
reg = xgboost.XGBClassifier(tree_method='hist',
num_parallel_tree=kForests,
max_depth=kMaxDepth,
n_estimators=kRounds,
objective=objective)
reg.fit(X, y, w)
reg.save_model(skl_bin(name))
reg.save_model(skl_json(name))
def generate_classification_model():
print('Classification')
y = np.random.randint(0, kClasses, size=kRows)
data = xgboost.DMatrix(X, label=y, weight=w)
booster = xgboost.train({'num_class': kClasses,
'tree_method': 'hist',
'num_parallel_tree': kForests,
'max_depth': kMaxDepth},
num_boost_round=kRounds, dtrain=data)
booster.save_model(booster_bin('cls'))
booster.save_model(booster_json('cls'))
cls = xgboost.XGBClassifier(tree_method='hist',
num_parallel_tree=kForests,
max_depth=kMaxDepth,
n_estimators=kRounds)
cls.fit(X, y, w)
cls.save_model(skl_bin('cls'))
cls.save_model(skl_json('cls'))
def generate_ranking_model():
print('Learning to Rank')
y = np.random.randint(5, size=kRows)
w = np.random.uniform(size=20)
g = np.repeat(50, 20)
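    # 20 query groups of 50 rows each (20 * 50 == kRows), with one weight per group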
data = xgboost.DMatrix(X, y, weight=w)
data.set_group(g)
booster = xgboost.train({'objective': 'rank:ndcg',
'num_parallel_tree': kForests,
'tree_method': 'hist',
'max_depth': kMaxDepth},
num_boost_round=kRounds,
dtrain=data)
booster.save_model(booster_bin('ltr'))
booster.save_model(booster_json('ltr'))
ranker = xgboost.sklearn.XGBRanker(n_estimators=kRounds,
tree_method='hist',
objective='rank:ndcg',
max_depth=kMaxDepth,
num_parallel_tree=kForests)
ranker.fit(X, y, g, sample_weight=w)
ranker.save_model(skl_bin('ltr'))
ranker.save_model(skl_json('ltr'))
def write_versions():
versions = {'numpy': np.__version__,
'xgboost': version}
with open(os.path.join(target_dir, 'version'), 'w') as fd:
fd.write(str(versions))
if __name__ == '__main__':
if not os.path.exists(target_dir):
os.mkdir(target_dir)
generate_regression_model()
generate_logistic_model()
generate_classification_model()
generate_ranking_model()
write_versions()
| apache-2.0 |
mfjb/scikit-learn | benchmarks/bench_plot_ward.py | 290 | 1260 | """
Benchmark scikit-learn's Ward implementation compared to SciPy's
"""
import time
import numpy as np
from scipy.cluster import hierarchy
import pylab as pl
from sklearn.cluster import AgglomerativeClustering
ward = AgglomerativeClustering(n_clusters=3, linkage='ward')
n_samples = np.logspace(.5, 3, 9)
n_features = np.logspace(1, 3.5, 7)
N_samples, N_features = np.meshgrid(n_samples,
n_features)
scikits_time = np.zeros(N_samples.shape)
scipy_time = np.zeros(N_samples.shape)
for i, n in enumerate(n_samples):
for j, p in enumerate(n_features):
X = np.random.normal(size=(n, p))
t0 = time.time()
ward.fit(X)
scikits_time[j, i] = time.time() - t0
t0 = time.time()
hierarchy.ward(X)
scipy_time[j, i] = time.time() - t0
ratio = scikits_time / scipy_time
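# ratio > 1 means scikit-learn's Ward is slower than SciPy's for that (n_samples, n_features) pair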
pl.figure("scikit-learn Ward's method benchmark results")
pl.imshow(np.log(ratio), aspect='auto', origin="lower")
pl.colorbar()
pl.contour(ratio, levels=[1, ], colors='k')
pl.yticks(range(len(n_features)), n_features.astype(np.int))
pl.ylabel('N features')
pl.xticks(range(len(n_samples)), n_samples.astype(np.int))
pl.xlabel('N samples')
pl.title("Scikit's time, in units of scipy time (log)")
pl.show()
| bsd-3-clause |
nvoron23/statsmodels | statsmodels/tsa/tests/test_x13.py | 27 | 1877 | from nose import SkipTest
from numpy.testing import assert_
from statsmodels.tsa.base.datetools import dates_from_range
from statsmodels.tsa.x13 import _find_x12, x13_arima_select_order
x13path = _find_x12()
if x13path is False:
_have_x13 = False
else:
_have_x13 = True
class TestX13(object):
@classmethod
def setupClass(cls):
if not _have_x13:
raise SkipTest('X13/X12 not available')
import pandas as pd
from statsmodels.datasets import macrodata, co2
dta = macrodata.load_pandas().data
dates = dates_from_range('1959Q1', '2009Q3')
index = pd.DatetimeIndex(dates)
dta.index = index
cls.quarterly_data = dta.dropna()
dta = co2.load_pandas().data
dta['co2'] = dta.co2.interpolate()
cls.monthly_data = dta.resample('M')
cls.monthly_start_data = dta.resample('MS')
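        # resample to month-end ('M') and month-start ('MS') frequencies so both
        # date-anchoring conventions are exercised by the tests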
def test_x13_arima_select_order(self):
res = x13_arima_select_order(self.monthly_data)
assert_(isinstance(res.order, tuple))
assert_(isinstance(res.sorder, tuple))
res = x13_arima_select_order(self.monthly_start_data)
assert_(isinstance(res.order, tuple))
assert_(isinstance(res.sorder, tuple))
res = x13_arima_select_order(self.monthly_data.co2)
assert_(isinstance(res.order, tuple))
assert_(isinstance(res.sorder, tuple))
res = x13_arima_select_order(self.monthly_start_data.co2)
assert_(isinstance(res.order, tuple))
assert_(isinstance(res.sorder, tuple))
res = x13_arima_select_order(self.quarterly_data[['realgdp']])
assert_(isinstance(res.order, tuple))
assert_(isinstance(res.sorder, tuple))
res = x13_arima_select_order(self.quarterly_data.realgdp)
assert_(isinstance(res.order, tuple))
assert_(isinstance(res.sorder, tuple))
| bsd-3-clause |
astropy/astropy | astropy/visualization/tests/test_lupton_rgb.py | 7 | 9224 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Tests for RGB Images
"""
import sys
import os
import tempfile
import pytest
import numpy as np
from numpy.testing import assert_equal
from astropy.convolution import convolve, Gaussian2DKernel
from astropy.visualization import lupton_rgb
from astropy.utils.compat.optional_deps import HAS_MATPLOTLIB # noqa
# Set display=True to get matplotlib imshow windows to help with debugging.
display = False
def display_rgb(rgb, title=None):
"""Display an rgb image using matplotlib (useful for debugging)"""
import matplotlib.pyplot as plt
plt.imshow(rgb, interpolation='nearest', origin='lower')
if title:
plt.title(title)
plt.show()
return plt
def saturate(image, satValue):
"""
Return image with all points above satValue set to NaN.
Simulates saturation on an image, so we can test 'replace_saturated_pixels'
"""
result = image.copy()
saturated = image > satValue
result[saturated] = np.nan
return result
def random_array(dtype, N=100):
    return np.array(np.random.random(N)*100, dtype=dtype)
def test_compute_intensity_1_float():
image_r = random_array(np.float64)
intensity = lupton_rgb.compute_intensity(image_r)
assert image_r.dtype == intensity.dtype
assert_equal(image_r, intensity)
def test_compute_intensity_1_uint():
image_r = random_array(np.uint8)
intensity = lupton_rgb.compute_intensity(image_r)
assert image_r.dtype == intensity.dtype
assert_equal(image_r, intensity)
def test_compute_intensity_3_float():
image_r = random_array(np.float64)
image_g = random_array(np.float64)
image_b = random_array(np.float64)
intensity = lupton_rgb.compute_intensity(image_r, image_g, image_b)
assert image_r.dtype == intensity.dtype
assert_equal(intensity, (image_r+image_g+image_b)/3.0)
def test_compute_intensity_3_uint():
image_r = random_array(np.uint8)
image_g = random_array(np.uint8)
image_b = random_array(np.uint8)
intensity = lupton_rgb.compute_intensity(image_r, image_g, image_b)
assert image_r.dtype == intensity.dtype
assert_equal(intensity, (image_r+image_g+image_b)//3)
class TestLuptonRgb:
"""A test case for Rgb"""
def setup_method(self, method):
np.random.seed(1000) # so we always get the same images.
self.min_, self.stretch_, self.Q = 0, 5, 20 # asinh
width, height = 85, 75
self.width = width
self.height = height
shape = (width, height)
image_r = np.zeros(shape)
image_g = np.zeros(shape)
image_b = np.zeros(shape)
# pixel locations, values and colors
points = [[15, 15], [50, 45], [30, 30], [45, 15]]
values = [1000, 5500, 600, 20000]
g_r = [1.0, -1.0, 1.0, 1.0]
r_i = [2.0, -0.5, 2.5, 1.0]
# Put pixels in the images.
for p, v, gr, ri in zip(points, values, g_r, r_i):
image_r[p[0], p[1]] = v*pow(10, 0.4*ri)
image_g[p[0], p[1]] = v*pow(10, 0.4*gr)
image_b[p[0], p[1]] = v
# convolve the image with a reasonable PSF, and add Gaussian background noise
def convolve_with_noise(image, psf):
convolvedImage = convolve(image, psf, boundary='extend', normalize_kernel=True)
randomImage = np.random.normal(0, 2, image.shape)
return randomImage + convolvedImage
psf = Gaussian2DKernel(2.5)
self.image_r = convolve_with_noise(image_r, psf)
self.image_g = convolve_with_noise(image_g, psf)
self.image_b = convolve_with_noise(image_b, psf)
def test_Asinh(self):
"""Test creating an RGB image using an asinh stretch"""
asinhMap = lupton_rgb.AsinhMapping(self.min_, self.stretch_, self.Q)
rgbImage = asinhMap.make_rgb_image(self.image_r, self.image_g, self.image_b)
if display:
display_rgb(rgbImage, title=sys._getframe().f_code.co_name)
def test_AsinhZscale(self):
"""Test creating an RGB image using an asinh stretch estimated using zscale"""
map = lupton_rgb.AsinhZScaleMapping(self.image_r, self.image_g, self.image_b)
rgbImage = map.make_rgb_image(self.image_r, self.image_g, self.image_b)
if display:
display_rgb(rgbImage, title=sys._getframe().f_code.co_name)
def test_AsinhZscaleIntensity(self):
"""Test creating an RGB image using an asinh stretch estimated using zscale on the intensity"""
map = lupton_rgb.AsinhZScaleMapping(self.image_r, self.image_g, self.image_b)
rgbImage = map.make_rgb_image(self.image_r, self.image_g, self.image_b)
if display:
display_rgb(rgbImage, title=sys._getframe().f_code.co_name)
def test_AsinhZscaleIntensityPedestal(self):
"""Test creating an RGB image using an asinh stretch estimated using zscale on the intensity
where the images each have a pedestal added"""
pedestal = [100, 400, -400]
self.image_r += pedestal[0]
self.image_g += pedestal[1]
self.image_b += pedestal[2]
map = lupton_rgb.AsinhZScaleMapping(self.image_r, self.image_g, self.image_b, pedestal=pedestal)
rgbImage = map.make_rgb_image(self.image_r, self.image_g, self.image_b)
if display:
display_rgb(rgbImage, title=sys._getframe().f_code.co_name)
def test_AsinhZscaleIntensityBW(self):
"""Test creating a black-and-white image using an asinh stretch estimated
using zscale on the intensity"""
map = lupton_rgb.AsinhZScaleMapping(self.image_r)
rgbImage = map.make_rgb_image(self.image_r, self.image_r, self.image_r)
if display:
display_rgb(rgbImage, title=sys._getframe().f_code.co_name)
@pytest.mark.skipif('not HAS_MATPLOTLIB')
def test_make_rgb(self):
"""Test the function that does it all"""
satValue = 1000.0
with tempfile.NamedTemporaryFile(suffix=".png") as temp:
red = saturate(self.image_r, satValue)
green = saturate(self.image_g, satValue)
blue = saturate(self.image_b, satValue)
lupton_rgb.make_lupton_rgb(red, green, blue, self.min_, self.stretch_, self.Q, filename=temp)
assert os.path.exists(temp.name)
def test_make_rgb_saturated_fix(self):
pytest.skip('saturation correction is not implemented')
satValue = 1000.0
# TODO: Cannot test with these options yet, as that part of the code is not implemented.
with tempfile.NamedTemporaryFile(suffix=".png") as temp:
red = saturate(self.image_r, satValue)
green = saturate(self.image_g, satValue)
blue = saturate(self.image_b, satValue)
lupton_rgb.make_lupton_rgb(red, green, blue, self.min_, self.stretch_, self.Q,
saturated_border_width=1, saturated_pixel_value=2000,
filename=temp)
def test_linear(self):
"""Test using a specified linear stretch"""
map = lupton_rgb.LinearMapping(-8.45, 13.44)
rgbImage = map.make_rgb_image(self.image_r, self.image_g, self.image_b)
if display:
display_rgb(rgbImage, title=sys._getframe().f_code.co_name)
def test_linear_min_max(self):
"""Test using a min/max linear stretch determined from one image"""
map = lupton_rgb.LinearMapping(image=self.image_b)
rgbImage = map.make_rgb_image(self.image_r, self.image_g, self.image_b)
if display:
display_rgb(rgbImage, title=sys._getframe().f_code.co_name)
def test_saturated(self):
"""Test interpolationolating saturated pixels"""
pytest.skip('replaceSaturatedPixels is not implemented in astropy yet')
satValue = 1000.0
self.image_r = saturate(self.image_r, satValue)
self.image_g = saturate(self.image_g, satValue)
self.image_b = saturate(self.image_b, satValue)
lupton_rgb.replaceSaturatedPixels(self.image_r, self.image_g, self.image_b, 1, 2000)
# Check that we replaced those NaNs with some reasonable value
assert np.isfinite(self.image_r.getImage().getArray()).all()
assert np.isfinite(self.image_g.getImage().getArray()).all()
assert np.isfinite(self.image_b.getImage().getArray()).all()
# Prepare for generating an output file
        self.imagesR = self.imagesR.getImage()
        self.imagesG = self.imagesG.getImage()
        self.imagesB = self.imagesB.getImage()
asinhMap = lupton_rgb.AsinhMapping(self.min_, self.stretch_, self.Q)
rgbImage = asinhMap.make_rgb_image(self.image_r, self.image_g, self.image_b)
if display:
display_rgb(rgbImage, title=sys._getframe().f_code.co_name)
def test_different_shapes_asserts(self):
with pytest.raises(ValueError) as excinfo:
# just swap the dimensions to get a differently-shaped 'r'
image_r = self.image_r.reshape(self.height, self.width)
lupton_rgb.make_lupton_rgb(image_r, self.image_g, self.image_b)
assert "shapes must match" in str(excinfo.value)
| bsd-3-clause |
zzw922cn/Automatic_Speech_Recognition | speechvalley/feature/libri/libri_preprocess.py | 1 | 6953 | # encoding: utf-8
# ******************************************************
# Author : zzw922cn
# Last modified: 2017-12-09 11:00
# Email : [email protected]
# Filename : libri_preprocess.py
# Description : Feature preprocessing for LibriSpeech dataset
# ******************************************************
import os
import glob
import sklearn
import argparse
import numpy as np
import scipy.io.wavfile as wav
from sklearn import preprocessing
from subprocess import check_call, CalledProcessError
from speechvalley.feature.core import calcfeat_delta_delta
def preprocess(root_directory):
"""
Function to walk through the directory and convert flac to wav files
"""
try:
check_call(['flac'])
except OSError:
raise OSError("""Flac not installed. Install using apt-get install flac""")
for subdir, dirs, files in os.walk(root_directory):
for f in files:
filename = os.path.join(subdir, f)
if f.endswith('.flac'):
try:
check_call(['flac', '-d', filename])
os.remove(filename)
except CalledProcessError as e:
print("Failed to convert file {}".format(filename))
elif f.endswith('.TXT'):
os.remove(filename)
elif f.endswith('.txt'):
with open(filename, 'r') as fp:
lines = fp.readlines()
for line in lines:
sub_n = line.split(' ')[0] + '.label'
subfile = os.path.join(subdir, sub_n)
sub_c = ' '.join(line.split(' ')[1:])
sub_c = sub_c.lower()
with open(subfile, 'w') as sp:
sp.write(sub_c)
elif f.endswith('.wav'):
if not os.path.isfile(os.path.splitext(filename)[0] +
'.label'):
raise ValueError(".label file not found for {}".format(filename))
else:
pass
def wav2feature(root_directory, save_directory, name, win_len, win_step, mode, feature_len, seq2seq, save):
count = 0
dirid = 0
level = 'cha' if seq2seq is False else 'seq2seq'
data_dir = os.path.join(root_directory, name)
preprocess(data_dir)
for subdir, dirs, files in os.walk(data_dir):
for f in files:
fullFilename = os.path.join(subdir, f)
filenameNoSuffix = os.path.splitext(fullFilename)[0]
if f.endswith('.wav'):
rate = None
sig = None
try:
(rate,sig)= wav.read(fullFilename)
except ValueError as e:
                    if str(e) == "File format 'NIST'... not understood.":
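                        # Sndfile presumably comes from scikits.audiolab; that import is
                        # not present in this script, so this NIST fallback will fail as-is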
sf = Sndfile(fullFilename, 'r')
nframes = sf.nframes
sig = sf.read_frames(nframes)
rate = sf.samplerate
feat = calcfeat_delta_delta(sig,rate,win_length=win_len,win_step=win_step,mode=mode,feature_len=feature_len)
feat = preprocessing.scale(feat)
feat = np.transpose(feat)
print(feat.shape)
labelFilename = filenameNoSuffix + '.label'
with open(labelFilename,'r') as f:
characters = f.readline().strip().lower()
targets = []
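                # character labels: space -> 0, 'a'-'z' -> 1-26, apostrophe -> 27;
                # in seq2seq mode, 28 and 29 mark the start and end of the sequence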
if seq2seq is True:
targets.append(28)
for c in characters:
if c == ' ':
targets.append(0)
elif c == "'":
targets.append(27)
else:
targets.append(ord(c)-96)
if seq2seq is True:
targets.append(29)
print(targets)
if save:
count+=1
if count%4000 == 0:
dirid += 1
print('file index:',count)
print('dir index:',dirid)
label_dir = os.path.join(save_directory, level, name, str(dirid), 'label')
feat_dir = os.path.join(save_directory, level, name, str(dirid), 'feature')
if not os.path.isdir(label_dir):
os.makedirs(label_dir)
if not os.path.isdir(feat_dir):
os.makedirs(feat_dir)
featureFilename = os.path.join(feat_dir, filenameNoSuffix.split('/')[-1] +'.npy')
np.save(featureFilename,feat)
t_f = os.path.join(label_dir, filenameNoSuffix.split('/')[-1] +'.npy')
print(t_f)
np.save(t_f,targets)
if __name__ == "__main__":
parser = argparse.ArgumentParser(prog='libri_preprocess',
description='Script to preprocess libri data')
parser.add_argument("path", help="Directory of LibriSpeech dataset", type=str)
parser.add_argument("save", help="Directory where preprocessed arrays are to be saved",
type=str)
parser.add_argument("-n", "--name", help="Name of the dataset",
choices=['dev-clean', 'dev-other', 'test-clean',
'test-other', 'train-clean-100', 'train-clean-360',
'train-other-500'], type=str, default='dev-clean')
parser.add_argument("-m", "--mode", help="Mode",
choices=['mfcc', 'fbank'],
type=str, default='mfcc')
parser.add_argument("--featlen", help='Features length', type=int, default=13)
parser.add_argument("-s", "--seq2seq", default=False,
help="set this flag to use seq2seq", action="store_true")
parser.add_argument("-wl", "--winlen", type=float,
default=0.02, help="specify the window length of feature")
parser.add_argument("-ws", "--winstep", type=float,
default=0.01, help="specify the window step length of feature")
args = parser.parse_args()
root_directory = args.path
save_directory = args.save
mode = args.mode
feature_len = args.featlen
seq2seq = args.seq2seq
name = args.name
win_len = args.winlen
win_step = args.winstep
if root_directory == '.':
root_directory = os.getcwd()
if save_directory == '.':
save_directory = os.getcwd()
if not os.path.isdir(root_directory):
raise ValueError("LibriSpeech Directory does not exist!")
if not os.path.isdir(save_directory):
os.makedirs(save_directory)
wav2feature(root_directory, save_directory, name=name, win_len=win_len, win_step=win_step,
mode=mode, feature_len=feature_len, seq2seq=seq2seq, save=True)
| mit |
cbertinato/pandas | pandas/tests/indexes/multi/test_names.py | 1 | 3917 | import pytest
import pandas as pd
from pandas import MultiIndex
import pandas.util.testing as tm
def check_level_names(index, names):
assert [level.name for level in index.levels] == list(names)
def test_slice_keep_name():
x = MultiIndex.from_tuples([('a', 'b'), (1, 2), ('c', 'd')],
names=['x', 'y'])
assert x[1:].names == x.names
def test_index_name_retained():
# GH9857
result = pd.DataFrame({'x': [1, 2, 6],
'y': [2, 2, 8],
'z': [-5, 0, 5]})
result = result.set_index('z')
result.loc[10] = [9, 10]
df_expected = pd.DataFrame({'x': [1, 2, 6, 9],
'y': [2, 2, 8, 10],
'z': [-5, 0, 5, 10]})
df_expected = df_expected.set_index('z')
tm.assert_frame_equal(result, df_expected)
def test_changing_names(idx):
# names should be applied to levels
level_names = [level.name for level in idx.levels]
check_level_names(idx, idx.names)
view = idx.view()
copy = idx.copy()
shallow_copy = idx._shallow_copy()
# changing names should change level names on object
new_names = [name + "a" for name in idx.names]
idx.names = new_names
check_level_names(idx, new_names)
# but not on copies
check_level_names(view, level_names)
check_level_names(copy, level_names)
check_level_names(shallow_copy, level_names)
# and copies shouldn't change original
shallow_copy.names = [name + "c" for name in shallow_copy.names]
check_level_names(idx, new_names)
def test_take_preserve_name(idx):
taken = idx.take([3, 0, 1])
assert taken.names == idx.names
def test_copy_names():
# Check that adding a "names" parameter to the copy is honored
# GH14302
multi_idx = pd.Index([(1, 2), (3, 4)], names=['MyName1', 'MyName2'])
multi_idx1 = multi_idx.copy()
assert multi_idx.equals(multi_idx1)
assert multi_idx.names == ['MyName1', 'MyName2']
assert multi_idx1.names == ['MyName1', 'MyName2']
multi_idx2 = multi_idx.copy(names=['NewName1', 'NewName2'])
assert multi_idx.equals(multi_idx2)
assert multi_idx.names == ['MyName1', 'MyName2']
assert multi_idx2.names == ['NewName1', 'NewName2']
multi_idx3 = multi_idx.copy(name=['NewName1', 'NewName2'])
assert multi_idx.equals(multi_idx3)
assert multi_idx.names == ['MyName1', 'MyName2']
assert multi_idx3.names == ['NewName1', 'NewName2']
def test_names(idx, index_names):
# names are assigned in setup
names = index_names
level_names = [level.name for level in idx.levels]
assert names == level_names
# setting bad names on existing
index = idx
with pytest.raises(ValueError, match="^Length of names"):
setattr(index, "names", list(index.names) + ["third"])
with pytest.raises(ValueError, match="^Length of names"):
setattr(index, "names", [])
# initializing with bad names (should always be equivalent)
major_axis, minor_axis = idx.levels
major_codes, minor_codes = idx.codes
with pytest.raises(ValueError, match="^Length of names"):
MultiIndex(levels=[major_axis, minor_axis],
codes=[major_codes, minor_codes],
names=['first'])
with pytest.raises(ValueError, match="^Length of names"):
MultiIndex(levels=[major_axis, minor_axis],
codes=[major_codes, minor_codes],
names=['first', 'second', 'third'])
# names are assigned
index.names = ["a", "b"]
ind_names = list(index.names)
level_names = [level.name for level in index.levels]
assert ind_names == level_names
def test_duplicate_level_names_access_raises(idx):
# GH19029
idx.names = ['foo', 'foo']
with pytest.raises(ValueError, match='name foo occurs multiple times'):
idx._get_level_number('foo')
| bsd-3-clause |
andaag/scikit-learn | sklearn/preprocessing/tests/test_imputation.py | 213 | 11911 | import numpy as np
from scipy import sparse
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_true
from sklearn.preprocessing.imputation import Imputer
from sklearn.pipeline import Pipeline
from sklearn import grid_search
from sklearn import tree
from sklearn.random_projection import sparse_random_matrix
def _check_statistics(X, X_true,
strategy, statistics, missing_values):
"""Utility function for testing imputation for a given strategy.
Test:
- along the two axes
- with dense and sparse arrays
Check that:
- the statistics (mean, median, mode) are correct
- the missing values are imputed correctly"""
err_msg = "Parameters: strategy = %s, missing_values = %s, " \
"axis = {0}, sparse = {1}" % (strategy, missing_values)
# Normal matrix, axis = 0
imputer = Imputer(missing_values, strategy=strategy, axis=0)
X_trans = imputer.fit(X).transform(X.copy())
assert_array_equal(imputer.statistics_, statistics,
err_msg.format(0, False))
assert_array_equal(X_trans, X_true, err_msg.format(0, False))
# Normal matrix, axis = 1
imputer = Imputer(missing_values, strategy=strategy, axis=1)
imputer.fit(X.transpose())
if np.isnan(statistics).any():
assert_raises(ValueError, imputer.transform, X.copy().transpose())
else:
X_trans = imputer.transform(X.copy().transpose())
assert_array_equal(X_trans, X_true.transpose(),
err_msg.format(1, False))
# Sparse matrix, axis = 0
imputer = Imputer(missing_values, strategy=strategy, axis=0)
imputer.fit(sparse.csc_matrix(X))
X_trans = imputer.transform(sparse.csc_matrix(X.copy()))
if sparse.issparse(X_trans):
X_trans = X_trans.toarray()
assert_array_equal(imputer.statistics_, statistics,
err_msg.format(0, True))
assert_array_equal(X_trans, X_true, err_msg.format(0, True))
# Sparse matrix, axis = 1
imputer = Imputer(missing_values, strategy=strategy, axis=1)
imputer.fit(sparse.csc_matrix(X.transpose()))
if np.isnan(statistics).any():
assert_raises(ValueError, imputer.transform,
sparse.csc_matrix(X.copy().transpose()))
else:
X_trans = imputer.transform(sparse.csc_matrix(X.copy().transpose()))
if sparse.issparse(X_trans):
X_trans = X_trans.toarray()
assert_array_equal(X_trans, X_true.transpose(),
err_msg.format(1, True))
def test_imputation_shape():
# Verify the shapes of the imputed matrix for different strategies.
X = np.random.randn(10, 2)
X[::2] = np.nan
for strategy in ['mean', 'median', 'most_frequent']:
imputer = Imputer(strategy=strategy)
X_imputed = imputer.fit_transform(X)
assert_equal(X_imputed.shape, (10, 2))
X_imputed = imputer.fit_transform(sparse.csr_matrix(X))
assert_equal(X_imputed.shape, (10, 2))
def test_imputation_mean_median_only_zero():
# Test imputation using the mean and median strategies, when
# missing_values == 0.
X = np.array([
[np.nan, 0, 0, 0, 5],
[np.nan, 1, 0, np.nan, 3],
[np.nan, 2, 0, 0, 0],
[np.nan, 6, 0, 5, 13],
])
X_imputed_mean = np.array([
[3, 5],
[1, 3],
[2, 7],
[6, 13],
])
statistics_mean = [np.nan, 3, np.nan, np.nan, 7]
# Behaviour of median with NaN is undefined, e.g. different results in
# np.median and np.ma.median
X_for_median = X[:, [0, 1, 2, 4]]
X_imputed_median = np.array([
[2, 5],
[1, 3],
[2, 5],
[6, 13],
])
statistics_median = [np.nan, 2, np.nan, 5]
_check_statistics(X, X_imputed_mean, "mean", statistics_mean, 0)
_check_statistics(X_for_median, X_imputed_median, "median",
statistics_median, 0)
def test_imputation_mean_median():
# Test imputation using the mean and median strategies, when
# missing_values != 0.
rng = np.random.RandomState(0)
dim = 10
dec = 10
shape = (dim * dim, dim + dec)
zeros = np.zeros(shape[0])
values = np.arange(1, shape[0]+1)
values[4::2] = - values[4::2]
tests = [("mean", "NaN", lambda z, v, p: np.mean(np.hstack((z, v)))),
("mean", 0, lambda z, v, p: np.mean(v)),
("median", "NaN", lambda z, v, p: np.median(np.hstack((z, v)))),
("median", 0, lambda z, v, p: np.median(v))]
for strategy, test_missing_values, true_value_fun in tests:
X = np.empty(shape)
X_true = np.empty(shape)
true_statistics = np.empty(shape[1])
# Create a matrix X with columns
# - with only zeros,
# - with only missing values
# - with zeros, missing values and values
# And a matrix X_true containing all true values
for j in range(shape[1]):
nb_zeros = (j - dec + 1 > 0) * (j - dec + 1) * (j - dec + 1)
nb_missing_values = max(shape[0] + dec * dec
- (j + dec) * (j + dec), 0)
nb_values = shape[0] - nb_zeros - nb_missing_values
z = zeros[:nb_zeros]
p = np.repeat(test_missing_values, nb_missing_values)
v = values[rng.permutation(len(values))[:nb_values]]
true_statistics[j] = true_value_fun(z, v, p)
# Create the columns
X[:, j] = np.hstack((v, z, p))
if 0 == test_missing_values:
X_true[:, j] = np.hstack((v,
np.repeat(
true_statistics[j],
nb_missing_values + nb_zeros)))
else:
X_true[:, j] = np.hstack((v,
z,
np.repeat(true_statistics[j],
nb_missing_values)))
# Shuffle them the same way
np.random.RandomState(j).shuffle(X[:, j])
np.random.RandomState(j).shuffle(X_true[:, j])
# Mean doesn't support columns containing NaNs, median does
if strategy == "median":
cols_to_keep = ~np.isnan(X_true).any(axis=0)
else:
cols_to_keep = ~np.isnan(X_true).all(axis=0)
X_true = X_true[:, cols_to_keep]
_check_statistics(X, X_true, strategy,
true_statistics, test_missing_values)
def test_imputation_median_special_cases():
# Test median imputation with sparse boundary cases
X = np.array([
[0, np.nan, np.nan], # odd: implicit zero
[5, np.nan, np.nan], # odd: explicit nonzero
[0, 0, np.nan], # even: average two zeros
[-5, 0, np.nan], # even: avg zero and neg
[0, 5, np.nan], # even: avg zero and pos
[4, 5, np.nan], # even: avg nonzeros
[-4, -5, np.nan], # even: avg negatives
[-1, 2, np.nan], # even: crossing neg and pos
]).transpose()
X_imputed_median = np.array([
[0, 0, 0],
[5, 5, 5],
[0, 0, 0],
[-5, 0, -2.5],
[0, 5, 2.5],
[4, 5, 4.5],
[-4, -5, -4.5],
[-1, 2, .5],
]).transpose()
statistics_median = [0, 5, 0, -2.5, 2.5, 4.5, -4.5, .5]
_check_statistics(X, X_imputed_median, "median",
statistics_median, 'NaN')
def test_imputation_most_frequent():
# Test imputation using the most-frequent strategy.
X = np.array([
[-1, -1, 0, 5],
[-1, 2, -1, 3],
[-1, 1, 3, -1],
[-1, 2, 3, 7],
])
X_true = np.array([
[2, 0, 5],
[2, 3, 3],
[1, 3, 3],
[2, 3, 7],
])
# scipy.stats.mode, used in Imputer, doesn't return the first most
# frequent as promised in the doc but the lowest most frequent. When this
# test will fail after an update of scipy, Imputer will need to be updated
# to be consistent with the new (correct) behaviour
_check_statistics(X, X_true, "most_frequent", [np.nan, 2, 3, 3], -1)
def test_imputation_pipeline_grid_search():
# Test imputation within a pipeline + gridsearch.
pipeline = Pipeline([('imputer', Imputer(missing_values=0)),
('tree', tree.DecisionTreeRegressor(random_state=0))])
parameters = {
'imputer__strategy': ["mean", "median", "most_frequent"],
'imputer__axis': [0, 1]
}
l = 100
X = sparse_random_matrix(l, l, density=0.10)
Y = sparse_random_matrix(l, 1, density=0.10).toarray()
gs = grid_search.GridSearchCV(pipeline, parameters)
gs.fit(X, Y)
def test_imputation_pickle():
# Test for pickling imputers.
import pickle
l = 100
X = sparse_random_matrix(l, l, density=0.10)
for strategy in ["mean", "median", "most_frequent"]:
imputer = Imputer(missing_values=0, strategy=strategy)
imputer.fit(X)
imputer_pickled = pickle.loads(pickle.dumps(imputer))
assert_array_equal(imputer.transform(X.copy()),
imputer_pickled.transform(X.copy()),
"Fail to transform the data after pickling "
"(strategy = %s)" % (strategy))
def test_imputation_copy():
# Test imputation with copy
X_orig = sparse_random_matrix(5, 5, density=0.75, random_state=0)
# copy=True, dense => copy
X = X_orig.copy().toarray()
imputer = Imputer(missing_values=0, strategy="mean", copy=True)
Xt = imputer.fit(X).transform(X)
Xt[0, 0] = -1
assert_false(np.all(X == Xt))
# copy=True, sparse csr => copy
X = X_orig.copy()
imputer = Imputer(missing_values=X.data[0], strategy="mean", copy=True)
Xt = imputer.fit(X).transform(X)
Xt.data[0] = -1
assert_false(np.all(X.data == Xt.data))
# copy=False, dense => no copy
X = X_orig.copy().toarray()
imputer = Imputer(missing_values=0, strategy="mean", copy=False)
Xt = imputer.fit(X).transform(X)
Xt[0, 0] = -1
assert_true(np.all(X == Xt))
# copy=False, sparse csr, axis=1 => no copy
X = X_orig.copy()
imputer = Imputer(missing_values=X.data[0], strategy="mean",
copy=False, axis=1)
Xt = imputer.fit(X).transform(X)
Xt.data[0] = -1
assert_true(np.all(X.data == Xt.data))
# copy=False, sparse csc, axis=0 => no copy
X = X_orig.copy().tocsc()
imputer = Imputer(missing_values=X.data[0], strategy="mean",
copy=False, axis=0)
Xt = imputer.fit(X).transform(X)
Xt.data[0] = -1
assert_true(np.all(X.data == Xt.data))
# copy=False, sparse csr, axis=0 => copy
X = X_orig.copy()
imputer = Imputer(missing_values=X.data[0], strategy="mean",
copy=False, axis=0)
Xt = imputer.fit(X).transform(X)
Xt.data[0] = -1
assert_false(np.all(X.data == Xt.data))
# copy=False, sparse csc, axis=1 => copy
X = X_orig.copy().tocsc()
imputer = Imputer(missing_values=X.data[0], strategy="mean",
copy=False, axis=1)
Xt = imputer.fit(X).transform(X)
Xt.data[0] = -1
assert_false(np.all(X.data == Xt.data))
# copy=False, sparse csr, axis=1, missing_values=0 => copy
X = X_orig.copy()
imputer = Imputer(missing_values=0, strategy="mean",
copy=False, axis=1)
Xt = imputer.fit(X).transform(X)
assert_false(sparse.issparse(Xt))
# Note: If X is sparse and if missing_values=0, then a (dense) copy of X is
# made, even if copy=False.
| bsd-3-clause |
hanteng/pyCountrySize | pyCountrySize/01_datapkl.py | 1 | 5651 | # -*- coding: utf-8 -*-
# Discrimination knows no bounds; turning back is the shore. As the keys rise and fall, feelings are both real and illusory.
datasource_sn=1
import configparser
Config = configparser.ConfigParser()
Config.read("config.ini")
dir_source = Config.get("Directory", 'source')
dir_inprocess = Config.get("Directory",'inprocess')
dir_outcome = Config.get("Directory",'outcome')
fn_suffix = Config.get("Filename",'suffix')
#file_name defined in config.ini
fn_input=Config.get("datasource{0}".format(datasource_sn),'filename')
import os.path
import pandas as pd
## Loading the XLS source file
df = pd.io.parsers.read_table(os.path.join(dir_source,fn_input), thousands=',' , na_values=["n/a", "--"], encoding="cp1252")
## REMOVing the last two lines from the data source:
df=df.iloc[:-2]
## Simplifyig the column names
def name_rename(name):
dict_trans = {'WEO Country Code': 'WEO',
'WEO Subject Code': 'Subject',
'Country/Series-specific Notes': 'Country_Notes'}
return dict_trans.get(name, name)
col_names = [name_rename(x) for x in df.columns]
df.columns=col_names
df.rename(columns=lambda x: x.replace(" ","_"), inplace=True)
## CHANGing data types
df[['WEO']]=df[['WEO']].astype(int)
df[['ISO']]=df[['ISO']].astype(str)
df[['Subject']]=df[['Subject']].astype(str)
df[['Country_Notes']]=df[['Country_Notes']].astype(str)
df[['Estimates_Start_After']]=df[['Estimates_Start_After']].fillna(-1)
df[['Estimates_Start_After']]=df[['Estimates_Start_After']].astype(int)
#df.convert_objects(convert_numeric=True)
## Adding index points to the dataframe 'ISO', 'Subject'
df.set_index(['ISO', 'Subject'], inplace=True)
## > Select a country (e.g. AFG) and a Subject (e.g. PPPGDP) --> time series data
#print df.query('ISO == "AFG" & Subject=="PPPGDP"')
## >> Pick a Year further
#print df.query('ISO == "AFG" & Subject=="PPPGDP"')['2014']
##ISO Subject
##AFG PPPGDP 61.689
##Name: 2014, dtype: float64
## > Select a year (e.g. 2014) and a Subject (e.g. PPPGDP) --> cross-country (cross-sectional) data
#test=df.query('Subject=="PPPGDP"')['2014']
#print test[0:3]
## > Select a year (e.g. 2013) and a country (e.g. TWN) --> all subject data about a country in a given year
#test=df.query('ISO == "TWN"')['2013']
#print test[0:3]
## Convert column names that look like integers into actual integers
def integerization(x):
try:
return int(x)
except:
return x
df.columns=[integerization(x) for x in list(df.columns)]
df.to_pickle(os.path.join(dir_inprocess, os.path.splitext(os.path.basename(fn_input))[0] + "." + fn_suffix))
##>>> df.head()
## WEO Country Subject_Descriptor \
##ISO Subject
##AFG NGDP_R 512 Afghanistan Gross domestic product, constant prices
## NGDP_RPCH 512 Afghanistan Gross domestic product, constant prices
## NGDP 512 Afghanistan Gross domestic product, current prices
## NGDPD 512 Afghanistan Gross domestic product, current prices
## NGDP_D 512 Afghanistan Gross domestic product, deflator
##
## Subject_Notes \
##ISO Subject
##AFG NGDP_R Expressed in billions of national currency uni...
## NGDP_RPCH Annual percentages of constant price GDP are y...
## NGDP Expressed in billions of national currency uni...
## NGDPD Values are based upon GDP in national currency...
## NGDP_D The GDP deflator is derived by dividing curren...
##
## Units Scale \
##ISO Subject
##AFG NGDP_R National currency Billions
## NGDP_RPCH Percent change NaN
## NGDP National currency Billions
## NGDPD U.S. dollars Billions
## NGDP_D Index NaN
##
## Country_Notes 1980 1981 \
##ISO Subject
##AFG NGDP_R Source: National Statistical Office Latest act... NaN NaN
## NGDP_RPCH See notes for: Gross domestic product, consta... NaN NaN
## NGDP Source: National Statistical Office Latest act... NaN NaN
## NGDPD See notes for: Gross domestic product, curren... NaN NaN
## NGDP_D See notes for: Gross domestic product, consta... NaN NaN
##
## 1982 ... 2011 2012 2013 \
##ISO Subject ...
##AFG NGDP_R NaN ... 386.368 440.336 456.172
## NGDP_RPCH NaN ... 6.479 13.968 3.596
## NGDP NaN ... 836.222 1033.590 1148.110
## NGDPD NaN ... 17.890 20.296 20.735
## NGDP_D NaN ... 216.432 234.728 251.684
##
## 2014 2015 2016 2017 2018 2019 \
##ISO Subject
##AFG NGDP_R 470.947 492.083 516.838 542.993 571.601 603.538
## NGDP_RPCH 3.239 4.488 5.031 5.060 5.269 5.587
## NGDP 1248.660 1378.500 1526.440 1682.610 1858.130 2057.320
## NGDPD 21.706 23.227 24.787 26.380 28.117 30.028
## NGDP_D 265.139 280.136 295.342 309.878 325.075 340.876
| gpl-3.0 |
Derek-Cartwright-Jr/data | pew-religions/Religion-Leah.py | 37 | 3271 | #!/usr/bin/env python
import numpy as np
import pandas as pd
religions = ['Buddhist', 'Catholic', 'Evangel Prot', 'Hindu', 'Hist Black Prot', 'Jehovahs Witness', 'Jewish', 'Mainline Prot', 'Mormon', 'Muslim', 'Orthodox Christian', 'Unaffiliated']
csv = open("current.csv", 'w')
csv.truncate()
def write_row(matrix):
arr = np.asarray(matrix[0])[0]
row = ','.join([str(a) for a in arr]) + '\n'
csv.write(row)
# Intitial distribution of religions in US
first = np.matrix([.007, .208, .254, .007, .065, .008, .019, .147, .016, .009, .005, .228])
# Normed to sum to 100%
current = first / np.sum(first)
t0 = current
write_row(current)
# Transition matrix
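# Rows and columns are assumed to follow the order of `religions` above: entry (i, j)
# is read as the share of people raised in religion i who end up in religion j.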
trans = np.matrix(((0.390296314, 0.027141947, 0.06791021, 0.001857564, 0, 0, 0.011166082, 0.059762879, 0, 0, 0, 0.396569533),
(0.005370791, 0.593173325, 0.103151608, 0.000649759, 0.010486747, 0.005563864, 0.002041424, 0.053825329, 0.004760476, 0.001130529, 0.000884429, 0.199488989),
(0.00371836, 0.023900817, 0.650773331, 0.000250102, 0.016774503, 0.003098214, 0.001865491, 0.122807467, 0.004203107, 0.000186572, 0.002123778, 0.151866648),
(0, 0, 0.0033732, 0.804072618, 0, 0.001511151, 0, 0.01234639, 0, 0.00209748, 0, 0.17659916),
(0.002051357, 0.016851659, 0.09549708, 0, 0.699214315, 0.010620473, 0.000338804, 0.024372871, 0.000637016, 0.009406884, 0.000116843, 0.129892558),
(0, 0.023278276, 0.109573979, 0, 0.077957568, 0.336280578, 0, 0.074844833, 0.007624035, 0, 0, 0.35110361),
(0.006783201, 0.004082693, 0.014329604, 0, 0, 0.000610585, 0.745731278, 0.009587587, 0, 0, 0.002512334, 0.184058682),
(0.005770357, 0.038017215, 0.187857555, 0.000467601, 0.008144075, 0.004763516, 0.003601208, 0.451798506, 0.005753587, 0.000965543, 0.00109818, 0.25750798),
(0.007263135, 0.01684885, 0.06319935, 0.000248467, 0.0059394, 0, 0.001649896, 0.03464334, 0.642777489, 0.002606278, 0, 0.208904711),
(0, 0.005890381, 0.023573308, 0, 0.011510643, 0, 0.005518343, 0.014032084, 0, 0.772783807, 0, 0.15424369),
(0.004580353, 0.042045841, 0.089264134 , 0, 0.00527346, 0, 0, 0.061471387, 0.005979218, 0.009113978, 0.526728084, 0.243246723),
(0.006438308, 0.044866331, 0.1928814, 0.002035375, 0.04295005, 0.010833621, 0.011541439, 0.09457963, 0.01365141, 0.005884336, 0.002892072, 0.525359211)))
# Fertility array
fert = np.matrix(((2.1, 2.3, 2.3, 2.1, 2.5, 2.1, 2, 1.9, 3.4, 2.8, 2.1, 1.7)))
# Create data frame for printing later
religionDataFrame = pd.DataFrame()
for x in range(0,100):
### beginning of conversion step
# apply transition matrix to current distribution
current = current * trans
### beginning of fertility step
# divide by two for couple number
current = current/2
# adjust by fertility
current = np.multiply(fert, current)
# normalize to 100%
current = current / np.sum(current)
write_row(current)
# add to data frame
religionDataFrame = religionDataFrame.append(pd.DataFrame(current), ignore_index=True)
csv.close()
religionDataFrame.columns = religions
religionDataFrame.to_csv("current_pandas_save.csv")
| mit |
mansenfranzen/tssim | setup.py | 1 | 1710 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
from setuptools import setup, find_packages
from setuptools.command.test import test as TestCommand
class PyTest(TestCommand):
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = []
self.test_suite = True
def run_tests(self):
import pytest
errno = pytest.main(self.test_args)
sys.exit(errno)
with open('README.rst') as readme_file:
readme = readme_file.read()
with open('HISTORY.rst') as history_file:
history = history_file.read()
requirements = [
'pandas',
'numpy',
'bokeh'
]
test_requirements = [
'pytest'
] + requirements
setup(
name='tssim',
version='0.1.1',
description=".",
long_description=readme + '\n\n' + history,
author="Franz Woellert",
author_email='[email protected]',
url='https://github.com/mansenfranzen/tssim',
packages=find_packages(),
install_requires=requirements,
license="MIT license",
zip_safe=False,
keywords='tssim',
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
"Programming Language :: Python :: 2",
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
],
test_suite='tests',
tests_require=test_requirements,
cmdclass={'test': PyTest}
)
| mit |
southpaw94/MachineLearning | Chapter3/Perceptron.py | 1 | 1575 | from matplotlib.colors import ListedColormap
import matplotlib.pyplot as plt
import numpy as np
from sklearn import datasets
from sklearn.cross_validation import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import Perceptron, LogisticRegression
from plots import PlotFigures
iris = datasets.load_iris()
X = iris.data[:, [2, 3]]
y = iris.target
X_train, X_test, y_train, y_test = train_test_split(X, y, \
test_size=0.3, random_state=0)
sc = StandardScaler()
sc.fit(X_train)
X_train_std = sc.transform(X_train)
X_test_std = sc.transform(X_test)
ppn = Perceptron(n_iter=40, eta0=0.1, random_state=0)
ppn.fit(X_train_std, y_train)
y_pred = ppn.predict(X_test_std)
print('Misclassified samples: %d' %(y_test != y_pred).sum())
X_combined_std = np.vstack((X_train_std, X_test_std))
y_combined = np.hstack((y_train, y_test))
PlotFigures.plot_decision_regions(X=X_combined_std, \
y=y_combined, classifier=ppn, test_idx=range(105, 150))
plt.xlabel('petal length [standardized]')
plt.ylabel('petal width [standardized]')
plt.legend(loc='upper left')
plt.show()
weights, params = [], []
for c in np.arange(-5, 5):
lr = LogisticRegression(C=10**c, random_state=0)
lr.fit(X_train_std, y_train)
weights.append(lr.coef_[1])
params.append(10**c)
weights = np.array(weights)
plt.plot(params, weights[:, 0], label='petal length')
plt.plot(params, weights[:, 1], linestyle='--', label='petal width')
plt.ylabel('weight coefficient')
plt.xlabel('C')
plt.legend(loc='upper left')
plt.xscale('log')
plt.show()
| gpl-2.0 |
MatthieuBizien/scikit-learn | examples/text/hashing_vs_dict_vectorizer.py | 93 | 3243 | """
===========================================
FeatureHasher and DictVectorizer Comparison
===========================================
Compares FeatureHasher and DictVectorizer by using both to vectorize
text documents.
The example demonstrates syntax and speed only; it doesn't actually do
anything useful with the extracted vectors. See the example scripts
{document_classification_20newsgroups,clustering}.py for actual learning
on text documents.
A discrepancy between the number of terms reported for DictVectorizer and
for FeatureHasher is to be expected due to hash collisions.
"""
# Author: Lars Buitinck
# License: BSD 3 clause
from __future__ import print_function
from collections import defaultdict
import re
import sys
from time import time
import numpy as np
from sklearn.datasets import fetch_20newsgroups
from sklearn.feature_extraction import DictVectorizer, FeatureHasher
def n_nonzero_columns(X):
"""Returns the number of non-zero columns in a CSR matrix X."""
return len(np.unique(X.nonzero()[1]))
def tokens(doc):
"""Extract tokens from doc.
This uses a simple regex to break strings into tokens. For a more
principled approach, see CountVectorizer or TfidfVectorizer.
"""
return (tok.lower() for tok in re.findall(r"\w+", doc))
def token_freqs(doc):
"""Extract a dict mapping tokens from doc to their frequencies."""
freq = defaultdict(int)
for tok in tokens(doc):
freq[tok] += 1
return freq
categories = [
'alt.atheism',
'comp.graphics',
'comp.sys.ibm.pc.hardware',
'misc.forsale',
'rec.autos',
'sci.space',
'talk.religion.misc',
]
# Uncomment the following line to use a larger set (11k+ documents)
#categories = None
print(__doc__)
print("Usage: %s [n_features_for_hashing]" % sys.argv[0])
print(" The default number of features is 2**18.")
print()
try:
n_features = int(sys.argv[1])
except IndexError:
n_features = 2 ** 18
except ValueError:
print("not a valid number of features: %r" % sys.argv[1])
sys.exit(1)
print("Loading 20 newsgroups training data")
raw_data = fetch_20newsgroups(subset='train', categories=categories).data
data_size_mb = sum(len(s.encode('utf-8')) for s in raw_data) / 1e6
print("%d documents - %0.3fMB" % (len(raw_data), data_size_mb))
print()
print("DictVectorizer")
t0 = time()
vectorizer = DictVectorizer()
vectorizer.fit_transform(token_freqs(d) for d in raw_data)
duration = time() - t0
print("done in %fs at %0.3fMB/s" % (duration, data_size_mb / duration))
print("Found %d unique terms" % len(vectorizer.get_feature_names()))
print()
print("FeatureHasher on frequency dicts")
t0 = time()
hasher = FeatureHasher(n_features=n_features)
X = hasher.transform(token_freqs(d) for d in raw_data)
duration = time() - t0
print("done in %fs at %0.3fMB/s" % (duration, data_size_mb / duration))
print("Found %d unique terms" % n_nonzero_columns(X))
print()
print("FeatureHasher on raw tokens")
t0 = time()
hasher = FeatureHasher(n_features=n_features, input_type="string")
X = hasher.transform(tokens(d) for d in raw_data)
duration = time() - t0
print("done in %fs at %0.3fMB/s" % (duration, data_size_mb / duration))
print("Found %d unique terms" % n_nonzero_columns(X))
| bsd-3-clause |
eg-zhang/scikit-learn | sklearn/kernel_ridge.py | 155 | 6545 | """Module :mod:`sklearn.kernel_ridge` implements kernel ridge regression."""
# Authors: Mathieu Blondel <[email protected]>
# Jan Hendrik Metzen <[email protected]>
# License: BSD 3 clause
import numpy as np
from .base import BaseEstimator, RegressorMixin
from .metrics.pairwise import pairwise_kernels
from .linear_model.ridge import _solve_cholesky_kernel
from .utils import check_X_y
from .utils.validation import check_is_fitted
class KernelRidge(BaseEstimator, RegressorMixin):
"""Kernel ridge regression.
Kernel ridge regression (KRR) combines ridge regression (linear least
squares with l2-norm regularization) with the kernel trick. It thus
learns a linear function in the space induced by the respective kernel and
the data. For non-linear kernels, this corresponds to a non-linear
function in the original space.
The form of the model learned by KRR is identical to support vector
regression (SVR). However, different loss functions are used: KRR uses
squared error loss while support vector regression uses epsilon-insensitive
loss, both combined with l2 regularization. In contrast to SVR, fitting a
KRR model can be done in closed-form and is typically faster for
medium-sized datasets. On the other hand, the learned model is non-sparse
and thus slower than SVR, which learns a sparse model for epsilon > 0, at
prediction-time.
This estimator has built-in support for multi-variate regression
(i.e., when y is a 2d-array of shape [n_samples, n_targets]).
Read more in the :ref:`User Guide <kernel_ridge>`.
Parameters
----------
alpha : {float, array-like}, shape = [n_targets]
Small positive values of alpha improve the conditioning of the problem
and reduce the variance of the estimates. Alpha corresponds to
``(2*C)^-1`` in other linear models such as LogisticRegression or
LinearSVC. If an array is passed, penalties are assumed to be specific
to the targets. Hence they must correspond in number.
kernel : string or callable, default="linear"
Kernel mapping used internally. A callable should accept two arguments
and the keyword arguments passed to this object as kernel_params, and
should return a floating point number.
gamma : float, default=None
Gamma parameter for the RBF, polynomial, exponential chi2 and
sigmoid kernels. Interpretation of the default value is left to
the kernel; see the documentation for sklearn.metrics.pairwise.
Ignored by other kernels.
degree : float, default=3
Degree of the polynomial kernel. Ignored by other kernels.
coef0 : float, default=1
Zero coefficient for polynomial and sigmoid kernels.
Ignored by other kernels.
kernel_params : mapping of string to any, optional
Additional parameters (keyword arguments) for kernel function passed
as callable object.
Attributes
----------
    dual_coef_ : array, shape = [n_samples] or [n_samples, n_targets]
Weight vector(s) in kernel space
X_fit_ : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training data, which is also required for prediction
References
----------
* Kevin P. Murphy
"Machine Learning: A Probabilistic Perspective", The MIT Press
chapter 14.4.3, pp. 492-493
See also
--------
Ridge
Linear ridge regression.
SVR
Support Vector Regression implemented using libsvm.
Examples
--------
>>> from sklearn.kernel_ridge import KernelRidge
>>> import numpy as np
>>> n_samples, n_features = 10, 5
>>> rng = np.random.RandomState(0)
>>> y = rng.randn(n_samples)
>>> X = rng.randn(n_samples, n_features)
>>> clf = KernelRidge(alpha=1.0)
>>> clf.fit(X, y) # doctest: +NORMALIZE_WHITESPACE
KernelRidge(alpha=1.0, coef0=1, degree=3, gamma=None, kernel='linear',
kernel_params=None)
"""
def __init__(self, alpha=1, kernel="linear", gamma=None, degree=3, coef0=1,
kernel_params=None):
self.alpha = alpha
self.kernel = kernel
self.gamma = gamma
self.degree = degree
self.coef0 = coef0
self.kernel_params = kernel_params
def _get_kernel(self, X, Y=None):
if callable(self.kernel):
params = self.kernel_params or {}
else:
params = {"gamma": self.gamma,
"degree": self.degree,
"coef0": self.coef0}
return pairwise_kernels(X, Y, metric=self.kernel,
filter_params=True, **params)
@property
def _pairwise(self):
return self.kernel == "precomputed"
def fit(self, X, y=None, sample_weight=None):
"""Fit Kernel Ridge regression model
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training data
y : array-like, shape = [n_samples] or [n_samples, n_targets]
Target values
sample_weight : float or numpy array of shape [n_samples]
Individual weights for each sample, ignored if None is passed.
Returns
-------
self : returns an instance of self.
"""
# Convert data
X, y = check_X_y(X, y, accept_sparse=("csr", "csc"), multi_output=True,
y_numeric=True)
K = self._get_kernel(X)
alpha = np.atleast_1d(self.alpha)
ravel = False
if len(y.shape) == 1:
y = y.reshape(-1, 1)
ravel = True
copy = self.kernel == "precomputed"
self.dual_coef_ = _solve_cholesky_kernel(K, y, alpha,
sample_weight,
copy)
if ravel:
self.dual_coef_ = self.dual_coef_.ravel()
self.X_fit_ = X
return self
def predict(self, X):
"""Predict using the the kernel ridge model
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Samples.
Returns
-------
C : array, shape = [n_samples] or [n_samples, n_targets]
Returns predicted values.
"""
check_is_fitted(self, ["X_fit_", "dual_coef_"])
K = self._get_kernel(X, self.X_fit_)
return np.dot(K, self.dual_coef_)
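# Illustrative note (not part of the scikit-learn source): fit() solves the
# dual system (K + alpha * I) dual_coef_ = y in closed form, so predict() is
# just a kernel evaluation against the stored training data. A quick check:
#
#   import numpy as np
#   rng = np.random.RandomState(0)
#   X, y = rng.randn(20, 3), rng.randn(20)
#   model = KernelRidge(alpha=1.0, kernel="rbf", gamma=0.5).fit(X, y)
#   K = model._get_kernel(X, model.X_fit_)
#   assert np.allclose(model.predict(X), K.dot(model.dual_coef_))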
| bsd-3-clause |
Vvucinic/Wander | venv_2_7/lib/python2.7/site-packages/pandas/tests/test_stats.py | 12 | 6100 | # -*- coding: utf-8 -*-
from pandas import compat
import nose
from numpy import nan
import numpy as np
from pandas import Series, DataFrame
from pandas.compat import product
from pandas.util.testing import (assert_frame_equal,
assert_series_equal,
assert_almost_equal)
import pandas.util.testing as tm
class TestRank(tm.TestCase):
_multiprocess_can_split_ = True
s = Series([1, 3, 4, 2, nan, 2, 1, 5, nan, 3])
df = DataFrame({'A': s, 'B': s})
results = {
'average': np.array([1.5, 5.5, 7.0, 3.5, nan,
3.5, 1.5, 8.0, nan, 5.5]),
'min': np.array([1, 5, 7, 3, nan, 3, 1, 8, nan, 5]),
'max': np.array([2, 6, 7, 4, nan, 4, 2, 8, nan, 6]),
'first': np.array([1, 5, 7, 3, nan, 4, 2, 8, nan, 6]),
'dense': np.array([1, 3, 4, 2, nan, 2, 1, 5, nan, 3]),
}
def test_rank_tie_methods(self):
s = self.s
def _check(s, expected, method='average'):
result = s.rank(method=method)
assert_almost_equal(result, expected)
dtypes = [None, object]
disabled = set([(object, 'first')])
results = self.results
for method, dtype in product(results, dtypes):
if (dtype, method) in disabled:
continue
series = s if dtype is None else s.astype(dtype)
_check(series, results[method], method=method)
def test_rank_methods_series(self):
tm.skip_if_no_package('scipy', '0.13', 'scipy.stats.rankdata')
from scipy.stats import rankdata
xs = np.random.randn(9)
xs = np.concatenate([xs[i:] for i in range(0, 9, 2)]) # add duplicates
np.random.shuffle(xs)
index = [chr(ord('a') + i) for i in range(len(xs))]
for vals in [xs, xs + 1e6, xs * 1e-6]:
ts = Series(vals, index=index)
for m in ['average', 'min', 'max', 'first', 'dense']:
result = ts.rank(m)
sprank = rankdata(vals, m if m != 'first' else 'ordinal')
tm.assert_series_equal(result, Series(sprank, index=index))
def test_rank_methods_frame(self):
tm.skip_if_no_package('scipy', '0.13', 'scipy.stats.rankdata')
from scipy.stats import rankdata
xs = np.random.randint(0, 21, (100, 26))
xs = (xs - 10.0) / 10.0
cols = [chr(ord('z') - i) for i in range(xs.shape[1])]
for vals in [xs, xs + 1e6, xs * 1e-6]:
df = DataFrame(vals, columns=cols)
for ax in [0, 1]:
for m in ['average', 'min', 'max', 'first', 'dense']:
result = df.rank(axis=ax, method=m)
sprank = np.apply_along_axis(rankdata, ax, vals,
m if m != 'first' else 'ordinal')
expected = DataFrame(sprank, columns=cols)
tm.assert_frame_equal(result, expected)
def test_rank_dense_method(self):
dtypes = ['O', 'f8', 'i8']
in_out = [([1], [1]),
([2], [1]),
([0], [1]),
([2,2], [1,1]),
([1,2,3], [1,2,3]),
([4,2,1], [3,2,1],),
([1,1,5,5,3], [1,1,3,3,2]),
([-5,-4,-3,-2,-1], [1,2,3,4,5])]
for ser, exp in in_out:
for dtype in dtypes:
s = Series(ser).astype(dtype)
result = s.rank(method='dense')
expected = Series(exp).astype(result.dtype)
assert_series_equal(result, expected)
def test_rank_descending(self):
dtypes = ['O', 'f8', 'i8']
for dtype, method in product(dtypes, self.results):
if 'i' in dtype:
s = self.s.dropna()
df = self.df.dropna()
else:
s = self.s.astype(dtype)
df = self.df.astype(dtype)
res = s.rank(ascending=False)
expected = (s.max() - s).rank()
assert_series_equal(res, expected)
res = df.rank(ascending=False)
expected = (df.max() - df).rank()
assert_frame_equal(res, expected)
if method == 'first' and dtype == 'O':
continue
expected = (s.max() - s).rank(method=method)
res2 = s.rank(method=method, ascending=False)
assert_series_equal(res2, expected)
expected = (df.max() - df).rank(method=method)
if dtype != 'O':
res2 = df.rank(method=method, ascending=False,
numeric_only=True)
assert_frame_equal(res2, expected)
res3 = df.rank(method=method, ascending=False,
numeric_only=False)
assert_frame_equal(res3, expected)
def test_rank_2d_tie_methods(self):
s = self.s
df = self.df
def _check2d(df, expected, method='average', axis=0):
exp_df = DataFrame({'A': expected, 'B': expected})
if axis == 1:
df = df.T
exp_df = exp_df.T
result = df.rank(method=method, axis=axis)
assert_frame_equal(result, exp_df)
dtypes = [None, object]
disabled = set([(object, 'first')])
results = self.results
for method, axis, dtype in product(results, [0, 1], dtypes):
if (dtype, method) in disabled:
continue
frame = df if dtype is None else df.astype(dtype)
_check2d(frame, results[method], method=method, axis=axis)
def test_rank_int(self):
s = self.s.dropna().astype('i8')
for method, res in compat.iteritems(self.results):
result = s.rank(method=method)
expected = Series(res).dropna()
expected.index = result.index
assert_series_equal(result, expected)
if __name__ == '__main__':
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
exit=False)
| artistic-2.0 |
calancha/DIRAC | Core/Utilities/Graphs/QualityMapGraph.py | 10 | 6794 | ########################################################################
# $HeadURL$
########################################################################
""" QualityGraph represents a Quality Map of entities as a special color schema
The DIRAC Graphs package is derived from the GraphTool plotting package of the
CMS/Phedex Project by ... <to be added>
"""
__RCSID__ = "$Id$"
from DIRAC.Core.Utilities.Graphs.PlotBase import PlotBase
from DIRAC.Core.Utilities.Graphs.GraphData import GraphData
from DIRAC.Core.Utilities.Graphs.BarGraph import BarGraph
from DIRAC.Core.Utilities.Graphs.GraphUtilities import *
from pylab import setp
from matplotlib.colors import normalize, LinearSegmentedColormap
import matplotlib.cm as cm
from matplotlib.colorbar import make_axes, ColorbarBase
from matplotlib.dates import date2num
cdict = {'red': ( ( 0.0, 1., 1.0 ),
( 0.5, .0, .0 ),
( 1.0, 0.0, 0.0 ) ),
'green': ( ( 0.0, 0.1, 0.1 ),
( 0.5, 0.9, 0.9 ),
( 1.0, 0.7, 0.7 ) ),
'blue': ( ( 0.0, 0.1, 0.1 ),
( 0.5, 0.5, 0.5 ),
( 1.0, 0.0, 0.0 ) )}
# color blind
#cdict = {'red': ((0.0, .5, 0.5),
# (0.5, .56, 0.56),
# (1.0, 0.63, 0.63)),
# 'green': ((0.0, 0., 0.),
# (0.5, 0.5, 0.5),
# (1.0, 1., 1.)),
# 'blue': ((0.0, 0., 0.),
# (0.5, 0.315, 0.315),
# (1.0, 0.63, 0.63))}
class QualityMapGraph( PlotBase ):
"""
  The QualityMapGraph class is a quality-map graph: given a dictionary
  of values, it takes the keys as the independent variable and the values
  as the dependent variable, rendering the values on a quality color scale.
"""
def __init__( self, data, ax, prefs, *args, **kw ):
PlotBase.__init__( self, data, ax, prefs, *args, **kw )
if type( data ) == types.DictType:
self.gdata = GraphData( data )
elif type( data ) == types.InstanceType and data.__class__ == GraphData:
self.gdata = data
if self.prefs.has_key( 'span' ):
self.width = self.prefs['span']
else:
self.width = 1.0
if self.gdata.key_type == "time":
nKeys = self.gdata.getNumberOfKeys()
self.width = ( max( self.gdata.all_keys ) - min( self.gdata.all_keys ) ) / nKeys
# Setup the colormapper to get the right colors
self.cmap = LinearSegmentedColormap( 'quality_colormap', cdict, 256 )
#self.cmap = cm.RdYlGn
self.norms = normalize( 0, 100 )
mapper = cm.ScalarMappable( cmap = self.cmap, norm = self.norms )
mapper = cm.ScalarMappable( cmap = cm.RdYlGn, norm = self.norms )
def get_alpha( *args, **kw ):
return 1.0
mapper.get_alpha = get_alpha
self.mapper = mapper
def draw( self ):
PlotBase.draw( self )
self.x_formatter_cb( self.ax )
if self.gdata.isEmpty():
return None
tmp_x = []; tmp_y = []
# Evaluate the bar width
width = float( self.width )
offset = 0.
if self.gdata.key_type == 'time':
width = width / 86400.0
start_plot = 0
end_plot = 0
if "starttime" in self.prefs and "endtime" in self.prefs:
start_plot = date2num( datetime.datetime.fromtimestamp( to_timestamp( self.prefs['starttime'] ) ) )
end_plot = date2num( datetime.datetime.fromtimestamp( to_timestamp( self.prefs['endtime'] ) ) )
labels = self.gdata.getLabels()
nKeys = self.gdata.getNumberOfKeys()
tmp_b = []
tmp_x = []
tmp_y = []
self.bars = []
labels = self.gdata.getLabels()
nLabel = 0
labelNames = []
colors = []
xmin = None
xmax = None
for label, num in labels:
labelNames.append( label )
for key, value, error in self.gdata.getPlotNumData( label ):
if xmin is None or xmin > ( key + offset ):
xmin = key + offset
if xmax is None or xmax < ( key + offset ):
xmax = key + offset
if value is not None:
colors.append( self.getQualityColor( value ) )
tmp_x.append( key + offset )
tmp_y.append( 1. )
tmp_b.append( float( nLabel ) )
nLabel += 1
self.bars += self.ax.bar( tmp_x, tmp_y, bottom = tmp_b, width = width, color = colors )
dpi = self.prefs.get( 'dpi', 100 )
setp( self.bars, linewidth = pixelToPoint( 0.5, dpi ), edgecolor = '#AAAAAA' )
#pivots = keys
#for idx in range(len(pivots)):
# self.coords[ pivots[idx] ] = self.bars[idx]
ymax = float( nLabel )
self.ax.set_xlim( xmin = 0., xmax = xmax + width + offset )
self.ax.set_ylim( ymin = 0., ymax = ymax )
if self.gdata.key_type == 'time':
if start_plot and end_plot:
self.ax.set_xlim( xmin = start_plot, xmax = end_plot )
else:
self.ax.set_xlim( xmin = min( tmp_x ), xmax = max( tmp_x ) )
self.ax.set_yticks( [ i + 0.5 for i in range( nLabel ) ] )
self.ax.set_yticklabels( labelNames )
setp( self.ax.get_xticklines(), markersize = 0. )
setp( self.ax.get_yticklines(), markersize = 0. )
cax, kw = make_axes( self.ax, orientation = 'vertical', fraction = 0.07 )
cb = ColorbarBase( cax, cmap = cm.RdYlGn, norm = self.norms )
cb.draw_all()
#cb = self.ax.colorbar( self.mapper, format="%d%%",
# orientation='horizontal', fraction=0.04, pad=0.1, aspect=40 )
#setp( cb.outline, linewidth=.5 )
#setp( cb.ax.get_xticklabels(), size=10 )
#setp( cb.ax.get_xticklabels(), family=self.prefs['font_family'] )
#setp( cb.ax.get_xticklabels(), fontname = self.prefs['font'] )
def getQualityColor( self, value ):
if value is None or value < 0.:
return "#FFFFFF"
return self.mapper.to_rgba( value )
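  # Illustrative examples (based on the RdYlGn mapper and the 0-100 norm set
  # up in __init__): getQualityColor(None) or a negative value -> "#FFFFFF"
  # (no data), getQualityColor(0.) -> the red end of the scale,
  # getQualityColor(100.) -> the green end.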
def getLegendData( self ):
return None
def x_formatter_cb( self, ax ):
if self.gdata.key_type == "string":
smap = self.gdata.getStringMap()
reverse_smap = {}
for key, val in smap.items():
reverse_smap[val] = key
ticks = smap.values()
ticks.sort()
ax.set_xticks( [i + .5 for i in ticks] )
ax.set_xticklabels( [reverse_smap[i] for i in ticks] )
labels = ax.get_xticklabels()
ax.grid( False )
if self.log_xaxis:
xmin = 0.001
else:
xmin = 0
ax.set_xlim( xmin = xmin, xmax = len( ticks ) )
elif self.gdata.key_type == "time":
#ax.set_xlim( xmin=self.begin_num,xmax=self.end_num )
dl = PrettyDateLocator()
df = PrettyDateFormatter( dl )
ax.xaxis.set_major_locator( dl )
ax.xaxis.set_major_formatter( df )
ax.xaxis.set_clip_on( False )
sf = PrettyScalarFormatter()
ax.yaxis.set_major_formatter( sf )
#labels = ax.get_xticklabels()
else:
try:
super( BarGraph, self ).x_formatter_cb( ax )
except:
return None
| gpl-3.0 |
cl4rke/scikit-learn | examples/neighbors/plot_approximate_nearest_neighbors_hyperparameters.py | 227 | 5170 | """
=================================================
Hyper-parameters of Approximate Nearest Neighbors
=================================================
This example demonstrates the behaviour of the
accuracy of the nearest neighbor queries of Locality Sensitive Hashing
Forest as the number of candidates and the number of estimators (trees)
vary.
In the first plot, accuracy is measured with the number of candidates. Here,
the term "number of candidates" refers to maximum bound for the number of
distinct points retrieved from each tree to calculate the distances. Nearest
neighbors are selected from this pool of candidates. Number of estimators is
maintained at three fixed levels (1, 5, 10).
In the second plot, the number of candidates is fixed at 50. Number of trees
is varied and the accuracy is plotted against those values. To measure the
accuracy, the true nearest neighbors are required, therefore
:class:`sklearn.neighbors.NearestNeighbors` is used to compute the exact
neighbors.
"""
from __future__ import division
print(__doc__)
# Author: Maheshakya Wijewardena <[email protected]>
#
# License: BSD 3 clause
###############################################################################
import numpy as np
from sklearn.datasets.samples_generator import make_blobs
from sklearn.neighbors import LSHForest
from sklearn.neighbors import NearestNeighbors
import matplotlib.pyplot as plt
# Initialize size of the database, iterations and required neighbors.
n_samples = 10000
n_features = 100
n_queries = 30
rng = np.random.RandomState(42)
# Generate sample data
X, _ = make_blobs(n_samples=n_samples + n_queries,
n_features=n_features, centers=10,
random_state=0)
X_index = X[:n_samples]
X_query = X[n_samples:]
# Get exact neighbors
nbrs = NearestNeighbors(n_neighbors=1, algorithm='brute',
metric='cosine').fit(X_index)
neighbors_exact = nbrs.kneighbors(X_query, return_distance=False)
# Set `n_candidate` values
n_candidates_values = np.linspace(10, 500, 5).astype(np.int)
n_estimators_for_candidate_value = [1, 5, 10]
n_iter = 10
stds_accuracies = np.zeros((len(n_estimators_for_candidate_value),
n_candidates_values.shape[0]),
dtype=float)
accuracies_c = np.zeros((len(n_estimators_for_candidate_value),
n_candidates_values.shape[0]), dtype=float)
# LSH Forest is a stochastic index: perform several iteration to estimate
# expected accuracy and standard deviation displayed as error bars in
# the plots
for j, value in enumerate(n_estimators_for_candidate_value):
for i, n_candidates in enumerate(n_candidates_values):
accuracy_c = []
for seed in range(n_iter):
lshf = LSHForest(n_estimators=value,
n_candidates=n_candidates, n_neighbors=1,
random_state=seed)
# Build the LSH Forest index
lshf.fit(X_index)
# Get neighbors
neighbors_approx = lshf.kneighbors(X_query,
return_distance=False)
accuracy_c.append(np.sum(np.equal(neighbors_approx,
neighbors_exact)) /
n_queries)
stds_accuracies[j, i] = np.std(accuracy_c)
accuracies_c[j, i] = np.mean(accuracy_c)
# Set `n_estimators` values
n_estimators_values = [1, 5, 10, 20, 30, 40, 50]
accuracies_trees = np.zeros(len(n_estimators_values), dtype=float)
# Calculate average accuracy for each value of `n_estimators`
for i, n_estimators in enumerate(n_estimators_values):
lshf = LSHForest(n_estimators=n_estimators, n_neighbors=1)
# Build the LSH Forest index
lshf.fit(X_index)
# Get neighbors
neighbors_approx = lshf.kneighbors(X_query, return_distance=False)
accuracies_trees[i] = np.sum(np.equal(neighbors_approx,
neighbors_exact))/n_queries
###############################################################################
# Plot the accuracy variation with `n_candidates`
plt.figure()
colors = ['c', 'm', 'y']
for i, n_estimators in enumerate(n_estimators_for_candidate_value):
label = 'n_estimators = %d ' % n_estimators
plt.plot(n_candidates_values, accuracies_c[i, :],
'o-', c=colors[i], label=label)
plt.errorbar(n_candidates_values, accuracies_c[i, :],
stds_accuracies[i, :], c=colors[i])
plt.legend(loc='upper left', fontsize='small')
plt.ylim([0, 1.2])
plt.xlim(min(n_candidates_values), max(n_candidates_values))
plt.ylabel("Accuracy")
plt.xlabel("n_candidates")
plt.grid(which='both')
plt.title("Accuracy variation with n_candidates")
# Plot the accuracy variation with `n_estimators`
plt.figure()
plt.scatter(n_estimators_values, accuracies_trees, c='k')
plt.plot(n_estimators_values, accuracies_trees, c='g')
plt.ylim([0, 1.2])
plt.xlim(min(n_estimators_values), max(n_estimators_values))
plt.ylabel("Accuracy")
plt.xlabel("n_estimators")
plt.grid(which='both')
plt.title("Accuracy variation with n_estimators")
plt.show()
| bsd-3-clause |
altescy/HandwrittenDigitRecognizer | dig_recognizer.py | 1 | 1567 | # -*- coding: utf-8 -*-
import numpy as np
from sklearn import datasets
from sklearn.externals import joblib
from chainer import serializers
from chainer import links as L, functions as F
import kNN
import NN
def softmax(data):
prob = np.ndarray(data.shape, dtype=np.float32)
sig = np.sum(np.exp(data))
for i, d in enumerate(data):
prob[i] = (np.exp(d) / sig)
return prob
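# Note (illustrative): for large activations np.exp can overflow here; a common
# numerically stable variant subtracts the maximum first, e.g.
#   e = np.exp(data - np.max(data)); return e / e.sum()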
def kNN_method(dig):
digits = datasets.load_digits()
train = digits.data[:1000]
train_target = digits.target[:1000]
return np.array([kNN.kNN(16.0*dig, train, train_target, k=5, numc=10)])
def linear_clasiffer(dig):
dig = np.hstack((1,dig))
W = np.load('./models/weight_linear_digit_classifer.npy')
y = np.dot(W.T, dig)
return softmax(y)
def logistic_regression(dig):
clf = joblib.load('./models/sklearn_mnist88_lr/mnist88.pkl')
y = clf.predict_proba(16*dig.reshape(1,-1))[0]
return softmax(y)
def svm(dig):
clf = joblib.load('./models/svm_mnist28x28/svm28x28.pkl')
y = clf.predict(dig.reshape(1,-1))
return np.array([y[0]])
def nn28x28(dig):
model = L.Classifier(NN.MLP())
serializers.load_npz('./models/3lmnist28x28.npz', model)
y = model.predictor(dig.astype(np.float32))
return F.softmax(y).data
def cnn28x28(dig):
dig = dig.reshape(len(dig),1,28,28)
model = L.Classifier(NN.CNN())
serializers.load_npz('./models/cnnmnist28x28.npz', model)
y = model.predictor(dig)
return F.softmax(y).data
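# Illustrative usage sketch (assumptions: a flattened digit image scaled to
# [0, 1] is passed in, and the pre-trained models under ./models/ exist):
#
#   from sklearn import datasets
#   digit = datasets.load_digits().data[0] / 16.0   # 64-dim vector
#   print(kNN_method(digit))           # predicted class from the k-NN baseline
#   print(logistic_regression(digit))  # class-probability vector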
| gpl-3.0 |
meduz/NeuroTools | test/test_plotting.py | 2 | 5059 | """
Unit tests for the NeuroTools.plotting module
"""
import matplotlib
matplotlib.use('Agg')
import unittest
from NeuroTools import plotting
import pylab
import os
class PylabParamsTest(unittest.TestCase):
def runTest(self):
# define arbitrary values
fig_width_pt = 123.4
ratio = 0.1234
text_fontsize = 10
tick_labelsize = 8
useTex = False
inches_per_pt = 1.0/72.27 # Convert pt to inch
fig_width = fig_width_pt*inches_per_pt # width in inches
fig_height = fig_width*ratio # height in inches
testDict = {
'axes.labelsize' : text_fontsize,
'font.size' : text_fontsize,
'xtick.labelsize' : tick_labelsize,
'ytick.labelsize' : tick_labelsize,
'text.usetex' : useTex,
'figure.figsize' : [fig_width, fig_height]}
plotting.set_pylab_params(fig_width_pt=fig_width_pt, ratio=ratio,
text_fontsize=text_fontsize,
tick_labelsize=tick_labelsize, useTex=useTex)
for k in testDict.keys():
assert pylab.rcParams.has_key(k)
assert pylab.rcParams[k] == testDict[k]
class GetDisplayTest(unittest.TestCase):
def runTest(self):
a = plotting.get_display(True)
assert a != None
a = plotting.get_display(False)
assert a == None
a = plotting.get_display(1234)
assert a == 1234
class ProgressBarTest(unittest.TestCase):
def runTest(self):
import time
print '\nINFO: Testing progress bar...'
for i in range(100):
plotting.progress_bar(i/100.)
time.sleep(.01)
print '\n'
class Save2DImageTest(unittest.TestCase):
def runTest(self):
import numpy
mat = numpy.random.random([50,50])
filename = 'deleteme.png'
if os.path.exists(filename): os.remove(filename)
plotting.save_2D_image(mat, filename)
assert os.path.exists(filename)
os.remove(filename)
class Save2DMovieTest(unittest.TestCase):
def runTest(self):
import numpy
frames = []
duration = 0.1
for i in range(10):
frames.append(numpy.random.randint(0,255,[10,10]))
filename = 'deleteme.zip'
if os.path.exists(filename): os.remove(filename)
plotting.save_2D_movie(frames, filename, duration)
assert os.path.exists(filename)
os.remove(filename)
class SetLabelsTest(unittest.TestCase):
def runTest(self):
f = plotting.get_display(True)
x = range(10)
p = pylab.plot(x)
plotting.set_labels(pylab, 'the x axis', 'the y axis')
# set up a SimpleMultiplot with arbitrary values
self.nrows = 1
self.ncolumns = 1
title = 'testMultiplot'
xlabel = 'testXlabel'
ylabel = 'testYlabel'
scaling = ('linear','log')
self.smt = plotting.SimpleMultiplot(nrows=self.nrows, ncolumns=self.ncolumns, title=title, xlabel=xlabel, ylabel=ylabel, scaling=scaling)
plotting.set_labels(self.smt.panel(0), 'the x axis', 'the y axis')
class SetAxisLimitsTest(unittest.TestCase):
def runTest(self):
f = plotting.get_display(True)
x = range(10)
pylab.plot(x)
plotting.set_axis_limits(pylab, 0., 123., -123., 456.)
# set up a SimpleMultiplot with arbitrary values
self.nrows = 1
self.ncolumns = 1
title = 'testMultiplot'
xlabel = 'testXlabel'
ylabel = 'testYlabel'
scaling = ('linear','log')
self.smt = plotting.SimpleMultiplot(nrows=self.nrows, ncolumns=self.ncolumns, title=title, xlabel=xlabel, ylabel=ylabel, scaling=scaling)
plotting.set_axis_limits(self.smt.panel(0), 0., 123., -123., 456.)
class SimpleMultiplotTest(unittest.TestCase):
def setUp(self):
# define arbitrary values
self.nrows = 4
self.ncolumns = 5
title = 'testMultiplot'
xlabel = 'testXlabel'
ylabel = 'testYlabel'
scaling = ('linear','log')
self.smt = plotting.SimpleMultiplot(nrows=self.nrows, ncolumns=self.ncolumns, title=title, xlabel=xlabel, ylabel=ylabel, scaling=scaling)
class SimpleMultiplotSaveTest(SimpleMultiplotTest):
def runTest(self):
filename = "deleteme.png"
if os.path.exists(filename): os.remove(filename)
self.smt.save(filename)
assert os.path.exists(filename)
os.remove(filename)
class SimpleMultiplotSetFrameTest(SimpleMultiplotTest):
def runTest(self):
numPanels = self.nrows * self.ncolumns
boollist = [True,False,False,True]
for i in range(numPanels):
ax_indexed = self.smt.panel(i)
ax_next = self.smt.next_panel()
assert ax_indexed == ax_next
self.smt.set_frame(ax_indexed,boollist,linewidth=4)
if __name__ == "__main__":
unittest.main()
| gpl-2.0 |
heli522/scikit-learn | sklearn/metrics/tests/test_pairwise.py | 17 | 24947 | import numpy as np
from numpy import linalg
from scipy.sparse import dok_matrix, csr_matrix, issparse
from scipy.spatial.distance import cosine, cityblock, minkowski, wminkowski
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regexp
from sklearn.utils.testing import assert_true
from sklearn.externals.six import iteritems
from sklearn.metrics.pairwise import euclidean_distances
from sklearn.metrics.pairwise import manhattan_distances
from sklearn.metrics.pairwise import linear_kernel
from sklearn.metrics.pairwise import chi2_kernel, additive_chi2_kernel
from sklearn.metrics.pairwise import polynomial_kernel
from sklearn.metrics.pairwise import rbf_kernel
from sklearn.metrics.pairwise import sigmoid_kernel
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.metrics.pairwise import cosine_distances
from sklearn.metrics.pairwise import pairwise_distances
from sklearn.metrics.pairwise import pairwise_distances_argmin_min
from sklearn.metrics.pairwise import pairwise_distances_argmin
from sklearn.metrics.pairwise import pairwise_kernels
from sklearn.metrics.pairwise import PAIRWISE_KERNEL_FUNCTIONS
from sklearn.metrics.pairwise import PAIRWISE_DISTANCE_FUNCTIONS
from sklearn.metrics.pairwise import PAIRED_DISTANCES
from sklearn.metrics.pairwise import check_pairwise_arrays
from sklearn.metrics.pairwise import check_paired_arrays
from sklearn.metrics.pairwise import _parallel_pairwise
from sklearn.metrics.pairwise import paired_distances
from sklearn.metrics.pairwise import paired_euclidean_distances
from sklearn.metrics.pairwise import paired_manhattan_distances
from sklearn.preprocessing import normalize
def test_pairwise_distances():
    # Test the pairwise_distances helper function.
rng = np.random.RandomState(0)
# Euclidean distance should be equivalent to calling the function.
X = rng.random_sample((5, 4))
S = pairwise_distances(X, metric="euclidean")
S2 = euclidean_distances(X)
assert_array_almost_equal(S, S2)
# Euclidean distance, with Y != X.
Y = rng.random_sample((2, 4))
S = pairwise_distances(X, Y, metric="euclidean")
S2 = euclidean_distances(X, Y)
assert_array_almost_equal(S, S2)
# Test with tuples as X and Y
X_tuples = tuple([tuple([v for v in row]) for row in X])
Y_tuples = tuple([tuple([v for v in row]) for row in Y])
S2 = pairwise_distances(X_tuples, Y_tuples, metric="euclidean")
assert_array_almost_equal(S, S2)
# "cityblock" uses sklearn metric, cityblock (function) is scipy.spatial.
S = pairwise_distances(X, metric="cityblock")
S2 = pairwise_distances(X, metric=cityblock)
assert_equal(S.shape[0], S.shape[1])
assert_equal(S.shape[0], X.shape[0])
assert_array_almost_equal(S, S2)
# The manhattan metric should be equivalent to cityblock.
S = pairwise_distances(X, Y, metric="manhattan")
S2 = pairwise_distances(X, Y, metric=cityblock)
assert_equal(S.shape[0], X.shape[0])
assert_equal(S.shape[1], Y.shape[0])
assert_array_almost_equal(S, S2)
# Low-level function for manhattan can divide in blocks to avoid
# using too much memory during the broadcasting
S3 = manhattan_distances(X, Y, size_threshold=10)
assert_array_almost_equal(S, S3)
# Test cosine as a string metric versus cosine callable
# "cosine" uses sklearn metric, cosine (function) is scipy.spatial
S = pairwise_distances(X, Y, metric="cosine")
S2 = pairwise_distances(X, Y, metric=cosine)
assert_equal(S.shape[0], X.shape[0])
assert_equal(S.shape[1], Y.shape[0])
assert_array_almost_equal(S, S2)
# Test with sparse X and Y,
# currently only supported for Euclidean, L1 and cosine.
X_sparse = csr_matrix(X)
Y_sparse = csr_matrix(Y)
S = pairwise_distances(X_sparse, Y_sparse, metric="euclidean")
S2 = euclidean_distances(X_sparse, Y_sparse)
assert_array_almost_equal(S, S2)
S = pairwise_distances(X_sparse, Y_sparse, metric="cosine")
S2 = cosine_distances(X_sparse, Y_sparse)
assert_array_almost_equal(S, S2)
S = pairwise_distances(X_sparse, Y_sparse.tocsc(), metric="manhattan")
S2 = manhattan_distances(X_sparse.tobsr(), Y_sparse.tocoo())
assert_array_almost_equal(S, S2)
S2 = manhattan_distances(X, Y)
assert_array_almost_equal(S, S2)
# Test with scipy.spatial.distance metric, with a kwd
kwds = {"p": 2.0}
S = pairwise_distances(X, Y, metric="minkowski", **kwds)
S2 = pairwise_distances(X, Y, metric=minkowski, **kwds)
assert_array_almost_equal(S, S2)
# same with Y = None
kwds = {"p": 2.0}
S = pairwise_distances(X, metric="minkowski", **kwds)
S2 = pairwise_distances(X, metric=minkowski, **kwds)
assert_array_almost_equal(S, S2)
# Test that scipy distance metrics throw an error if sparse matrix given
assert_raises(TypeError, pairwise_distances, X_sparse, metric="minkowski")
assert_raises(TypeError, pairwise_distances, X, Y_sparse,
metric="minkowski")
    # Test that a ValueError is raised if the metric is unknown
assert_raises(ValueError, pairwise_distances, X, Y, metric="blah")
def test_pairwise_precomputed():
for func in [pairwise_distances, pairwise_kernels]:
# Test correct shape
assert_raises_regexp(ValueError, '.* shape .*',
func, np.zeros((5, 3)), metric='precomputed')
# with two args
assert_raises_regexp(ValueError, '.* shape .*',
func, np.zeros((5, 3)), np.zeros((4, 4)),
metric='precomputed')
# even if shape[1] agrees (although thus second arg is spurious)
assert_raises_regexp(ValueError, '.* shape .*',
func, np.zeros((5, 3)), np.zeros((4, 3)),
metric='precomputed')
# Test not copied (if appropriate dtype)
S = np.zeros((5, 5))
S2 = func(S, metric="precomputed")
assert_true(S is S2)
# with two args
S = np.zeros((5, 3))
S2 = func(S, np.zeros((3, 3)), metric="precomputed")
assert_true(S is S2)
# Test always returns float dtype
S = func(np.array([[1]], dtype='int'), metric='precomputed')
assert_equal('f', S.dtype.kind)
# Test converts list to array-like
S = func([[1]], metric='precomputed')
assert_true(isinstance(S, np.ndarray))
def check_pairwise_parallel(func, metric, kwds):
rng = np.random.RandomState(0)
for make_data in (np.array, csr_matrix):
X = make_data(rng.random_sample((5, 4)))
Y = make_data(rng.random_sample((3, 4)))
try:
S = func(X, metric=metric, n_jobs=1, **kwds)
except (TypeError, ValueError) as exc:
# Not all metrics support sparse input
# ValueError may be triggered by bad callable
if make_data is csr_matrix:
assert_raises(type(exc), func, X, metric=metric,
n_jobs=2, **kwds)
continue
else:
raise
S2 = func(X, metric=metric, n_jobs=2, **kwds)
assert_array_almost_equal(S, S2)
S = func(X, Y, metric=metric, n_jobs=1, **kwds)
S2 = func(X, Y, metric=metric, n_jobs=2, **kwds)
assert_array_almost_equal(S, S2)
def test_pairwise_parallel():
wminkowski_kwds = {'w': np.arange(1, 5).astype('double'), 'p': 1}
metrics = [(pairwise_distances, 'euclidean', {}),
(pairwise_distances, wminkowski, wminkowski_kwds),
(pairwise_distances, 'wminkowski', wminkowski_kwds),
(pairwise_kernels, 'polynomial', {'degree': 1}),
(pairwise_kernels, callable_rbf_kernel, {'gamma': .1}),
]
for func, metric, kwds in metrics:
yield check_pairwise_parallel, func, metric, kwds
def test_pairwise_callable_nonstrict_metric():
# paired_distances should allow callable metric where metric(x, x) != 0
# Knowing that the callable is a strict metric would allow the diagonal to
# be left uncalculated and set to 0.
assert_equal(pairwise_distances([[1]], metric=lambda x, y: 5)[0, 0], 5)
def callable_rbf_kernel(x, y, **kwds):
# Callable version of pairwise.rbf_kernel.
K = rbf_kernel(np.atleast_2d(x), np.atleast_2d(y), **kwds)
return K
def test_pairwise_kernels():
# Test the pairwise_kernels helper function.
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
Y = rng.random_sample((2, 4))
# Test with all metrics that should be in PAIRWISE_KERNEL_FUNCTIONS.
test_metrics = ["rbf", "sigmoid", "polynomial", "linear", "chi2",
"additive_chi2"]
for metric in test_metrics:
function = PAIRWISE_KERNEL_FUNCTIONS[metric]
# Test with Y=None
K1 = pairwise_kernels(X, metric=metric)
K2 = function(X)
assert_array_almost_equal(K1, K2)
# Test with Y=Y
K1 = pairwise_kernels(X, Y=Y, metric=metric)
K2 = function(X, Y=Y)
assert_array_almost_equal(K1, K2)
# Test with tuples as X and Y
X_tuples = tuple([tuple([v for v in row]) for row in X])
Y_tuples = tuple([tuple([v for v in row]) for row in Y])
K2 = pairwise_kernels(X_tuples, Y_tuples, metric=metric)
assert_array_almost_equal(K1, K2)
# Test with sparse X and Y
X_sparse = csr_matrix(X)
Y_sparse = csr_matrix(Y)
if metric in ["chi2", "additive_chi2"]:
# these don't support sparse matrices yet
assert_raises(ValueError, pairwise_kernels,
X_sparse, Y=Y_sparse, metric=metric)
continue
K1 = pairwise_kernels(X_sparse, Y=Y_sparse, metric=metric)
assert_array_almost_equal(K1, K2)
# Test with a callable function, with given keywords.
metric = callable_rbf_kernel
kwds = {}
kwds['gamma'] = 0.1
K1 = pairwise_kernels(X, Y=Y, metric=metric, **kwds)
K2 = rbf_kernel(X, Y=Y, **kwds)
assert_array_almost_equal(K1, K2)
# callable function, X=Y
K1 = pairwise_kernels(X, Y=X, metric=metric, **kwds)
K2 = rbf_kernel(X, Y=X, **kwds)
assert_array_almost_equal(K1, K2)
def test_pairwise_kernels_filter_param():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
Y = rng.random_sample((2, 4))
K = rbf_kernel(X, Y, gamma=0.1)
params = {"gamma": 0.1, "blabla": ":)"}
K2 = pairwise_kernels(X, Y, metric="rbf", filter_params=True, **params)
assert_array_almost_equal(K, K2)
assert_raises(TypeError, pairwise_kernels, X, Y, "rbf", **params)
def test_paired_distances():
    # Test the paired_distances helper function.
rng = np.random.RandomState(0)
# Euclidean distance should be equivalent to calling the function.
X = rng.random_sample((5, 4))
# Euclidean distance, with Y != X.
Y = rng.random_sample((5, 4))
for metric, func in iteritems(PAIRED_DISTANCES):
S = paired_distances(X, Y, metric=metric)
S2 = func(X, Y)
assert_array_almost_equal(S, S2)
S3 = func(csr_matrix(X), csr_matrix(Y))
assert_array_almost_equal(S, S3)
if metric in PAIRWISE_DISTANCE_FUNCTIONS:
            # Check that the pairwise_distances implementation
# gives the same value
distances = PAIRWISE_DISTANCE_FUNCTIONS[metric](X, Y)
distances = np.diag(distances)
assert_array_almost_equal(distances, S)
# Check the callable implementation
S = paired_distances(X, Y, metric='manhattan')
S2 = paired_distances(X, Y, metric=lambda x, y: np.abs(x - y).sum(axis=0))
assert_array_almost_equal(S, S2)
# Test that a value error is raised when the lengths of X and Y should not
# differ
Y = rng.random_sample((3, 4))
assert_raises(ValueError, paired_distances, X, Y)
def test_pairwise_distances_argmin_min():
# Check pairwise minimum distances computation for any metric
X = [[0], [1]]
Y = [[-1], [2]]
Xsp = dok_matrix(X)
Ysp = csr_matrix(Y, dtype=np.float32)
# euclidean metric
D, E = pairwise_distances_argmin_min(X, Y, metric="euclidean")
D2 = pairwise_distances_argmin(X, Y, metric="euclidean")
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(D2, [0, 1])
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(E, [1., 1.])
# sparse matrix case
Dsp, Esp = pairwise_distances_argmin_min(Xsp, Ysp, metric="euclidean")
assert_array_equal(Dsp, D)
assert_array_equal(Esp, E)
# We don't want np.matrix here
assert_equal(type(Dsp), np.ndarray)
assert_equal(type(Esp), np.ndarray)
# Non-euclidean sklearn metric
D, E = pairwise_distances_argmin_min(X, Y, metric="manhattan")
D2 = pairwise_distances_argmin(X, Y, metric="manhattan")
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(D2, [0, 1])
assert_array_almost_equal(E, [1., 1.])
D, E = pairwise_distances_argmin_min(Xsp, Ysp, metric="manhattan")
D2 = pairwise_distances_argmin(Xsp, Ysp, metric="manhattan")
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(E, [1., 1.])
# Non-euclidean Scipy distance (callable)
D, E = pairwise_distances_argmin_min(X, Y, metric=minkowski,
metric_kwargs={"p": 2})
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(E, [1., 1.])
# Non-euclidean Scipy distance (string)
D, E = pairwise_distances_argmin_min(X, Y, metric="minkowski",
metric_kwargs={"p": 2})
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(E, [1., 1.])
# Compare with naive implementation
rng = np.random.RandomState(0)
X = rng.randn(97, 149)
Y = rng.randn(111, 149)
dist = pairwise_distances(X, Y, metric="manhattan")
dist_orig_ind = dist.argmin(axis=0)
dist_orig_val = dist[dist_orig_ind, range(len(dist_orig_ind))]
dist_chunked_ind, dist_chunked_val = pairwise_distances_argmin_min(
X, Y, axis=0, metric="manhattan", batch_size=50)
np.testing.assert_almost_equal(dist_orig_ind, dist_chunked_ind, decimal=7)
np.testing.assert_almost_equal(dist_orig_val, dist_chunked_val, decimal=7)
def test_euclidean_distances():
# Check the pairwise Euclidean distances computation
X = [[0]]
Y = [[1], [2]]
D = euclidean_distances(X, Y)
assert_array_almost_equal(D, [[1., 2.]])
X = csr_matrix(X)
Y = csr_matrix(Y)
D = euclidean_distances(X, Y)
assert_array_almost_equal(D, [[1., 2.]])
rng = np.random.RandomState(0)
X = rng.random_sample((10, 4))
Y = rng.random_sample((20, 4))
X_norm_sq = (X ** 2).sum(axis=1)
Y_norm_sq = (Y ** 2).sum(axis=1)
# check that we still get the right answers with {X,Y}_norm_squared
D1 = euclidean_distances(X, Y)
D2 = euclidean_distances(X, Y, X_norm_squared=X_norm_sq)
D3 = euclidean_distances(X, Y, Y_norm_squared=Y_norm_sq)
D4 = euclidean_distances(X, Y, X_norm_squared=X_norm_sq,
Y_norm_squared=Y_norm_sq)
assert_array_almost_equal(D2, D1)
assert_array_almost_equal(D3, D1)
assert_array_almost_equal(D4, D1)
# check we get the wrong answer with wrong {X,Y}_norm_squared
X_norm_sq *= 0.5
Y_norm_sq *= 0.5
wrong_D = euclidean_distances(X, Y,
X_norm_squared=np.zeros_like(X_norm_sq),
Y_norm_squared=np.zeros_like(Y_norm_sq))
assert_greater(np.max(np.abs(wrong_D - D1)), .01)
# Paired distances
def test_paired_euclidean_distances():
# Check the paired Euclidean distances computation
X = [[0], [0]]
Y = [[1], [2]]
D = paired_euclidean_distances(X, Y)
assert_array_almost_equal(D, [1., 2.])
def test_paired_manhattan_distances():
# Check the paired manhattan distances computation
X = [[0], [0]]
Y = [[1], [2]]
D = paired_manhattan_distances(X, Y)
assert_array_almost_equal(D, [1., 2.])
def test_chi_square_kernel():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
Y = rng.random_sample((10, 4))
K_add = additive_chi2_kernel(X, Y)
gamma = 0.1
K = chi2_kernel(X, Y, gamma=gamma)
assert_equal(K.dtype, np.float)
for i, x in enumerate(X):
for j, y in enumerate(Y):
chi2 = -np.sum((x - y) ** 2 / (x + y))
chi2_exp = np.exp(gamma * chi2)
assert_almost_equal(K_add[i, j], chi2)
assert_almost_equal(K[i, j], chi2_exp)
# check diagonal is ones for data with itself
K = chi2_kernel(Y)
assert_array_equal(np.diag(K), 1)
# check off-diagonal is < 1 but > 0:
assert_true(np.all(K > 0))
assert_true(np.all(K - np.diag(np.diag(K)) < 1))
# check that float32 is preserved
X = rng.random_sample((5, 4)).astype(np.float32)
Y = rng.random_sample((10, 4)).astype(np.float32)
K = chi2_kernel(X, Y)
assert_equal(K.dtype, np.float32)
# check integer type gets converted,
# check that zeros are handled
X = rng.random_sample((10, 4)).astype(np.int32)
K = chi2_kernel(X, X)
assert_true(np.isfinite(K).all())
assert_equal(K.dtype, np.float)
# check that kernel of similar things is greater than dissimilar ones
X = [[.3, .7], [1., 0]]
Y = [[0, 1], [.9, .1]]
K = chi2_kernel(X, Y)
assert_greater(K[0, 0], K[0, 1])
assert_greater(K[1, 1], K[1, 0])
# test negative input
assert_raises(ValueError, chi2_kernel, [[0, -1]])
assert_raises(ValueError, chi2_kernel, [[0, -1]], [[-1, -1]])
assert_raises(ValueError, chi2_kernel, [[0, 1]], [[-1, -1]])
# different n_features in X and Y
assert_raises(ValueError, chi2_kernel, [[0, 1]], [[.2, .2, .6]])
# sparse matrices
assert_raises(ValueError, chi2_kernel, csr_matrix(X), csr_matrix(Y))
assert_raises(ValueError, additive_chi2_kernel,
csr_matrix(X), csr_matrix(Y))
def test_kernel_symmetry():
# Valid kernels should be symmetric
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
for kernel in (linear_kernel, polynomial_kernel, rbf_kernel,
sigmoid_kernel, cosine_similarity):
K = kernel(X, X)
assert_array_almost_equal(K, K.T, 15)
def test_kernel_sparse():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
X_sparse = csr_matrix(X)
for kernel in (linear_kernel, polynomial_kernel, rbf_kernel,
sigmoid_kernel, cosine_similarity):
K = kernel(X, X)
K2 = kernel(X_sparse, X_sparse)
assert_array_almost_equal(K, K2)
def test_linear_kernel():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
K = linear_kernel(X, X)
# the diagonal elements of a linear kernel are their squared norm
assert_array_almost_equal(K.flat[::6], [linalg.norm(x) ** 2 for x in X])
def test_rbf_kernel():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
K = rbf_kernel(X, X)
# the diagonal elements of a rbf kernel are 1
assert_array_almost_equal(K.flat[::6], np.ones(5))
def test_cosine_similarity_sparse_output():
# Test if cosine_similarity correctly produces sparse output.
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
Y = rng.random_sample((3, 4))
Xcsr = csr_matrix(X)
Ycsr = csr_matrix(Y)
K1 = cosine_similarity(Xcsr, Ycsr, dense_output=False)
assert_true(issparse(K1))
K2 = pairwise_kernels(Xcsr, Y=Ycsr, metric="cosine")
assert_array_almost_equal(K1.todense(), K2)
def test_cosine_similarity():
# Test the cosine_similarity.
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
Y = rng.random_sample((3, 4))
Xcsr = csr_matrix(X)
Ycsr = csr_matrix(Y)
for X_, Y_ in ((X, None), (X, Y),
(Xcsr, None), (Xcsr, Ycsr)):
# Test that the cosine is kernel is equal to a linear kernel when data
# has been previously normalized by L2-norm.
K1 = pairwise_kernels(X_, Y=Y_, metric="cosine")
X_ = normalize(X_)
if Y_ is not None:
Y_ = normalize(Y_)
K2 = pairwise_kernels(X_, Y=Y_, metric="linear")
assert_array_almost_equal(K1, K2)
def test_check_dense_matrices():
# Ensure that pairwise array check works for dense matrices.
# Check that if XB is None, XB is returned as reference to XA
XA = np.resize(np.arange(40), (5, 8))
XA_checked, XB_checked = check_pairwise_arrays(XA, None)
assert_true(XA_checked is XB_checked)
assert_array_equal(XA, XA_checked)
def test_check_XB_returned():
# Ensure that if XA and XB are given correctly, they return as equal.
# Check that if XB is not None, it is returned equal.
# Note that the second dimension of XB is the same as XA.
XA = np.resize(np.arange(40), (5, 8))
XB = np.resize(np.arange(32), (4, 8))
XA_checked, XB_checked = check_pairwise_arrays(XA, XB)
assert_array_equal(XA, XA_checked)
assert_array_equal(XB, XB_checked)
XB = np.resize(np.arange(40), (5, 8))
XA_checked, XB_checked = check_paired_arrays(XA, XB)
assert_array_equal(XA, XA_checked)
assert_array_equal(XB, XB_checked)
def test_check_different_dimensions():
# Ensure an error is raised if the dimensions are different.
XA = np.resize(np.arange(45), (5, 9))
XB = np.resize(np.arange(32), (4, 8))
assert_raises(ValueError, check_pairwise_arrays, XA, XB)
XB = np.resize(np.arange(4 * 9), (4, 9))
assert_raises(ValueError, check_paired_arrays, XA, XB)
def test_check_invalid_dimensions():
# Ensure an error is raised on 1D input arrays.
XA = np.arange(45)
XB = np.resize(np.arange(32), (4, 8))
assert_raises(ValueError, check_pairwise_arrays, XA, XB)
XA = np.resize(np.arange(45), (5, 9))
XB = np.arange(32)
assert_raises(ValueError, check_pairwise_arrays, XA, XB)
def test_check_sparse_arrays():
# Ensures that checks return valid sparse matrices.
rng = np.random.RandomState(0)
XA = rng.random_sample((5, 4))
XA_sparse = csr_matrix(XA)
XB = rng.random_sample((5, 4))
XB_sparse = csr_matrix(XB)
XA_checked, XB_checked = check_pairwise_arrays(XA_sparse, XB_sparse)
# compare their difference because testing csr matrices for
# equality with '==' does not work as expected.
assert_true(issparse(XA_checked))
assert_equal(abs(XA_sparse - XA_checked).sum(), 0)
assert_true(issparse(XB_checked))
assert_equal(abs(XB_sparse - XB_checked).sum(), 0)
XA_checked, XA_2_checked = check_pairwise_arrays(XA_sparse, XA_sparse)
assert_true(issparse(XA_checked))
assert_equal(abs(XA_sparse - XA_checked).sum(), 0)
assert_true(issparse(XA_2_checked))
assert_equal(abs(XA_2_checked - XA_checked).sum(), 0)
def tuplify(X):
# Turns a numpy matrix (any n-dimensional array) into tuples.
s = X.shape
if len(s) > 1:
# Tuplify each sub-array in the input.
return tuple(tuplify(row) for row in X)
else:
# Single dimension input, just return tuple of contents.
return tuple(r for r in X)
def test_check_tuple_input():
# Ensures that checks return valid tuples.
rng = np.random.RandomState(0)
XA = rng.random_sample((5, 4))
XA_tuples = tuplify(XA)
XB = rng.random_sample((5, 4))
XB_tuples = tuplify(XB)
XA_checked, XB_checked = check_pairwise_arrays(XA_tuples, XB_tuples)
assert_array_equal(XA_tuples, XA_checked)
assert_array_equal(XB_tuples, XB_checked)
def test_check_preserve_type():
# Ensures that type float32 is preserved.
XA = np.resize(np.arange(40), (5, 8)).astype(np.float32)
XB = np.resize(np.arange(40), (5, 8)).astype(np.float32)
XA_checked, XB_checked = check_pairwise_arrays(XA, None)
assert_equal(XA_checked.dtype, np.float32)
# both float32
XA_checked, XB_checked = check_pairwise_arrays(XA, XB)
assert_equal(XA_checked.dtype, np.float32)
assert_equal(XB_checked.dtype, np.float32)
# mismatched A
XA_checked, XB_checked = check_pairwise_arrays(XA.astype(np.float),
XB)
assert_equal(XA_checked.dtype, np.float)
assert_equal(XB_checked.dtype, np.float)
# mismatched B
XA_checked, XB_checked = check_pairwise_arrays(XA,
XB.astype(np.float))
assert_equal(XA_checked.dtype, np.float)
assert_equal(XB_checked.dtype, np.float)
| bsd-3-clause |
cosmicBboy/themis-ml | themis_ml/postprocessing/reject_option_classification.py | 1 | 6231 | """Post-processing estimators to make fair predictions."""
import numpy as np
from sklearn.utils.validation import check_array, check_X_y, check_is_fitted
from sklearn.base import (
BaseEstimator, ClassifierMixin, MetaEstimatorMixin, clone)
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import accuracy_score
from ..checks import check_binary
DECISION_THRESHOLD = 0.5
DEFAULT_ENSEMBLE_ESTIMATORS = [
LogisticRegression(), DecisionTreeClassifier()]
class SingleROClassifier(
BaseEstimator, ClassifierMixin, MetaEstimatorMixin):
S_ON_FIT = False
S_ON_PREDICT = True
def __init__(self, estimator=LogisticRegression(), theta=0.1, demote=True):
"""Initialize Single Reject-Option Classifier.
This fairness-aware technique produces fair predictions with the
following heuristic:
        - training an initial classifier on dataset D
- generating predicted probabilities on the test set
- computing the proximity of each prediction to the decision boundary
learned by the classifier
        - within the critical region theta around the decision boundary
          (implemented here as |p - 0.5| < theta, with 0 < theta < 0.5),
          X_s1 (disadvantaged observations) are assigned y+ and X_s0
          (advantaged observations) are assigned y-.
param BaseEstimator estimator: LogisticRegression by default
        param float theta: critical region threshold for demoting advantaged
        group observations and promoting disadvantaged group observations
param bool demote: if True, demotes +ve labelled advantaged group
observations at predict time. If False, only promote -ve labelled
disadvantaged group observations at predict time.
"""
# TODO: assert that estimator has a predict_proba method.
self.estimator = estimator
self.theta = theta
self.demote = demote
def fit(self, X, y):
"""Fit model."""
X, y = check_X_y(X, y)
y = check_binary(y)
self.estimator_ = clone(self.estimator)
self.estimator_.fit(X, y)
return self
def predict(self, X, s):
"""Generate predicted labels."""
return (
self.predict_proba(X, s)[:, 1] > DECISION_THRESHOLD).astype(int)
def predict_proba(self, X, s):
"""Generate predicted probabilities."""
pred_prob = self._raw_predict_proba(X, s)[:, 1]
return self._flip_predictions(pred_prob, s)
def _raw_predict_proba(self, X, s):
X = check_array(X)
s = check_binary(np.array(s).astype(int))
check_is_fitted(self, ["estimator_"])
return self.estimator_.predict_proba(X)
def _flip_predictions(self, pred_prob, s):
"""Flip predictions based on protected class membership.
:param np.array[float] pred_prob: predicted probabilities
:param np.array[int] s: protected class membership, where
1 = disadvantaged group, 0 = advantaged group.
"""
flip_candidates = np.ones_like(pred_prob).astype(bool) \
if self.demote else s == 1
# find index where predictions are below theta threshold
under_theta_index = np.where(
(np.abs(pred_prob - 0.5) < self.theta) & flip_candidates)
# flip the probability
pred_prob[under_theta_index] = 1 - pred_prob[under_theta_index]
pred_prob = pred_prob.reshape(-1, 1)
return np.concatenate([1 - pred_prob, pred_prob], axis=1)
class MultipleROClassifier(SingleROClassifier):
def __init__(
self, estimators=DEFAULT_ENSEMBLE_ESTIMATORS,
theta=0.1, demote=True, weighted_prediction=True):
"""Initialize Multiple Reject-Option Classifier.
param list|tuple[BaseEstimator] estimators: A list or tuple of
estimators to train multiple classifiers. By default, use
LogisticRegression and DecisionTreeClassifier.
param bool demote: if True, demotes +ve labelled advantaged group
observations at predict time. If False, only promote -ve labelled
disadvantaged group observations at predict time.
param bool weighted_prediction: if True, uses the training accuracy
score of each estimator as a weight when computing the ensembled
predicted probability. If False, the ensembled probability is the
unweighted mean of the per-estimator probabilities.
"""
# TODO: assert that all estimators have a predict_proba method.
# TODO: add support for customizing the performance function used
# to compute the estimator weights used in the ensembled prediction.
# Currently this class only supports accuracy.
super(MultipleROClassifier, self).__init__()
self.estimators = estimators
self.demote = demote
self.weighted_prediction = weighted_prediction
def fit(self, X, y):
"""Fit model."""
X, y = check_X_y(X, y)
y = check_binary(y)
self.estimators_ = []
self.pred_weights_ = []
for estimator in self.estimators:
e = clone(estimator)
self.estimators_.append(e.fit(X, y))
# uniform weights if weighted_prediction is False
self.pred_weights_.append(
accuracy_score(y, e.predict(X)) if self.weighted_prediction
else 1.0)
self.pred_weights_ = np.array(self.pred_weights_)
return self
def _raw_predict_proba(self, X, s):
X = check_array(X)
s = check_binary(np.array(s).astype(int))
check_is_fitted(self, ["estimators_", "pred_weights_"])
# combine the per-estimator probabilities, weighting each one by the
# accuracy score learned during fit (the weights are all 1.0 when
# weighted_prediction is False)
pred_probs = np.concatenate([
e.predict_proba(X)[:, 1].reshape(-1, 1) * w for e, w in
zip(self.estimators_, self.pred_weights_)
], axis=1).sum(axis=1) / self.pred_weights_.sum()
pred_probs = pred_probs.reshape(-1, 1)
return np.concatenate([1 - pred_probs, pred_probs], axis=1)
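# ---------------------------------------------------------------------------
# Minimal usage sketch (not part of the original module). The synthetic data
# below is made up purely for illustration. Because of the relative import of
# `check_binary` above, this file cannot be run directly as a script; run it
# as a package module instead, e.g.
#   python -m themis_ml.postprocessing.reject_option_classification
if __name__ == "__main__":
    rng = np.random.RandomState(0)
    X = rng.normal(size=(100, 3))
    s = rng.randint(0, 2, size=100)  # 1 = disadvantaged group, 0 = advantaged
    y = (X[:, 0] + 0.5 * s + rng.normal(scale=0.5, size=100) > 0).astype(int)

    # single-estimator reject-option classifier
    single = SingleROClassifier(theta=0.1).fit(X, y)
    print("single ROC predictions:", single.predict(X, s)[:10])

    # ensembled reject-option classifier with accuracy-weighted estimators
    multi = MultipleROClassifier().fit(X, y)
    print("ensemble ROC probabilities:")
    print(multi.predict_proba(X, s)[:3])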
| mit |
MikeWoodward/UT330B | UT330BUI/view/readdisplay.py | 1 | 5818 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on: 15:54:55 05-Jan-2020
Author: Mike Woodward
This code is licensed under the MIT license
"""
# %%---------------------------------------------------------------------------
# Imports
# -----------------------------------------------------------------------------
from bokeh.models.widgets import (Div, FileInput, Panel)
from bokeh.plotting import Figure
from bokeh.layouts import column, row
from bokeh.models import ColumnDataSource, LinearAxis, Range1d
import pandas as pd
from io import BytesIO
import base64
# %%---------------------------------------------------------------------------
# ReadDisplay
# -----------------------------------------------------------------------------
class ReadDisplay():
"""Reads data saved to file into the system and displays it."""
# %%
def __init__(self, controller):
"""Method sets up object. First part of two-part initialization."""
self.controller = controller
# Header for section to get file
self.file_header =\
Div(text="""<span style='font-weight:bold'>"""
"""Choose the file to display</span>""",
sizing_mode='stretch_width')
# Selects the data file to read into the system
self.select_file = FileInput(accept=".csv",
sizing_mode='stretch_width')
# Shows summary and status for data read in.
self.status = Div(text="""No file connected""",
sizing_mode='stretch_width')
# Chart to show temperature and/or humidity.
self.temphumidity = Figure(x_axis_type='datetime',
title="Humidity & temperature by datetime",
x_axis_label='Datetime',
y_axis_label='Temperature (C)')
df = pd.DataFrame(
{'Timestamp': [pd.to_datetime('4/12/2016 8:15:33 AM')],
'Temperature (C)': [25.0],
'Relative humidity (%)': [40.0]})
self.cds = ColumnDataSource(df)
self.temphumidity.line(x='Timestamp',
y='Temperature (C)',
line_color='red',
legend_label='Temperature (C)',
line_width=2,
line_alpha=0.5,
source=self.cds)
self.temphumidity.extra_y_ranges = \
{"humidity": Range1d(start=0, end=100)}
self.temphumidity.add_layout(
LinearAxis(y_range_name="humidity",
axis_label='Humidity (%)'), 'right')
self.temphumidity.line(x='Timestamp',
y='Relative humidity (%)',
legend_label='Relative humidity (%)',
line_color='blue',
line_width=2,
line_alpha=0.5,
source=self.cds,
y_range_name="humidity")
self.temphumidity.legend.click_policy = "hide"
self.temphumidity.title.text_font_size = '20px'
self.temphumidity.xaxis.axis_label_text_font_size = '15px'
self.temphumidity.xaxis.major_label_text_font_size = '15px'
self.temphumidity.yaxis.axis_label_text_font_size = '15px'
self.temphumidity.yaxis.major_label_text_font_size = '15px'
# Layout
self.layout = row(
children=[column(children=[self.file_header,
self.select_file,
self.status],
sizing_mode='fixed',
width=250, height=80),
column(self.temphumidity, sizing_mode='stretch_both')],
sizing_mode='stretch_both')
self.panel = Panel(child=self.layout, title='Read & display')
# %%
def setup(self):
"""Method sets up object. Second part of two-part initialization."""
self.select_file.on_change("value", self.callback_select_file)
# %%
def update(self):
"""Method updates object."""
pass
# %%
def callback_select_file(self, attrname, old, new):
"""Callback method for select file"""
self.status.text = 'Reading in the data file....'
# Convert the data to a Pandas dataframe
convert = BytesIO(base64.b64decode(self.select_file.value))
df = pd.read_csv(convert)
# Check the Pandas dataframe has the correct fields
if set(df.columns) != set(['Timestamp',
'Temperature (C)',
'Relative humidity (%)',
'Pressure (Pa)']):
self.status.text = ("""The file {0} has the columns {1} """
"""when it should have the columns {2} """
.format(self.select_file.filename,
set(df.columns),
set(['Timestamp',
'Temperature (C)',
'Relative humidity (%)',
'Pressure (Pa)'])))
return
# Make sure the data types are correct
df['Timestamp'] = pd.to_datetime(df['Timestamp'])
self.cds.data = {'Timestamp': df['Timestamp'],
'Temperature (C)': df['Temperature (C)'],
'Relative humidity (%)': df['Relative humidity (%)']}
self.status.text = 'Read in the data file correctly.'
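# ---------------------------------------------------------------------------
# Standalone sketch (not part of the original class). Bokeh's FileInput.value
# holds the uploaded file as a base64-encoded string, so the decode path used
# in callback_select_file can be exercised without a running Bokeh server.
# The CSV content below is made up for illustration; real files come from the
# UT330 data logger export.
if __name__ == "__main__":
    demo_csv = (
        "Timestamp,Temperature (C),Relative humidity (%),Pressure (Pa)\n"
        "2020-01-05 15:00:00,21.5,45.0,101325\n"
        "2020-01-05 15:10:00,21.7,44.8,101320\n")
    encoded = base64.b64encode(demo_csv.encode("utf-8")).decode("ascii")
    decoded = pd.read_csv(BytesIO(base64.b64decode(encoded)))
    decoded['Timestamp'] = pd.to_datetime(decoded['Timestamp'])
    print(decoded.dtypes)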
| mit |
germs-lab/tutorials | docs/pandaseq/source/conf.py | 1 | 8218 | # -*- coding: utf-8 -*-
#
# Pandaseq Tutorial documentation build configuration file, created by
# sphinx-quickstart on Mon Feb 2 15:31:21 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Pandaseq Tutorial'
copyright = u'2015, Adina Howe'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.0'
# The full version, including alpha/beta/rc tags.
release = '0.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'PandaseqTutorialdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'PandaseqTutorial.tex', u'Pandaseq Tutorial Documentation',
u'Adina Howe', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'pandaseqtutorial', u'Pandaseq Tutorial Documentation',
[u'Adina Howe'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'PandaseqTutorial', u'Pandaseq Tutorial Documentation',
u'Adina Howe', 'PandaseqTutorial', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| mit |
uvemas/ViTables | examples/scripts/pandas_timeseries1.py | 1 | 1514 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (C) 2008-2019 Vicent Mas. All rights reserved
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Author: Vicent Mas - [email protected]
"""Storing time series created with Pandas in PyTables. Example 1.
"""
import os
import numpy as np
import pandas as pd
# Create a DataFrame with a DateTimeIndex and linear data
dti = pd.date_range(start='1/1/2019', periods=365, name='Days')
ts = pd.Series(np.arange(1, 366), index=dti)
df = pd.DataFrame(ts)
# Create an empty HDFStore
output_dir = '../timeseries'
hdf5_name = 'pandas_test1.hdf5'
filepath_hdf5 = os.path.join(output_dir, hdf5_name)
try:
os.mkdir(output_dir)
except OSError:
pass
finally:
store = pd.HDFStore(filepath_hdf5)
# Store the dataframe as a PyTables Table under the root group
store.append('', df)
store.close()
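# Read-back sketch (not part of the original example): reopen the store
# read-only and recover what was appended above. The key listing is used
# because the exact key pandas creates for the empty-name append above is an
# implementation detail.
store = pd.HDFStore(filepath_hdf5, mode='r')
try:
    keys = store.keys()
    print(keys)
    if keys:
        recovered = store[keys[0]]
        print(recovered.head())
finally:
    store.close()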
| gpl-3.0 |
ptrendx/mxnet | python/mxnet/notebook/callback.py | 22 | 14081 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=fixme, invalid-name, missing-docstring, no-init, old-style-class, multiple-statements
# pylint: disable=arguments-differ, too-many-arguments, no-member
"""Visualization callback function
"""
try:
import datetime
except ImportError:
class Datetime_Failed_To_Import: pass
datetime = Datetime_Failed_To_Import
try:
import bokeh.plotting
except ImportError:
pass
try:
from collections import defaultdict
except ImportError:
class Defaultdict_Failed_To_Import: pass
defaultdict = Defaultdict_Failed_To_Import
try:
import pandas as pd
except ImportError:
class Pandas_Failed_To_Import: pass
pd = Pandas_Failed_To_Import
import time
# pylint: enable=missing-docstring, no-init, old-style-class, multiple-statements
def _add_new_columns(dataframe, metrics):
"""Add new metrics as new columns to selected pandas dataframe.
Parameters
----------
dataframe : pandas.DataFrame
Selected dataframe needs to be modified.
metrics : metric.EvalMetric
New metrics to be added.
"""
#TODO(leodirac): we don't really need to do this on every update. Optimize
new_columns = set(metrics.keys()) - set(dataframe.columns)
for col in new_columns:
dataframe[col] = None
def _extend(baseData, newData):
"""Assuming a is shorter than b, copy the end of b onto a
"""
baseData.extend(newData[len(baseData):])
class PandasLogger(object):
"""Logs statistics about training run into Pandas dataframes.
Records three separate dataframes: train, eval, epoch.
Parameters
----------
batch_size: int
batch_size of data
frequent: int
How many training mini-batches between calculations.
Defaults to calculating every 50 batches.
(Eval data is stored once per epoch over the entire
eval data set.)
"""
def __init__(self, batch_size, frequent=50):
self.batch_size = batch_size
self.frequent = frequent
self._dataframes = {
'train': pd.DataFrame(),
'eval': pd.DataFrame(),
'epoch': pd.DataFrame(),
}
self.last_time = time.time()
self.start_time = datetime.datetime.now()
self.last_epoch_time = datetime.datetime.now()
@property
def train_df(self):
"""The dataframe with training data.
This has metrics for training minibatches, logged every
"frequent" batches. (frequent is a constructor param)
"""
return self._dataframes['train']
@property
def eval_df(self):
"""The dataframe with evaluation data.
This has validation scores calculated at the end of each epoch.
"""
return self._dataframes['eval']
@property
def epoch_df(self):
"""The dataframe with epoch data.
This has timing information.
"""
return self._dataframes['epoch']
@property
def all_dataframes(self):
"""Return a dict of dataframes
"""
return self._dataframes
def elapsed(self):
"""Calcaulate the elapsed time from training starting.
"""
return datetime.datetime.now() - self.start_time
def append_metrics(self, metrics, df_name):
"""Append new metrics to selected dataframes.
Parameters
----------
metrics : metric.EvalMetric
New metrics to be added.
df_name : str
Name of the dataframe to be modified.
"""
dataframe = self._dataframes[df_name]
_add_new_columns(dataframe, metrics)
dataframe.loc[len(dataframe)] = metrics
def train_cb(self, param):
"""Callback funtion for training.
"""
if param.nbatch % self.frequent == 0:
self._process_batch(param, 'train')
def eval_cb(self, param):
"""Callback function for evaluation
"""
self._process_batch(param, 'eval')
def _process_batch(self, param, dataframe):
"""Update parameters for selected dataframe after a completed batch
Parameters
----------
dataframe : str
Name of the dataframe to update ('train' or 'eval').
"""
now = time.time()
if param.eval_metric is not None:
metrics = dict(param.eval_metric.get_name_value())
param.eval_metric.reset()
else:
metrics = {}
# #11504
try:
speed = self.frequent / (now - self.last_time)
except ZeroDivisionError:
speed = float('inf')
metrics['batches_per_sec'] = speed
metrics['records_per_sec'] = speed * self.batch_size
metrics['elapsed'] = self.elapsed()
metrics['minibatch_count'] = param.nbatch
metrics['epoch'] = param.epoch
self.append_metrics(metrics, dataframe)
self.last_time = now
def epoch_cb(self):
"""Callback function after each epoch. Now it records each epoch time
and append it to epoch dataframe.
"""
metrics = {}
metrics['elapsed'] = self.elapsed()
now = datetime.datetime.now()
metrics['epoch_time'] = now - self.last_epoch_time
self.append_metrics(metrics, 'epoch')
self.last_epoch_time = now
def callback_args(self):
"""returns **kwargs parameters for model.fit()
to enable all callbacks. e.g.
model.fit(X=train, eval_data=test, **pdlogger.callback_args())
"""
return {
'batch_end_callback': self.train_cb,
'eval_end_callback': self.eval_cb,
'epoch_end_callback': self.epoch_cb,
}
class LiveBokehChart(object):
"""Callback object that renders a bokeh chart in a jupyter notebook
that gets updated as the training run proceeds.
Requires a PandasLogger to collect the data it will render.
This is an abstract base-class. Sub-classes define the specific chart.
"""
def __init__(self, pandas_logger, metric_name, display_freq=10,
batch_size=None, frequent=50):
if pandas_logger:
self.pandas_logger = pandas_logger
else:
self.pandas_logger = PandasLogger(batch_size=batch_size, frequent=frequent)
self.display_freq = display_freq
self.last_update = time.time()
#NOTE: would be nice to auto-detect the metric_name if there's only one.
self.metric_name = metric_name
bokeh.io.output_notebook()
self.handle = self.setup_chart()
def setup_chart(self):
"""Render a bokeh object and return a handle to it.
"""
raise NotImplementedError("Incomplete base class: LiveBokehChart must be sub-classed")
def update_chart_data(self):
"""Update the bokeh object with new data.
"""
raise NotImplementedError("Incomplete base class: LiveBokehChart must be sub-classed")
def interval_elapsed(self):
"""Check whether it is time to update plot.
Returns
-------
Boolean value of whether to update now
"""
return time.time() - self.last_update > self.display_freq
def _push_render(self):
"""Render the plot with bokeh.io and push to notebook.
"""
bokeh.io.push_notebook(handle=self.handle)
self.last_update = time.time()
def _do_update(self):
"""Update the plot chart data and render the updates.
"""
self.update_chart_data()
self._push_render()
def batch_cb(self, param):
"""Callback function after a completed batch.
"""
if self.interval_elapsed():
self._do_update()
def eval_cb(self, param):
"""Callback function after an evaluation.
"""
# After eval results, force an update.
self._do_update()
def callback_args(self):
"""returns **kwargs parameters for model.fit()
to enable all callbacks. e.g.
model.fit(X=train, eval_data=test, **pdlogger.callback_args())
"""
return {
'batch_end_callback': self.batch_cb,
'eval_end_callback': self.eval_cb,
}
class LiveTimeSeries(LiveBokehChart):
"""Plot the elasped time during live learning.
"""
def __init__(self, **fig_params):
self.fig = bokeh.plotting.Figure(x_axis_type='datetime',
x_axis_label='Elapsed time', **fig_params)
super(LiveTimeSeries, self).__init__(None, None) # TODO: clean up this class hierarchy
def setup_chart(self):
self.start_time = datetime.datetime.now()
self.x_axis_val = []
self.y_axis_val = []
self.fig.line(self.x_axis_val, self.y_axis_val)
return bokeh.plotting.show(self.fig, notebook_handle=True)
def elapsed(self):
"""Calculate elasped time from starting
"""
return datetime.datetime.now() - self.start_time
def update_chart_data(self, value):
self.x_axis_val.append(self.elapsed())
self.y_axis_val.append(value)
self._push_render()
class LiveLearningCurve(LiveBokehChart):
"""Draws a learning curve with training & validation metrics
over time as the network trains.
"""
def __init__(self, metric_name, display_freq=10, frequent=50):
self.frequent = frequent
self.start_time = datetime.datetime.now()
self._data = {
'train': {'elapsed': [],},
'eval': {'elapsed': [],},
}
super(LiveLearningCurve, self).__init__(None, metric_name, display_freq, frequent=frequent)
def setup_chart(self):
self.fig = bokeh.plotting.Figure(x_axis_type='datetime',
x_axis_label='Training time')
#TODO(leodirac): There's got to be a better way to
# get a bokeh plot to dynamically update as a pandas dataframe changes,
# instead of copying into a list.
# I can't figure it out though. Ask a pyData expert.
self.x_axis_val1 = []
self.y_axis_val1 = []
self.train1 = self.fig.line(self.x_axis_val1, self.y_axis_val1, line_dash='dotted',
alpha=0.3, legend="train")
self.train2 = self.fig.circle(self.x_axis_val1, self.y_axis_val1, size=1.5,
line_alpha=0.3, fill_alpha=0.3, legend="train")
self.train2.visible = False # Turn this on later.
self.x_axis_val2 = []
self.y_axis_val2 = []
self.valid1 = self.fig.line(self.x_axis_val2, self.y_axis_val2,
line_color='green',
line_width=2,
legend="validation")
self.valid2 = self.fig.circle(self.x_axis_val2,
self.y_axis_val2,
line_color='green',
line_width=2, legend=None)
self.fig.legend.location = "bottom_right"
self.fig.yaxis.axis_label = self.metric_name
return bokeh.plotting.show(self.fig, notebook_handle=True)
def _do_update(self):
self.update_chart_data()
self._push_render()
def batch_cb(self, param):
if param.nbatch % self.frequent == 0:
self._process_batch(param, 'train')
if self.interval_elapsed():
self._do_update()
def eval_cb(self, param):
# After eval results, force an update.
self._process_batch(param, 'eval')
self._do_update()
def _process_batch(self, param, df_name):
"""Update selected dataframe after a completed batch
Parameters
----------
df_name : str
Selected dataframe name needs to be modified.
"""
if param.eval_metric is not None:
metrics = dict(param.eval_metric.get_name_value())
param.eval_metric.reset()
else:
metrics = {}
metrics['elapsed'] = datetime.datetime.now() - self.start_time
for key, value in metrics.items():
if key not in self._data[df_name]:
self._data[df_name][key] = []
self._data[df_name][key].append(value)
def update_chart_data(self):
dataframe = self._data['train']
if len(dataframe['elapsed']):
_extend(self.x_axis_val1, dataframe['elapsed'])
_extend(self.y_axis_val1, dataframe[self.metric_name])
dataframe = self._data['eval']
if len(dataframe['elapsed']):
_extend(self.x_axis_val2, dataframe['elapsed'])
_extend(self.y_axis_val2, dataframe[self.metric_name])
if len(dataframe['elapsed']) > 10:
self.train1.visible = False
self.train2.visible = True
def args_wrapper(*args):
"""Generates callback arguments for model.fit()
for a set of callback objects.
Callback objects like PandasLogger(), LiveLearningCurve()
get passed in. This assembles all their callback arguments.
"""
out = defaultdict(list)
for callback in args:
callback_args = callback.callback_args()
for k, v in callback_args.items():
out[k].append(v)
return dict(out)
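# ---------------------------------------------------------------------------
# Usage sketch (not part of the original module): args_wrapper collects the
# callback keyword arguments from one or more callback objects so they can be
# splatted into Module.fit(). PandasLogger only needs pandas, so it can be
# exercised here without running any MXNet training; `mod`, `train_iter` and
# `val_iter` in the comment below are placeholders, not objects defined here.
if __name__ == '__main__':
    logger = PandasLogger(batch_size=128, frequent=50)
    fit_kwargs = args_wrapper(logger)
    print(sorted(fit_kwargs.keys()))
    # ['batch_end_callback', 'epoch_end_callback', 'eval_end_callback']
    # In a real training run these are passed straight through, e.g.:
    # mod.fit(train_iter, eval_data=val_iter, num_epoch=10, **fit_kwargs)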
| apache-2.0 |
drpngx/tensorflow | tensorflow/examples/learn/hdf5_classification.py | 75 | 2899 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of DNNClassifier for Iris plant dataset, hdf5 format."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from sklearn import datasets
from sklearn import metrics
from sklearn import model_selection
import tensorflow as tf
import h5py # pylint: disable=g-bad-import-order
X_FEATURE = 'x' # Name of the input feature.
def main(unused_argv):
# Load dataset.
iris = datasets.load_iris()
x_train, x_test, y_train, y_test = model_selection.train_test_split(
iris.data, iris.target, test_size=0.2, random_state=42)
# Note that we are saving and loading iris data in h5 format as a simple
# demonstration here.
h5f = h5py.File('/tmp/test_hdf5.h5', 'w')
h5f.create_dataset('X_train', data=x_train)
h5f.create_dataset('X_test', data=x_test)
h5f.create_dataset('y_train', data=y_train)
h5f.create_dataset('y_test', data=y_test)
h5f.close()
h5f = h5py.File('/tmp/test_hdf5.h5', 'r')
x_train = np.array(h5f['X_train'])
x_test = np.array(h5f['X_test'])
y_train = np.array(h5f['y_train'])
y_test = np.array(h5f['y_test'])
# Build 3 layer DNN with 10, 20, 10 units respectively.
feature_columns = [
tf.feature_column.numeric_column(
X_FEATURE, shape=np.array(x_train).shape[1:])]
classifier = tf.estimator.DNNClassifier(
feature_columns=feature_columns, hidden_units=[10, 20, 10], n_classes=3)
# Train.
train_input_fn = tf.estimator.inputs.numpy_input_fn(
x={X_FEATURE: x_train}, y=y_train, num_epochs=None, shuffle=True)
classifier.train(input_fn=train_input_fn, steps=200)
# Predict.
test_input_fn = tf.estimator.inputs.numpy_input_fn(
x={X_FEATURE: x_test}, y=y_test, num_epochs=1, shuffle=False)
predictions = classifier.predict(input_fn=test_input_fn)
y_predicted = np.array(list(p['class_ids'] for p in predictions))
y_predicted = y_predicted.reshape(np.array(y_test).shape)
# Score with sklearn.
score = metrics.accuracy_score(y_test, y_predicted)
print('Accuracy (sklearn): {0:f}'.format(score))
# Score with tensorflow.
scores = classifier.evaluate(input_fn=test_input_fn)
print('Accuracy (tensorflow): {0:f}'.format(scores['accuracy']))
if __name__ == '__main__':
tf.app.run()
| apache-2.0 |
tejasnikumbh/ThesisCode | lib/python2.7/site-packages/numpy/linalg/linalg.py | 35 | 67345 | """Lite version of scipy.linalg.
Notes
-----
This module is a lite version of the linalg.py module in SciPy which
contains high-level Python interface to the LAPACK library. The lite
version only accesses the following LAPACK functions: dgesv, zgesv,
dgeev, zgeev, dgesdd, zgesdd, dgelsd, zgelsd, dsyevd, zheevd, dgetrf,
zgetrf, dpotrf, zpotrf, dgeqrf, zgeqrf, zungqr, dorgqr.
"""
from __future__ import division, absolute_import, print_function
__all__ = ['matrix_power', 'solve', 'tensorsolve', 'tensorinv', 'inv',
'cholesky', 'eigvals', 'eigvalsh', 'pinv', 'slogdet', 'det',
'svd', 'eig', 'eigh', 'lstsq', 'norm', 'qr', 'cond', 'matrix_rank',
'LinAlgError']
import warnings
from numpy.core import (
array, asarray, zeros, empty, empty_like, transpose, intc, single, double,
csingle, cdouble, inexact, complexfloating, newaxis, ravel, all, Inf, dot,
add, multiply, sqrt, maximum, fastCopyAndTranspose, sum, isfinite, size,
finfo, errstate, geterrobj, longdouble, rollaxis, amin, amax, product, abs,
broadcast
)
from numpy.lib import triu, asfarray
from numpy.linalg import lapack_lite, _umath_linalg
from numpy.matrixlib.defmatrix import matrix_power
from numpy.compat import asbytes
# For Python2/3 compatibility
_N = asbytes('N')
_V = asbytes('V')
_A = asbytes('A')
_S = asbytes('S')
_L = asbytes('L')
fortran_int = intc
# Error object
class LinAlgError(Exception):
"""
Generic Python-exception-derived object raised by linalg functions.
General purpose exception class, derived from Python's exception.Exception
class, programmatically raised in linalg functions when a Linear
Algebra-related condition would prevent further correct execution of the
function.
Parameters
----------
None
Examples
--------
>>> from numpy import linalg as LA
>>> LA.inv(np.zeros((2,2)))
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "...linalg.py", line 350,
in inv return wrap(solve(a, identity(a.shape[0], dtype=a.dtype)))
File "...linalg.py", line 249,
in solve
raise LinAlgError('Singular matrix')
numpy.linalg.LinAlgError: Singular matrix
"""
pass
# Dealing with errors in _umath_linalg
_linalg_error_extobj = None
def _determine_error_states():
global _linalg_error_extobj
errobj = geterrobj()
bufsize = errobj[0]
with errstate(invalid='call', over='ignore',
divide='ignore', under='ignore'):
invalid_call_errmask = geterrobj()[1]
_linalg_error_extobj = [bufsize, invalid_call_errmask, None]
_determine_error_states()
def _raise_linalgerror_singular(err, flag):
raise LinAlgError("Singular matrix")
def _raise_linalgerror_nonposdef(err, flag):
raise LinAlgError("Matrix is not positive definite")
def _raise_linalgerror_eigenvalues_nonconvergence(err, flag):
raise LinAlgError("Eigenvalues did not converge")
def _raise_linalgerror_svd_nonconvergence(err, flag):
raise LinAlgError("SVD did not converge")
def get_linalg_error_extobj(callback):
extobj = list(_linalg_error_extobj)
extobj[2] = callback
return extobj
def _makearray(a):
new = asarray(a)
wrap = getattr(a, "__array_prepare__", new.__array_wrap__)
return new, wrap
def isComplexType(t):
return issubclass(t, complexfloating)
_real_types_map = {single : single,
double : double,
csingle : single,
cdouble : double}
_complex_types_map = {single : csingle,
double : cdouble,
csingle : csingle,
cdouble : cdouble}
def _realType(t, default=double):
return _real_types_map.get(t, default)
def _complexType(t, default=cdouble):
return _complex_types_map.get(t, default)
def _linalgRealType(t):
"""Cast the type t to either double or cdouble."""
return double
def _commonType(*arrays):
# in lite version, use higher precision (always double or cdouble)
result_type = single
is_complex = False
for a in arrays:
if issubclass(a.dtype.type, inexact):
if isComplexType(a.dtype.type):
is_complex = True
rt = _realType(a.dtype.type, default=None)
if rt is None:
# unsupported inexact scalar
raise TypeError("array type %s is unsupported in linalg" %
(a.dtype.name,))
else:
rt = double
if rt is double:
result_type = double
if is_complex:
t = cdouble
result_type = _complex_types_map[result_type]
else:
t = double
return t, result_type
# _fastCopyAndTranpose assumes the input is 2D (as all the calls in here are).
_fastCT = fastCopyAndTranspose
def _to_native_byte_order(*arrays):
ret = []
for arr in arrays:
if arr.dtype.byteorder not in ('=', '|'):
ret.append(asarray(arr, dtype=arr.dtype.newbyteorder('=')))
else:
ret.append(arr)
if len(ret) == 1:
return ret[0]
else:
return ret
def _fastCopyAndTranspose(type, *arrays):
cast_arrays = ()
for a in arrays:
if a.dtype.type is type:
cast_arrays = cast_arrays + (_fastCT(a),)
else:
cast_arrays = cast_arrays + (_fastCT(a.astype(type)),)
if len(cast_arrays) == 1:
return cast_arrays[0]
else:
return cast_arrays
def _assertRank2(*arrays):
for a in arrays:
if len(a.shape) != 2:
raise LinAlgError('%d-dimensional array given. Array must be '
'two-dimensional' % len(a.shape))
def _assertRankAtLeast2(*arrays):
for a in arrays:
if len(a.shape) < 2:
raise LinAlgError('%d-dimensional array given. Array must be '
'at least two-dimensional' % len(a.shape))
def _assertSquareness(*arrays):
for a in arrays:
if max(a.shape) != min(a.shape):
raise LinAlgError('Array must be square')
def _assertNdSquareness(*arrays):
for a in arrays:
if max(a.shape[-2:]) != min(a.shape[-2:]):
raise LinAlgError('Last 2 dimensions of the array must be square')
def _assertFinite(*arrays):
for a in arrays:
if not (isfinite(a).all()):
raise LinAlgError("Array must not contain infs or NaNs")
def _assertNoEmpty2d(*arrays):
for a in arrays:
if a.size == 0 and product(a.shape[-2:]) == 0:
raise LinAlgError("Arrays cannot be empty")
# Linear equations
def tensorsolve(a, b, axes=None):
"""
Solve the tensor equation ``a x = b`` for x.
It is assumed that all indices of `x` are summed over in the product,
together with the rightmost indices of `a`, as is done in, for example,
``tensordot(a, x, axes=len(b.shape))``.
Parameters
----------
a : array_like
Coefficient tensor, of shape ``b.shape + Q``. `Q`, a tuple, equals
the shape of that sub-tensor of `a` consisting of the appropriate
number of its rightmost indices, and must be such that
``prod(Q) == prod(b.shape)`` (in which sense `a` is said to be
'square').
b : array_like
Right-hand tensor, which can be of any shape.
axes : tuple of ints, optional
Axes in `a` to reorder to the right, before inversion.
If None (default), no reordering is done.
Returns
-------
x : ndarray, shape Q
Raises
------
LinAlgError
If `a` is singular or not 'square' (in the above sense).
See Also
--------
tensordot, tensorinv, einsum
Examples
--------
>>> a = np.eye(2*3*4)
>>> a.shape = (2*3, 4, 2, 3, 4)
>>> b = np.random.randn(2*3, 4)
>>> x = np.linalg.tensorsolve(a, b)
>>> x.shape
(2, 3, 4)
>>> np.allclose(np.tensordot(a, x, axes=3), b)
True
"""
a, wrap = _makearray(a)
b = asarray(b)
an = a.ndim
if axes is not None:
allaxes = list(range(0, an))
for k in axes:
allaxes.remove(k)
allaxes.insert(an, k)
a = a.transpose(allaxes)
oldshape = a.shape[-(an-b.ndim):]
prod = 1
for k in oldshape:
prod *= k
a = a.reshape(-1, prod)
b = b.ravel()
res = wrap(solve(a, b))
res.shape = oldshape
return res
def solve(a, b):
"""
Solve a linear matrix equation, or system of linear scalar equations.
Computes the "exact" solution, `x`, of the well-determined, i.e., full
rank, linear matrix equation `ax = b`.
Parameters
----------
a : (..., M, M) array_like
Coefficient matrix.
b : {(..., M,), (..., M, K)}, array_like
Ordinate or "dependent variable" values.
Returns
-------
x : {(..., M,), (..., M, K)} ndarray
Solution to the system a x = b. Returned shape is identical to `b`.
Raises
------
LinAlgError
If `a` is singular or not square.
Notes
-----
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
The solutions are computed using LAPACK routine _gesv
`a` must be square and of full-rank, i.e., all rows (or, equivalently,
columns) must be linearly independent; if either is not true, use
`lstsq` for the least-squares best "solution" of the
system/equation.
References
----------
.. [1] G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando,
FL, Academic Press, Inc., 1980, pg. 22.
Examples
--------
Solve the system of equations ``3 * x0 + x1 = 9`` and ``x0 + 2 * x1 = 8``:
>>> a = np.array([[3,1], [1,2]])
>>> b = np.array([9,8])
>>> x = np.linalg.solve(a, b)
>>> x
array([ 2., 3.])
Check that the solution is correct:
>>> np.allclose(np.dot(a, x), b)
True
"""
a, _ = _makearray(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
b, wrap = _makearray(b)
t, result_t = _commonType(a, b)
# We use the b = (..., M,) logic, only if the number of extra dimensions
# match exactly
if b.ndim == a.ndim - 1:
if a.shape[-1] == 0 and b.shape[-1] == 0:
# Legal, but the ufunc cannot handle the 0-sized inner dims
# let the ufunc handle all wrong cases.
a = a.reshape(a.shape[:-1])
bc = broadcast(a, b)
return wrap(empty(bc.shape, dtype=result_t))
gufunc = _umath_linalg.solve1
else:
if b.size == 0:
if (a.shape[-1] == 0 and b.shape[-2] == 0) or b.shape[-1] == 0:
a = a[:,:1].reshape(a.shape[:-1] + (1,))
bc = broadcast(a, b)
return wrap(empty(bc.shape, dtype=result_t))
gufunc = _umath_linalg.solve
signature = 'DD->D' if isComplexType(t) else 'dd->d'
extobj = get_linalg_error_extobj(_raise_linalgerror_singular)
r = gufunc(a, b, signature=signature, extobj=extobj)
return wrap(r.astype(result_t))
def tensorinv(a, ind=2):
"""
Compute the 'inverse' of an N-dimensional array.
The result is an inverse for `a` relative to the tensordot operation
``tensordot(a, b, ind)``, i. e., up to floating-point accuracy,
``tensordot(tensorinv(a), a, ind)`` is the "identity" tensor for the
tensordot operation.
Parameters
----------
a : array_like
Tensor to 'invert'. Its shape must be 'square', i. e.,
``prod(a.shape[:ind]) == prod(a.shape[ind:])``.
ind : int, optional
Number of first indices that are involved in the inverse sum.
Must be a positive integer, default is 2.
Returns
-------
b : ndarray
`a`'s tensordot inverse, shape ``a.shape[ind:] + a.shape[:ind]``.
Raises
------
LinAlgError
If `a` is singular or not 'square' (in the above sense).
See Also
--------
tensordot, tensorsolve
Examples
--------
>>> a = np.eye(4*6)
>>> a.shape = (4, 6, 8, 3)
>>> ainv = np.linalg.tensorinv(a, ind=2)
>>> ainv.shape
(8, 3, 4, 6)
>>> b = np.random.randn(4, 6)
>>> np.allclose(np.tensordot(ainv, b), np.linalg.tensorsolve(a, b))
True
>>> a = np.eye(4*6)
>>> a.shape = (24, 8, 3)
>>> ainv = np.linalg.tensorinv(a, ind=1)
>>> ainv.shape
(8, 3, 24)
>>> b = np.random.randn(24)
>>> np.allclose(np.tensordot(ainv, b, 1), np.linalg.tensorsolve(a, b))
True
"""
a = asarray(a)
oldshape = a.shape
prod = 1
if ind > 0:
invshape = oldshape[ind:] + oldshape[:ind]
for k in oldshape[ind:]:
prod *= k
else:
raise ValueError("Invalid ind argument.")
a = a.reshape(prod, -1)
ia = inv(a)
return ia.reshape(*invshape)
# Matrix inversion
def inv(a):
"""
Compute the (multiplicative) inverse of a matrix.
Given a square matrix `a`, return the matrix `ainv` satisfying
``dot(a, ainv) = dot(ainv, a) = eye(a.shape[0])``.
Parameters
----------
a : (..., M, M) array_like
Matrix to be inverted.
Returns
-------
ainv : (..., M, M) ndarray or matrix
(Multiplicative) inverse of the matrix `a`.
Raises
------
LinAlgError
If `a` is not square or inversion fails.
Notes
-----
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
Examples
--------
>>> from numpy.linalg import inv
>>> a = np.array([[1., 2.], [3., 4.]])
>>> ainv = inv(a)
>>> np.allclose(np.dot(a, ainv), np.eye(2))
True
>>> np.allclose(np.dot(ainv, a), np.eye(2))
True
If a is a matrix object, then the return value is a matrix as well:
>>> ainv = inv(np.matrix(a))
>>> ainv
matrix([[-2. , 1. ],
[ 1.5, -0.5]])
Inverses of several matrices can be computed at once:
>>> a = np.array([[[1., 2.], [3., 4.]], [[1, 3], [3, 5]]])
>>> inv(a)
array([[[-2. , 1. ],
[ 1.5, -0.5]],
[[-5. , 2. ],
[ 3. , -1. ]]])
"""
a, wrap = _makearray(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
t, result_t = _commonType(a)
if a.shape[-1] == 0:
# The inner array is 0x0, the ufunc cannot handle this case
return wrap(empty_like(a, dtype=result_t))
signature = 'D->D' if isComplexType(t) else 'd->d'
extobj = get_linalg_error_extobj(_raise_linalgerror_singular)
ainv = _umath_linalg.inv(a, signature=signature, extobj=extobj)
return wrap(ainv.astype(result_t))
# Cholesky decomposition
def cholesky(a):
"""
Cholesky decomposition.
Return the Cholesky decomposition, `L * L.H`, of the square matrix `a`,
where `L` is lower-triangular and .H is the conjugate transpose operator
(which is the ordinary transpose if `a` is real-valued). `a` must be
Hermitian (symmetric if real-valued) and positive-definite. Only `L` is
actually returned.
Parameters
----------
a : (..., M, M) array_like
Hermitian (symmetric if all elements are real), positive-definite
input matrix.
Returns
-------
L : (..., M, M) array_like
Upper or lower-triangular Cholesky factor of `a`. Returns a
matrix object if `a` is a matrix object.
Raises
------
LinAlgError
If the decomposition fails, for example, if `a` is not
positive-definite.
Notes
-----
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
The Cholesky decomposition is often used as a fast way of solving
.. math:: A \\mathbf{x} = \\mathbf{b}
(when `A` is both Hermitian/symmetric and positive-definite).
First, we solve for :math:`\\mathbf{y}` in
.. math:: L \\mathbf{y} = \\mathbf{b},
and then for :math:`\\mathbf{x}` in
.. math:: L.H \\mathbf{x} = \\mathbf{y}.
Examples
--------
>>> A = np.array([[1,-2j],[2j,5]])
>>> A
array([[ 1.+0.j, 0.-2.j],
[ 0.+2.j, 5.+0.j]])
>>> L = np.linalg.cholesky(A)
>>> L
array([[ 1.+0.j, 0.+0.j],
[ 0.+2.j, 1.+0.j]])
>>> np.dot(L, L.T.conj()) # verify that L * L.H = A
array([[ 1.+0.j, 0.-2.j],
[ 0.+2.j, 5.+0.j]])
>>> A = [[1,-2j],[2j,5]] # what happens if A is only array_like?
>>> np.linalg.cholesky(A) # an ndarray object is returned
array([[ 1.+0.j, 0.+0.j],
[ 0.+2.j, 1.+0.j]])
>>> # But a matrix object is returned if A is a matrix object
>>> LA.cholesky(np.matrix(A))
matrix([[ 1.+0.j, 0.+0.j],
[ 0.+2.j, 1.+0.j]])
"""
extobj = get_linalg_error_extobj(_raise_linalgerror_nonposdef)
gufunc = _umath_linalg.cholesky_lo
a, wrap = _makearray(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
t, result_t = _commonType(a)
signature = 'D->D' if isComplexType(t) else 'd->d'
return wrap(gufunc(a, signature=signature, extobj=extobj).astype(result_t))
# QR decompostion
def qr(a, mode='reduced'):
"""
Compute the qr factorization of a matrix.
Factor the matrix `a` as *qr*, where `q` is orthonormal and `r` is
upper-triangular.
Parameters
----------
a : array_like, shape (M, N)
Matrix to be factored.
mode : {'reduced', 'complete', 'r', 'raw', 'full', 'economic'}, optional
If K = min(M, N), then
'reduced' : returns q, r with dimensions (M, K), (K, N) (default)
'complete' : returns q, r with dimensions (M, M), (M, N)
'r' : returns r only with dimensions (K, N)
'raw' : returns h, tau with dimensions (N, M), (K,)
'full' : alias of 'reduced', deprecated
'economic' : returns h from 'raw', deprecated.
The options 'reduced', 'complete', and 'raw' are new in numpy 1.8,
see the notes for more information. The default is 'reduced' and to
maintain backward compatibility with earlier versions of numpy both
it and the old default 'full' can be omitted. Note that array h
returned in 'raw' mode is transposed for calling Fortran. The
'economic' mode is deprecated. The modes 'full' and 'economic' may
be passed using only the first letter for backwards compatibility,
but all others must be spelled out. See the Notes for more
explanation.
Returns
-------
q : ndarray of float or complex, optional
A matrix with orthonormal columns. When mode = 'complete' the
result is an orthogonal/unitary matrix depending on whether or not
a is real/complex. The determinant may be either +/- 1 in that
case.
r : ndarray of float or complex, optional
The upper-triangular matrix.
(h, tau) : ndarrays of np.double or np.cdouble, optional
The array h contains the Householder reflectors that generate q
along with r. The tau array contains scaling factors for the
reflectors. In the deprecated 'economic' mode only h is returned.
Raises
------
LinAlgError
If factoring fails.
Notes
-----
This is an interface to the LAPACK routines dgeqrf, zgeqrf,
dorgqr, and zungqr.
For more information on the qr factorization, see for example:
http://en.wikipedia.org/wiki/QR_factorization
Subclasses of `ndarray` are preserved except for the 'raw' mode. So if
`a` is of type `matrix`, all the return values will be matrices too.
New 'reduced', 'complete', and 'raw' options for mode were added in
Numpy 1.8 and the old option 'full' was made an alias of 'reduced'. In
addition the options 'full' and 'economic' were deprecated. Because
'full' was the previous default and 'reduced' is the new default,
backward compatibility can be maintained by letting `mode` default.
The 'raw' option was added so that LAPACK routines that can multiply
arrays by q using the Householder reflectors can be used. Note that in
this case the returned arrays are of type np.double or np.cdouble and
the h array is transposed to be FORTRAN compatible. No routines using
the 'raw' return are currently exposed by numpy, but some are available
in lapack_lite and just await the necessary work.
Examples
--------
>>> a = np.random.randn(9, 6)
>>> q, r = np.linalg.qr(a)
>>> np.allclose(a, np.dot(q, r)) # a does equal qr
True
>>> r2 = np.linalg.qr(a, mode='r')
>>> r3 = np.linalg.qr(a, mode='economic')
>>> np.allclose(r, r2) # mode='r' returns the same r as mode='full'
True
>>> # But only triu parts are guaranteed equal when mode='economic'
>>> np.allclose(r, np.triu(r3[:6,:6], k=0))
True
Example illustrating a common use of `qr`: solving of least squares
problems
What are the least-squares-best `m` and `y0` in ``y = y0 + mx`` for
the following data: {(0,1), (1,0), (1,2), (2,1)}. (Graph the points
and you'll see that it should be y0 = 0, m = 1.) The answer is provided
by solving the over-determined matrix equation ``Ax = b``, where::
A = array([[0, 1], [1, 1], [1, 1], [2, 1]])
x = array([[y0], [m]])
b = array([[1], [0], [2], [1]])
If A = qr such that q is orthonormal (which is always possible via
Gram-Schmidt), then ``x = inv(r) * (q.T) * b``. (In numpy practice,
however, we simply use `lstsq`.)
>>> A = np.array([[0, 1], [1, 1], [1, 1], [2, 1]])
>>> A
array([[0, 1],
[1, 1],
[1, 1],
[2, 1]])
>>> b = np.array([1, 0, 2, 1])
>>> q, r = LA.qr(A)
>>> p = np.dot(q.T, b)
>>> np.dot(LA.inv(r), p)
array([ 1.1e-16, 1.0e+00])
"""
if mode not in ('reduced', 'complete', 'r', 'raw'):
if mode in ('f', 'full'):
msg = "".join((
"The 'full' option is deprecated in favor of 'reduced'.\n",
"For backward compatibility let mode default."))
warnings.warn(msg, DeprecationWarning)
mode = 'reduced'
elif mode in ('e', 'economic'):
msg = "The 'economic' option is deprecated.",
warnings.warn(msg, DeprecationWarning)
mode = 'economic'
else:
raise ValueError("Unrecognized mode '%s'" % mode)
a, wrap = _makearray(a)
_assertRank2(a)
_assertNoEmpty2d(a)
m, n = a.shape
t, result_t = _commonType(a)
a = _fastCopyAndTranspose(t, a)
a = _to_native_byte_order(a)
mn = min(m, n)
tau = zeros((mn,), t)
if isComplexType(t):
lapack_routine = lapack_lite.zgeqrf
routine_name = 'zgeqrf'
else:
lapack_routine = lapack_lite.dgeqrf
routine_name = 'dgeqrf'
# calculate optimal size of work data 'work'
lwork = 1
work = zeros((lwork,), t)
results = lapack_routine(m, n, a, m, tau, work, -1, 0)
if results['info'] != 0:
raise LinAlgError('%s returns %d' % (routine_name, results['info']))
# do qr decomposition
lwork = int(abs(work[0]))
work = zeros((lwork,), t)
results = lapack_routine(m, n, a, m, tau, work, lwork, 0)
if results['info'] != 0:
raise LinAlgError('%s returns %d' % (routine_name, results['info']))
# handle modes that don't return q
if mode == 'r':
r = _fastCopyAndTranspose(result_t, a[:, :mn])
return wrap(triu(r))
if mode == 'raw':
return a, tau
if mode == 'economic':
if t != result_t:
a = a.astype(result_t)
return wrap(a.T)
# generate q from a
if mode == 'complete' and m > n:
mc = m
q = empty((m, m), t)
else:
mc = mn
q = empty((n, m), t)
q[:n] = a
if isComplexType(t):
lapack_routine = lapack_lite.zungqr
routine_name = 'zungqr'
else:
lapack_routine = lapack_lite.dorgqr
routine_name = 'dorgqr'
# determine optimal lwork
lwork = 1
work = zeros((lwork,), t)
results = lapack_routine(m, mc, mn, q, m, tau, work, -1, 0)
if results['info'] != 0:
raise LinAlgError('%s returns %d' % (routine_name, results['info']))
# compute q
lwork = int(abs(work[0]))
work = zeros((lwork,), t)
results = lapack_routine(m, mc, mn, q, m, tau, work, lwork, 0)
if results['info'] != 0:
raise LinAlgError('%s returns %d' % (routine_name, results['info']))
q = _fastCopyAndTranspose(result_t, q[:mc])
r = _fastCopyAndTranspose(result_t, a[:, :mc])
return wrap(q), wrap(triu(r))
# Eigenvalues
def eigvals(a):
"""
Compute the eigenvalues of a general matrix.
Main difference between `eigvals` and `eig`: the eigenvectors aren't
returned.
Parameters
----------
a : (..., M, M) array_like
A complex- or real-valued matrix whose eigenvalues will be computed.
Returns
-------
w : (..., M,) ndarray
The eigenvalues, each repeated according to its multiplicity.
They are not necessarily ordered, nor are they necessarily
real for real matrices.
Raises
------
LinAlgError
If the eigenvalue computation does not converge.
See Also
--------
eig : eigenvalues and right eigenvectors of general arrays
eigvalsh : eigenvalues of symmetric or Hermitian arrays.
eigh : eigenvalues and eigenvectors of symmetric/Hermitian arrays.
Notes
-----
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
This is implemented using the _geev LAPACK routines which compute
the eigenvalues and eigenvectors of general square arrays.
Examples
--------
Illustration, using the fact that the eigenvalues of a diagonal matrix
are its diagonal elements, that multiplying a matrix on the left
by an orthogonal matrix, `Q`, and on the right by `Q.T` (the transpose
of `Q`), preserves the eigenvalues of the "middle" matrix. In other words,
if `Q` is orthogonal, then ``Q * A * Q.T`` has the same eigenvalues as
``A``:
>>> from numpy import linalg as LA
>>> x = np.random.random()
>>> Q = np.array([[np.cos(x), -np.sin(x)], [np.sin(x), np.cos(x)]])
>>> LA.norm(Q[0, :]), LA.norm(Q[1, :]), np.dot(Q[0, :],Q[1, :])
(1.0, 1.0, 0.0)
Now multiply a diagonal matrix by Q on one side and by Q.T on the other:
>>> D = np.diag((-1,1))
>>> LA.eigvals(D)
array([-1., 1.])
>>> A = np.dot(Q, D)
>>> A = np.dot(A, Q.T)
>>> LA.eigvals(A)
array([ 1., -1.])
"""
a, wrap = _makearray(a)
_assertNoEmpty2d(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
_assertFinite(a)
t, result_t = _commonType(a)
extobj = get_linalg_error_extobj(
_raise_linalgerror_eigenvalues_nonconvergence)
signature = 'D->D' if isComplexType(t) else 'd->D'
w = _umath_linalg.eigvals(a, signature=signature, extobj=extobj)
if not isComplexType(t):
if all(w.imag == 0):
w = w.real
result_t = _realType(result_t)
else:
result_t = _complexType(result_t)
return w.astype(result_t)
def eigvalsh(a, UPLO='L'):
"""
Compute the eigenvalues of a Hermitian or real symmetric matrix.
Main difference from eigh: the eigenvectors are not computed.
Parameters
----------
a : (..., M, M) array_like
A complex- or real-valued matrix whose eigenvalues are to be
computed.
UPLO : {'L', 'U'}, optional
Specifies whether the calculation is done with the lower triangular
part of `a` ('L', default) or the upper triangular part ('U').
Returns
-------
w : (..., M,) ndarray
The eigenvalues, not necessarily ordered, each repeated according to
its multiplicity.
Raises
------
LinAlgError
If the eigenvalue computation does not converge.
See Also
--------
eigh : eigenvalues and eigenvectors of symmetric/Hermitian arrays.
eigvals : eigenvalues of general real or complex arrays.
eig : eigenvalues and right eigenvectors of general real or complex
arrays.
Notes
-----
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
The eigenvalues are computed using LAPACK routines _ssyevd, _heevd
Examples
--------
>>> from numpy import linalg as LA
>>> a = np.array([[1, -2j], [2j, 5]])
>>> LA.eigvalsh(a)
array([ 0.17157288+0.j, 5.82842712+0.j])
"""
UPLO = UPLO.upper()
if UPLO not in ('L', 'U'):
raise ValueError("UPLO argument must be 'L' or 'U'")
extobj = get_linalg_error_extobj(
_raise_linalgerror_eigenvalues_nonconvergence)
if UPLO == 'L':
gufunc = _umath_linalg.eigvalsh_lo
else:
gufunc = _umath_linalg.eigvalsh_up
a, wrap = _makearray(a)
_assertNoEmpty2d(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
t, result_t = _commonType(a)
signature = 'D->d' if isComplexType(t) else 'd->d'
w = gufunc(a, signature=signature, extobj=extobj)
return w.astype(_realType(result_t))
def _convertarray(a):
t, result_t = _commonType(a)
a = _fastCT(a.astype(t))
return a, t, result_t
# Eigenvectors
def eig(a):
"""
Compute the eigenvalues and right eigenvectors of a square array.
Parameters
----------
a : (..., M, M) array
Matrices for which the eigenvalues and right eigenvectors will
be computed
Returns
-------
w : (..., M) array
The eigenvalues, each repeated according to its multiplicity.
The eigenvalues are not necessarily ordered. The resulting
array will be always be of complex type. When `a` is real
the resulting eigenvalues will be real (0 imaginary part) or
occur in conjugate pairs
v : (..., M, M) array
The normalized (unit "length") eigenvectors, such that the
column ``v[:,i]`` is the eigenvector corresponding to the
eigenvalue ``w[i]``.
Raises
------
LinAlgError
If the eigenvalue computation does not converge.
See Also
--------
eigvalsh : eigenvalues of a symmetric or Hermitian (conjugate symmetric)
array.
eigvals : eigenvalues of a non-symmetric array.
Notes
-----
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
This is implemented using the _geev LAPACK routines which compute
the eigenvalues and eigenvectors of general square arrays.
The number `w` is an eigenvalue of `a` if there exists a vector
`v` such that ``dot(a,v) = w * v``. Thus, the arrays `a`, `w`, and
`v` satisfy the equations ``dot(a[:,:], v[:,i]) = w[i] * v[:,i]``
for :math:`i \\in \\{0,...,M-1\\}`.
The array `v` of eigenvectors may not be of maximum rank, that is, some
of the columns may be linearly dependent, although round-off error may
obscure that fact. If the eigenvalues are all different, then theoretically
the eigenvectors are linearly independent. Likewise, the (complex-valued)
matrix of eigenvectors `v` is unitary if the matrix `a` is normal, i.e.,
if ``dot(a, a.H) = dot(a.H, a)``, where `a.H` denotes the conjugate
transpose of `a`.
Finally, it is emphasized that `v` consists of the *right* (as in
right-hand side) eigenvectors of `a`. A vector `y` satisfying
``dot(y.T, a) = z * y.T`` for some number `z` is called a *left*
eigenvector of `a`, and, in general, the left and right eigenvectors
of a matrix are not necessarily the (perhaps conjugate) transposes
of each other.
References
----------
G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando, FL,
Academic Press, Inc., 1980, Various pp.
Examples
--------
>>> from numpy import linalg as LA
(Almost) trivial example with real e-values and e-vectors.
>>> w, v = LA.eig(np.diag((1, 2, 3)))
>>> w; v
array([ 1., 2., 3.])
array([[ 1., 0., 0.],
[ 0., 1., 0.],
[ 0., 0., 1.]])
Real matrix possessing complex e-values and e-vectors; note that the
e-values are complex conjugates of each other.
>>> w, v = LA.eig(np.array([[1, -1], [1, 1]]))
>>> w; v
array([ 1. + 1.j, 1. - 1.j])
array([[ 0.70710678+0.j , 0.70710678+0.j ],
[ 0.00000000-0.70710678j, 0.00000000+0.70710678j]])
Complex-valued matrix with real e-values (but complex-valued e-vectors);
note that a.conj().T = a, i.e., a is Hermitian.
>>> a = np.array([[1, 1j], [-1j, 1]])
>>> w, v = LA.eig(a)
>>> w; v
array([ 2.00000000e+00+0.j, 5.98651912e-36+0.j]) # i.e., {2, 0}
array([[ 0.00000000+0.70710678j, 0.70710678+0.j ],
[ 0.70710678+0.j , 0.00000000+0.70710678j]])
Be careful about round-off error!
>>> a = np.array([[1 + 1e-9, 0], [0, 1 - 1e-9]])
>>> # Theor. e-values are 1 +/- 1e-9
>>> w, v = LA.eig(a)
>>> w; v
array([ 1., 1.])
array([[ 1., 0.],
[ 0., 1.]])
"""
a, wrap = _makearray(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
_assertFinite(a)
t, result_t = _commonType(a)
extobj = get_linalg_error_extobj(
_raise_linalgerror_eigenvalues_nonconvergence)
signature = 'D->DD' if isComplexType(t) else 'd->DD'
w, vt = _umath_linalg.eig(a, signature=signature, extobj=extobj)
if not isComplexType(t) and all(w.imag == 0.0):
w = w.real
vt = vt.real
result_t = _realType(result_t)
else:
result_t = _complexType(result_t)
vt = vt.astype(result_t)
return w.astype(result_t), wrap(vt)
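# Illustrative sketch (not part of the original module): the Notes in eig state
# that the returned pair satisfies dot(a, v[:, i]) == w[i] * v[:, i] for every
# column i. The helper below, whose name `_check_eig_relation` is purely an
# assumption made here for demonstration, verifies that relation numerically.
def _check_eig_relation(n=4, seed=0):
    import numpy as np
    rng = np.random.RandomState(seed)
    a = rng.randn(n, n)
    w, v = np.linalg.eig(a)
    # a @ v scales each eigenvector column by its eigenvalue;
    # v * w performs the same scaling via broadcasting.
    return np.allclose(np.dot(a, v), v * w)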
def eigh(a, UPLO='L'):
"""
Return the eigenvalues and eigenvectors of a Hermitian or symmetric matrix.
Returns two objects, a 1-D array containing the eigenvalues of `a`, and
a 2-D square array or matrix (depending on the input type) of the
corresponding eigenvectors (in columns).
Parameters
----------
a : (..., M, M) array
Hermitian/Symmetric matrices whose eigenvalues and
eigenvectors are to be computed.
UPLO : {'L', 'U'}, optional
Specifies whether the calculation is done with the lower triangular
part of `a` ('L', default) or the upper triangular part ('U').
Returns
-------
w : (..., M) ndarray
The eigenvalues, not necessarily ordered.
v : {(..., M, M) ndarray, (..., M, M) matrix}
The column ``v[:, i]`` is the normalized eigenvector corresponding
to the eigenvalue ``w[i]``. Will return a matrix object if `a` is
a matrix object.
Raises
------
LinAlgError
If the eigenvalue computation does not converge.
See Also
--------
eigvalsh : eigenvalues of symmetric or Hermitian arrays.
eig : eigenvalues and right eigenvectors for non-symmetric arrays.
eigvals : eigenvalues of non-symmetric arrays.
Notes
-----
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
The eigenvalues/eigenvectors are computed using LAPACK routines _syevd,
_heevd
The eigenvalues of real symmetric or complex Hermitian matrices are
always real. [1]_ The array `v` of (column) eigenvectors is unitary
and `a`, `w`, and `v` satisfy the equations
``dot(a, v[:, i]) = w[i] * v[:, i]``.
References
----------
.. [1] G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando,
FL, Academic Press, Inc., 1980, pg. 222.
Examples
--------
>>> from numpy import linalg as LA
>>> a = np.array([[1, -2j], [2j, 5]])
>>> a
array([[ 1.+0.j, 0.-2.j],
[ 0.+2.j, 5.+0.j]])
>>> w, v = LA.eigh(a)
>>> w; v
array([ 0.17157288, 5.82842712])
array([[-0.92387953+0.j , -0.38268343+0.j ],
[ 0.00000000+0.38268343j, 0.00000000-0.92387953j]])
>>> np.dot(a, v[:, 0]) - w[0] * v[:, 0] # verify 1st e-val/vec pair
array([2.77555756e-17 + 0.j, 0. + 1.38777878e-16j])
>>> np.dot(a, v[:, 1]) - w[1] * v[:, 1] # verify 2nd e-val/vec pair
array([ 0.+0.j, 0.+0.j])
>>> A = np.matrix(a) # what happens if input is a matrix object
>>> A
matrix([[ 1.+0.j, 0.-2.j],
[ 0.+2.j, 5.+0.j]])
>>> w, v = LA.eigh(A)
>>> w; v
array([ 0.17157288, 5.82842712])
matrix([[-0.92387953+0.j , -0.38268343+0.j ],
[ 0.00000000+0.38268343j, 0.00000000-0.92387953j]])
"""
UPLO = UPLO.upper()
if UPLO not in ('L', 'U'):
raise ValueError("UPLO argument must be 'L' or 'U'")
a, wrap = _makearray(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
t, result_t = _commonType(a)
extobj = get_linalg_error_extobj(
_raise_linalgerror_eigenvalues_nonconvergence)
if UPLO == 'L':
gufunc = _umath_linalg.eigh_lo
else:
gufunc = _umath_linalg.eigh_up
signature = 'D->dD' if isComplexType(t) else 'd->dd'
w, vt = gufunc(a, signature=signature, extobj=extobj)
w = w.astype(_realType(result_t))
vt = vt.astype(result_t)
return w, wrap(vt)
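# Illustrative sketch (not part of the original module): per the Notes in eigh,
# the eigenvector matrix is unitary and a Hermitian input can be rebuilt as
# v * diag(w) * v.conj().T. The helper name below is hypothetical.
def _check_eigh_reconstruction():
    import numpy as np
    a = np.array([[1, -2j], [2j, 5]])
    w, v = np.linalg.eigh(a)
    rebuilt = np.dot(v, np.dot(np.diag(w), v.conj().T))
    unitary = np.allclose(np.dot(v.conj().T, v), np.eye(2))
    return np.allclose(rebuilt, a) and unitary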
# Singular value decomposition
def svd(a, full_matrices=1, compute_uv=1):
"""
Singular Value Decomposition.
Factors the matrix `a` as ``u * np.diag(s) * v``, where `u` and `v`
are unitary and `s` is a 1-d array of `a`'s singular values.
Parameters
----------
a : (..., M, N) array_like
A real or complex matrix of shape (`M`, `N`) .
full_matrices : bool, optional
If True (default), `u` and `v` have the shapes (`M`, `M`) and
(`N`, `N`), respectively. Otherwise, the shapes are (`M`, `K`)
and (`K`, `N`), respectively, where `K` = min(`M`, `N`).
compute_uv : bool, optional
Whether or not to compute `u` and `v` in addition to `s`. True
by default.
Returns
-------
u : { (..., M, M), (..., M, K) } array
Unitary matrices. The actual shape depends on the value of
``full_matrices``. Only returned when ``compute_uv`` is True.
s : (..., K) array
The singular values for every matrix, sorted in descending order.
v : { (..., N, N), (..., K, N) } array
Unitary matrices. The actual shape depends on the value of
``full_matrices``. Only returned when ``compute_uv`` is True.
Raises
------
LinAlgError
If SVD computation does not converge.
Notes
-----
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
The decomposition is performed using LAPACK routine _gesdd
The SVD is commonly written as ``a = U S V.H``. The `v` returned
by this function is ``V.H`` and ``u = U``.
If ``U`` is a unitary matrix, it means that it
satisfies ``U.H = inv(U)``.
The rows of `v` are the eigenvectors of ``a.H a``. The columns
of `u` are the eigenvectors of ``a a.H``. For row ``i`` in
`v` and column ``i`` in `u`, the corresponding eigenvalue is
``s[i]**2``.
If `a` is a `matrix` object (as opposed to an `ndarray`), then so
are all the return values.
Examples
--------
>>> a = np.random.randn(9, 6) + 1j*np.random.randn(9, 6)
Reconstruction based on full SVD:
>>> U, s, V = np.linalg.svd(a, full_matrices=True)
>>> U.shape, V.shape, s.shape
((9, 9), (6, 6), (6,))
>>> S = np.zeros((9, 6), dtype=complex)
>>> S[:6, :6] = np.diag(s)
>>> np.allclose(a, np.dot(U, np.dot(S, V)))
True
Reconstruction based on reduced SVD:
>>> U, s, V = np.linalg.svd(a, full_matrices=False)
>>> U.shape, V.shape, s.shape
((9, 6), (6, 6), (6,))
>>> S = np.diag(s)
>>> np.allclose(a, np.dot(U, np.dot(S, V)))
True
"""
a, wrap = _makearray(a)
_assertNoEmpty2d(a)
_assertRankAtLeast2(a)
t, result_t = _commonType(a)
extobj = get_linalg_error_extobj(_raise_linalgerror_svd_nonconvergence)
m = a.shape[-2]
n = a.shape[-1]
if compute_uv:
if full_matrices:
if m < n:
gufunc = _umath_linalg.svd_m_f
else:
gufunc = _umath_linalg.svd_n_f
else:
if m < n:
gufunc = _umath_linalg.svd_m_s
else:
gufunc = _umath_linalg.svd_n_s
signature = 'D->DdD' if isComplexType(t) else 'd->ddd'
u, s, vt = gufunc(a, signature=signature, extobj=extobj)
u = u.astype(result_t)
s = s.astype(_realType(result_t))
vt = vt.astype(result_t)
return wrap(u), s, wrap(vt)
else:
if m < n:
gufunc = _umath_linalg.svd_m
else:
gufunc = _umath_linalg.svd_n
signature = 'D->d' if isComplexType(t) else 'd->d'
s = gufunc(a, signature=signature, extobj=extobj)
s = s.astype(_realType(result_t))
return s
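# Illustrative sketch (not part of the original module): the svd Notes state
# that the squared singular values equal the eigenvalues of a.H a. The
# hypothetical helper below checks that claim on a random complex matrix.
def _check_svd_eigen_relation(m=5, n=3, seed=0):
    import numpy as np
    rng = np.random.RandomState(seed)
    a = rng.randn(m, n) + 1j * rng.randn(m, n)
    s = np.linalg.svd(a, compute_uv=False)
    evals = np.linalg.eigvalsh(np.dot(a.conj().T, a))
    # Sort both sides, since the two routines order their results differently.
    return np.allclose(np.sort(s**2), np.sort(evals))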
def cond(x, p=None):
"""
Compute the condition number of a matrix.
This function is capable of returning the condition number using
one of seven different norms, depending on the value of `p` (see
Parameters below).
Parameters
----------
x : (M, N) array_like
The matrix whose condition number is sought.
p : {None, 1, -1, 2, -2, inf, -inf, 'fro'}, optional
Order of the norm:
===== ============================
p norm for matrices
===== ============================
None 2-norm, computed directly using the ``SVD``
'fro' Frobenius norm
inf max(sum(abs(x), axis=1))
-inf min(sum(abs(x), axis=1))
1 max(sum(abs(x), axis=0))
-1 min(sum(abs(x), axis=0))
2 2-norm (largest sing. value)
-2 smallest singular value
===== ============================
inf means the numpy.inf object, and the Frobenius norm is
the root-of-sum-of-squares norm.
Returns
-------
c : {float, inf}
The condition number of the matrix. May be infinite.
See Also
--------
numpy.linalg.norm
Notes
-----
The condition number of `x` is defined as the norm of `x` times the
norm of the inverse of `x` [1]_; the norm can be the usual L2-norm
(root-of-sum-of-squares) or one of a number of other matrix norms.
References
----------
.. [1] G. Strang, *Linear Algebra and Its Applications*, Orlando, FL,
Academic Press, Inc., 1980, pg. 285.
Examples
--------
>>> from numpy import linalg as LA
>>> a = np.array([[1, 0, -1], [0, 1, 0], [1, 0, 1]])
>>> a
array([[ 1, 0, -1],
[ 0, 1, 0],
[ 1, 0, 1]])
>>> LA.cond(a)
1.4142135623730951
>>> LA.cond(a, 'fro')
3.1622776601683795
>>> LA.cond(a, np.inf)
2.0
>>> LA.cond(a, -np.inf)
1.0
>>> LA.cond(a, 1)
2.0
>>> LA.cond(a, -1)
1.0
>>> LA.cond(a, 2)
1.4142135623730951
>>> LA.cond(a, -2)
0.70710678118654746
>>> min(LA.svd(a, compute_uv=0))*min(LA.svd(LA.inv(a), compute_uv=0))
0.70710678118654746
"""
x = asarray(x) # in case we have a matrix
if p is None:
s = svd(x, compute_uv=False)
return s[0]/s[-1]
else:
return norm(x, p)*norm(inv(x), p)
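# Illustrative sketch (not part of the original module): with p=None the code
# above returns the ratio of the largest to the smallest singular value, which
# agrees with the definition norm(x) * norm(inv(x)) for the 2-norm. The helper
# name is hypothetical.
def _check_cond_definition():
    import numpy as np
    a = np.array([[1., 0., -1.], [0., 1., 0.], [1., 0., 1.]])
    s = np.linalg.svd(a, compute_uv=False)
    by_svd = s[0] / s[-1]
    by_norm = np.linalg.norm(a, 2) * np.linalg.norm(np.linalg.inv(a), 2)
    return np.allclose(by_svd, np.linalg.cond(a)) and np.allclose(by_svd, by_norm)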
def matrix_rank(M, tol=None):
"""
Return matrix rank of array using SVD method
Rank of the array is the number of SVD singular values of the array that are
greater than `tol`.
Parameters
----------
M : {(M,), (M, N)} array_like
array of <=2 dimensions
tol : {None, float}, optional
threshold below which SVD values are considered zero. If `tol` is
None, and ``S`` is an array with singular values for `M`, and
``eps`` is the epsilon value for datatype of ``S``, then `tol` is
set to ``S.max() * max(M.shape) * eps``.
Notes
-----
The default threshold to detect rank deficiency is a test on the magnitude
of the singular values of `M`. By default, we identify singular values less
than ``S.max() * max(M.shape) * eps`` as indicating rank deficiency (with
the symbols defined above). This is the algorithm MATLAB uses [1]. It also
appears in *Numerical recipes* in the discussion of SVD solutions for linear
least squares [2].
This default threshold is designed to detect rank deficiency accounting for
the numerical errors of the SVD computation. Imagine that there is a column
in `M` that is an exact (in floating point) linear combination of other
columns in `M`. Computing the SVD on `M` will not produce a singular value
exactly equal to 0 in general: any difference of the smallest SVD value from
0 will be caused by numerical imprecision in the calculation of the SVD.
Our threshold for small SVD values takes this numerical imprecision into
account, and the default threshold will detect such numerical rank
deficiency. The threshold may declare a matrix `M` rank deficient even if
the linear combination of some columns of `M` is not exactly equal to
another column of `M` but only numerically very close to another column of
`M`.
We chose our default threshold because it is in wide use. Other thresholds
are possible. For example, elsewhere in the 2007 edition of *Numerical
recipes* there is an alternative threshold of ``S.max() *
np.finfo(M.dtype).eps / 2. * np.sqrt(m + n + 1.)``. The authors describe
this threshold as being based on "expected roundoff error" (p 71).
The thresholds above deal with floating point roundoff error in the
calculation of the SVD. However, you may have more information about the
sources of error in `M` that would make you consider other tolerance values
to detect *effective* rank deficiency. The most useful measure of the
tolerance depends on the operations you intend to use on your matrix. For
example, if your data come from uncertain measurements with uncertainties
greater than floating point epsilon, choosing a tolerance near that
uncertainty may be preferable. The tolerance may be absolute if the
uncertainties are absolute rather than relative.
References
----------
.. [1] MATLAB reference documentation, "Rank"
http://www.mathworks.com/help/techdoc/ref/rank.html
.. [2] W. H. Press, S. A. Teukolsky, W. T. Vetterling and B. P. Flannery,
"Numerical Recipes (3rd edition)", Cambridge University Press, 2007,
page 795.
Examples
--------
>>> from numpy.linalg import matrix_rank
>>> matrix_rank(np.eye(4)) # Full rank matrix
4
>>> I=np.eye(4); I[-1,-1] = 0. # rank deficient matrix
>>> matrix_rank(I)
3
>>> matrix_rank(np.ones((4,))) # 1 dimension - rank 1 unless all 0
1
>>> matrix_rank(np.zeros((4,)))
0
"""
M = asarray(M)
if M.ndim > 2:
raise TypeError('array should have 2 or fewer dimensions')
if M.ndim < 2:
return int(not all(M==0))
S = svd(M, compute_uv=False)
if tol is None:
tol = S.max() * max(M.shape) * finfo(S.dtype).eps
return sum(S > tol)
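# Illustrative sketch (not part of the original module): the Notes above define
# the default tolerance as S.max() * max(M.shape) * eps. The hypothetical
# helper below recomputes the rank by hand and compares it with matrix_rank.
def _check_default_rank_tolerance():
    import numpy as np
    M = np.eye(4)
    M[-1, -1] = 0.          # deliberately rank deficient
    S = np.linalg.svd(M, compute_uv=False)
    tol = S.max() * max(M.shape) * np.finfo(S.dtype).eps
    return int((S > tol).sum()) == np.linalg.matrix_rank(M)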
# Generalized inverse
def pinv(a, rcond=1e-15 ):
"""
Compute the (Moore-Penrose) pseudo-inverse of a matrix.
Calculate the generalized inverse of a matrix using its
singular-value decomposition (SVD) and including all
*large* singular values.
Parameters
----------
a : (M, N) array_like
Matrix to be pseudo-inverted.
rcond : float
Cutoff for small singular values.
Singular values smaller (in modulus) than
`rcond` * largest_singular_value (again, in modulus)
are set to zero.
Returns
-------
B : (N, M) ndarray
The pseudo-inverse of `a`. If `a` is a `matrix` instance, then so
is `B`.
Raises
------
LinAlgError
If the SVD computation does not converge.
Notes
-----
The pseudo-inverse of a matrix A, denoted :math:`A^+`, is
defined as: "the matrix that 'solves' [the least-squares problem]
:math:`Ax = b`," i.e., if :math:`\\bar{x}` is said solution, then
:math:`A^+` is that matrix such that :math:`\\bar{x} = A^+b`.
It can be shown that if :math:`Q_1 \\Sigma Q_2^T = A` is the singular
value decomposition of A, then
:math:`A^+ = Q_2 \\Sigma^+ Q_1^T`, where :math:`Q_{1,2}` are
orthogonal matrices, :math:`\\Sigma` is a diagonal matrix consisting
of A's so-called singular values, (followed, typically, by
zeros), and then :math:`\\Sigma^+` is simply the diagonal matrix
consisting of the reciprocals of A's singular values
(again, followed by zeros). [1]_
References
----------
.. [1] G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando,
FL, Academic Press, Inc., 1980, pp. 139-142.
Examples
--------
The following example checks that ``a * a+ * a == a`` and
``a+ * a * a+ == a+``:
>>> a = np.random.randn(9, 6)
>>> B = np.linalg.pinv(a)
>>> np.allclose(a, np.dot(a, np.dot(B, a)))
True
>>> np.allclose(B, np.dot(B, np.dot(a, B)))
True
"""
a, wrap = _makearray(a)
_assertNoEmpty2d(a)
a = a.conjugate()
u, s, vt = svd(a, 0)
m = u.shape[0]
n = vt.shape[1]
cutoff = rcond*maximum.reduce(s)
for i in range(min(n, m)):
if s[i] > cutoff:
s[i] = 1./s[i]
else:
s[i] = 0.;
res = dot(transpose(vt), multiply(s[:, newaxis], transpose(u)))
return wrap(res)
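# Illustrative sketch (not part of the original module): following the Notes in
# pinv, the pseudo-inverse can be assembled by hand from the SVD by inverting
# only the singular values above the cutoff. The helper name is hypothetical.
def _pinv_by_hand(a, rcond=1e-15):
    import numpy as np
    u, s, vt = np.linalg.svd(a, full_matrices=False)
    cutoff = rcond * s.max()
    # Reciprocal of the "large" singular values, zero for the rest.
    s_inv = np.where(s > cutoff, 1. / s, 0.)
    # V @ diag(s_inv) @ U^H, using broadcasting instead of an explicit diag.
    return np.dot(vt.T.conj(), s_inv[:, np.newaxis] * u.T.conj())
# Usage sketch: np.allclose(_pinv_by_hand(a), np.linalg.pinv(a)) should hold
# for a well-conditioned matrix `a`.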
# Determinant
def slogdet(a):
"""
Compute the sign and (natural) logarithm of the determinant of an array.
If an array has a very small or very large determinant, then a call to
`det` may overflow or underflow. This routine is more robust against such
issues, because it computes the logarithm of the determinant rather than
the determinant itself.
Parameters
----------
a : (..., M, M) array_like
Input array, has to be a square 2-D array.
Returns
-------
sign : (...) array_like
A number representing the sign of the determinant. For a real matrix,
this is 1, 0, or -1. For a complex matrix, this is a complex number
with absolute value 1 (i.e., it is on the unit circle), or else 0.
logdet : (...) array_like
The natural log of the absolute value of the determinant.
If the determinant is zero, then `sign` will be 0 and `logdet` will be
-Inf. In all cases, the determinant is equal to ``sign * np.exp(logdet)``.
See Also
--------
det
Notes
-----
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
The determinant is computed via LU factorization using the LAPACK
routine z/dgetrf.
.. versionadded:: 1.6.0.
Examples
--------
The determinant of a 2-D array ``[[a, b], [c, d]]`` is ``ad - bc``:
>>> a = np.array([[1, 2], [3, 4]])
>>> (sign, logdet) = np.linalg.slogdet(a)
>>> (sign, logdet)
(-1, 0.69314718055994529)
>>> sign * np.exp(logdet)
-2.0
Computing log-determinants for a stack of matrices:
>>> a = np.array([ [[1, 2], [3, 4]], [[1, 2], [2, 1]], [[1, 3], [3, 1]] ])
>>> a.shape
(3, 2, 2)
>>> sign, logdet = np.linalg.slogdet(a)
>>> (sign, logdet)
(array([-1., -1., -1.]), array([ 0.69314718, 1.09861229, 2.07944154]))
>>> sign * np.exp(logdet)
array([-2., -3., -8.])
This routine succeeds where ordinary `det` does not:
>>> np.linalg.det(np.eye(500) * 0.1)
0.0
>>> np.linalg.slogdet(np.eye(500) * 0.1)
(1, -1151.2925464970228)
"""
a = asarray(a)
_assertNoEmpty2d(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
t, result_t = _commonType(a)
real_t = _realType(result_t)
signature = 'D->Dd' if isComplexType(t) else 'd->dd'
sign, logdet = _umath_linalg.slogdet(a, signature=signature)
return sign.astype(result_t), logdet.astype(real_t)
def det(a):
"""
Compute the determinant of an array.
Parameters
----------
a : (..., M, M) array_like
Input array to compute determinants for.
Returns
-------
det : (...) array_like
Determinant of `a`.
See Also
--------
slogdet : Another way of representing the determinant, more suitable
for large matrices where underflow/overflow may occur.
Notes
-----
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
The determinant is computed via LU factorization using the LAPACK
routine z/dgetrf.
Examples
--------
The determinant of a 2-D array [[a, b], [c, d]] is ad - bc:
>>> a = np.array([[1, 2], [3, 4]])
>>> np.linalg.det(a)
-2.0
Computing determinants for a stack of matrices:
>>> a = np.array([ [[1, 2], [3, 4]], [[1, 2], [2, 1]], [[1, 3], [3, 1]] ])
>>> a.shape
(3, 2, 2)
>>> np.linalg.det(a)
array([-2., -3., -8.])
"""
a = asarray(a)
_assertNoEmpty2d(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
t, result_t = _commonType(a)
signature = 'D->D' if isComplexType(t) else 'd->d'
return _umath_linalg.det(a, signature=signature).astype(result_t)
# Linear Least Squares
def lstsq(a, b, rcond=-1):
"""
Return the least-squares solution to a linear matrix equation.
Solves the equation `a x = b` by computing a vector `x` that
minimizes the Euclidean 2-norm `|| b - a x ||^2`. The equation may
be under-, well-, or over- determined (i.e., the number of
linearly independent rows of `a` can be less than, equal to, or
greater than its number of linearly independent columns). If `a`
is square and of full rank, then `x` (but for round-off error) is
the "exact" solution of the equation.
Parameters
----------
a : (M, N) array_like
"Coefficient" matrix.
b : {(M,), (M, K)} array_like
Ordinate or "dependent variable" values. If `b` is two-dimensional,
the least-squares solution is calculated for each of the `K` columns
of `b`.
rcond : float, optional
Cut-off ratio for small singular values of `a`.
Singular values are set to zero if they are smaller than `rcond`
times the largest singular value of `a`.
Returns
-------
x : {(N,), (N, K)} ndarray
Least-squares solution. If `b` is two-dimensional,
the solutions are in the `K` columns of `x`.
residuals : {(), (1,), (K,)} ndarray
Sums of residuals; squared Euclidean 2-norm for each column in
``b - a*x``.
If the rank of `a` is < N or M <= N, this is an empty array.
If `b` is 1-dimensional, this is a (1,) shape array.
Otherwise the shape is (K,).
rank : int
Rank of matrix `a`.
s : (min(M, N),) ndarray
Singular values of `a`.
Raises
------
LinAlgError
If computation does not converge.
Notes
-----
If `b` is a matrix, then all array results are returned as matrices.
Examples
--------
Fit a line, ``y = mx + c``, through some noisy data-points:
>>> x = np.array([0, 1, 2, 3])
>>> y = np.array([-1, 0.2, 0.9, 2.1])
By examining the coefficients, we see that the line should have a
gradient of roughly 1 and cut the y-axis at, more or less, -1.
We can rewrite the line equation as ``y = Ap``, where ``A = [[x 1]]``
and ``p = [[m], [c]]``. Now use `lstsq` to solve for `p`:
>>> A = np.vstack([x, np.ones(len(x))]).T
>>> A
array([[ 0., 1.],
[ 1., 1.],
[ 2., 1.],
[ 3., 1.]])
>>> m, c = np.linalg.lstsq(A, y)[0]
>>> print m, c
1.0 -0.95
Plot the data along with the fitted line:
>>> import matplotlib.pyplot as plt
>>> plt.plot(x, y, 'o', label='Original data', markersize=10)
>>> plt.plot(x, m*x + c, 'r', label='Fitted line')
>>> plt.legend()
>>> plt.show()
"""
import math
a, _ = _makearray(a)
b, wrap = _makearray(b)
is_1d = len(b.shape) == 1
if is_1d:
b = b[:, newaxis]
_assertRank2(a, b)
m = a.shape[0]
n = a.shape[1]
n_rhs = b.shape[1]
ldb = max(n, m)
if m != b.shape[0]:
raise LinAlgError('Incompatible dimensions')
t, result_t = _commonType(a, b)
result_real_t = _realType(result_t)
real_t = _linalgRealType(t)
bstar = zeros((ldb, n_rhs), t)
bstar[:b.shape[0], :n_rhs] = b.copy()
a, bstar = _fastCopyAndTranspose(t, a, bstar)
a, bstar = _to_native_byte_order(a, bstar)
s = zeros((min(m, n),), real_t)
nlvl = max( 0, int( math.log( float(min(m, n))/2. ) ) + 1 )
iwork = zeros((3*min(m, n)*nlvl+11*min(m, n),), fortran_int)
if isComplexType(t):
lapack_routine = lapack_lite.zgelsd
lwork = 1
rwork = zeros((lwork,), real_t)
work = zeros((lwork,), t)
results = lapack_routine(m, n, n_rhs, a, m, bstar, ldb, s, rcond,
0, work, -1, rwork, iwork, 0)
lwork = int(abs(work[0]))
rwork = zeros((lwork,), real_t)
a_real = zeros((m, n), real_t)
bstar_real = zeros((ldb, n_rhs,), real_t)
results = lapack_lite.dgelsd(m, n, n_rhs, a_real, m,
bstar_real, ldb, s, rcond,
0, rwork, -1, iwork, 0)
lrwork = int(rwork[0])
work = zeros((lwork,), t)
rwork = zeros((lrwork,), real_t)
results = lapack_routine(m, n, n_rhs, a, m, bstar, ldb, s, rcond,
0, work, lwork, rwork, iwork, 0)
else:
lapack_routine = lapack_lite.dgelsd
lwork = 1
work = zeros((lwork,), t)
results = lapack_routine(m, n, n_rhs, a, m, bstar, ldb, s, rcond,
0, work, -1, iwork, 0)
lwork = int(work[0])
work = zeros((lwork,), t)
results = lapack_routine(m, n, n_rhs, a, m, bstar, ldb, s, rcond,
0, work, lwork, iwork, 0)
if results['info'] > 0:
raise LinAlgError('SVD did not converge in Linear Least Squares')
resids = array([], result_real_t)
if is_1d:
x = array(ravel(bstar)[:n], dtype=result_t, copy=True)
if results['rank'] == n and m > n:
if isComplexType(t):
resids = array([sum(abs(ravel(bstar)[n:])**2)],
dtype=result_real_t)
else:
resids = array([sum((ravel(bstar)[n:])**2)],
dtype=result_real_t)
else:
x = array(transpose(bstar)[:n,:], dtype=result_t, copy=True)
if results['rank'] == n and m > n:
if isComplexType(t):
resids = sum(abs(transpose(bstar)[n:,:])**2, axis=0).astype(
result_real_t)
else:
resids = sum((transpose(bstar)[n:,:])**2, axis=0).astype(
result_real_t)
st = s[:min(n, m)].copy().astype(result_real_t)
return wrap(x), wrap(resids), results['rank'], st
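# Illustrative sketch (not part of the original module): the Returns section of
# lstsq describes `residuals` as the squared Euclidean norm of b - a*x per
# column (when `a` has full rank and M > N). The hypothetical check below
# recomputes that quantity directly for the line-fitting example above.
def _check_lstsq_residuals():
    import numpy as np
    x = np.array([0., 1., 2., 3.])
    y = np.array([-1., 0.2, 0.9, 2.1])
    A = np.vstack([x, np.ones(len(x))]).T
    sol, resids, rank, sv = np.linalg.lstsq(A, y)
    manual = np.sum((y - np.dot(A, sol)) ** 2)
    return np.allclose(resids, manual)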
def _multi_svd_norm(x, row_axis, col_axis, op):
"""Compute the extreme singular values of the 2-D matrices in `x`.
This is a private utility function used by numpy.linalg.norm().
Parameters
----------
x : ndarray
row_axis, col_axis : int
The axes of `x` that hold the 2-D matrices.
op : callable
This should be either numpy.amin or numpy.amax.
Returns
-------
result : float or ndarray
If `x` is 2-D, the return values is a float.
Otherwise, it is an array with ``x.ndim - 2`` dimensions.
The return values are either the minimum or maximum of the
singular values of the matrices, depending on whether `op`
is `numpy.amin` or `numpy.amax`.
"""
if row_axis > col_axis:
row_axis -= 1
y = rollaxis(rollaxis(x, col_axis, x.ndim), row_axis, -1)
result = op(svd(y, compute_uv=0), axis=-1)
return result
def norm(x, ord=None, axis=None):
"""
Matrix or vector norm.
This function is able to return one of seven different matrix norms,
or one of an infinite number of vector norms (described below), depending
on the value of the ``ord`` parameter.
Parameters
----------
x : array_like
Input array. If `axis` is None, `x` must be 1-D or 2-D.
ord : {non-zero int, inf, -inf, 'fro'}, optional
Order of the norm (see table under ``Notes``). inf means numpy's
`inf` object.
axis : {int, 2-tuple of ints, None}, optional
If `axis` is an integer, it specifies the axis of `x` along which to
compute the vector norms. If `axis` is a 2-tuple, it specifies the
axes that hold 2-D matrices, and the matrix norms of these matrices
are computed. If `axis` is None then either a vector norm (when `x`
is 1-D) or a matrix norm (when `x` is 2-D) is returned.
Returns
-------
n : float or ndarray
Norm of the matrix or vector(s).
Notes
-----
For values of ``ord <= 0``, the result is, strictly speaking, not a
mathematical 'norm', but it may still be useful for various numerical
purposes.
The following norms can be calculated:
===== ============================ ==========================
ord norm for matrices norm for vectors
===== ============================ ==========================
None Frobenius norm 2-norm
'fro' Frobenius norm --
inf max(sum(abs(x), axis=1)) max(abs(x))
-inf min(sum(abs(x), axis=1)) min(abs(x))
0 -- sum(x != 0)
1 max(sum(abs(x), axis=0)) as below
-1 min(sum(abs(x), axis=0)) as below
2 2-norm (largest sing. value) as below
-2 smallest singular value as below
other -- sum(abs(x)**ord)**(1./ord)
===== ============================ ==========================
The Frobenius norm is given by [1]_:
:math:`||A||_F = [\\sum_{i,j} abs(a_{i,j})^2]^{1/2}`
References
----------
.. [1] G. H. Golub and C. F. Van Loan, *Matrix Computations*,
Baltimore, MD, Johns Hopkins University Press, 1985, pg. 15
Examples
--------
>>> from numpy import linalg as LA
>>> a = np.arange(9) - 4
>>> a
array([-4, -3, -2, -1, 0, 1, 2, 3, 4])
>>> b = a.reshape((3, 3))
>>> b
array([[-4, -3, -2],
[-1, 0, 1],
[ 2, 3, 4]])
>>> LA.norm(a)
7.745966692414834
>>> LA.norm(b)
7.745966692414834
>>> LA.norm(b, 'fro')
7.745966692414834
>>> LA.norm(a, np.inf)
4
>>> LA.norm(b, np.inf)
9
>>> LA.norm(a, -np.inf)
0
>>> LA.norm(b, -np.inf)
2
>>> LA.norm(a, 1)
20
>>> LA.norm(b, 1)
7
>>> LA.norm(a, -1)
-4.6566128774142013e-010
>>> LA.norm(b, -1)
6
>>> LA.norm(a, 2)
7.745966692414834
>>> LA.norm(b, 2)
7.3484692283495345
>>> LA.norm(a, -2)
nan
>>> LA.norm(b, -2)
1.8570331885190563e-016
>>> LA.norm(a, 3)
5.8480354764257312
>>> LA.norm(a, -3)
nan
Using the `axis` argument to compute vector norms:
>>> c = np.array([[ 1, 2, 3],
... [-1, 1, 4]])
>>> LA.norm(c, axis=0)
array([ 1.41421356, 2.23606798, 5. ])
>>> LA.norm(c, axis=1)
array([ 3.74165739, 4.24264069])
>>> LA.norm(c, ord=1, axis=1)
array([6, 6])
Using the `axis` argument to compute matrix norms:
>>> m = np.arange(8).reshape(2,2,2)
>>> LA.norm(m, axis=(1,2))
array([ 3.74165739, 11.22497216])
>>> LA.norm(m[0, :, :]), LA.norm(m[1, :, :])
(3.7416573867739413, 11.224972160321824)
"""
x = asarray(x)
# Check the default case first and handle it immediately.
if ord is None and axis is None:
x = x.ravel(order='K')
if isComplexType(x.dtype.type):
sqnorm = dot(x.real, x.real) + dot(x.imag, x.imag)
else:
sqnorm = dot(x, x)
return sqrt(sqnorm)
# Normalize the `axis` argument to a tuple.
nd = x.ndim
if axis is None:
axis = tuple(range(nd))
elif not isinstance(axis, tuple):
axis = (axis,)
if len(axis) == 1:
if ord == Inf:
return abs(x).max(axis=axis)
elif ord == -Inf:
return abs(x).min(axis=axis)
elif ord == 0:
# Zero norm
return (x != 0).sum(axis=axis)
elif ord == 1:
# special case for speedup
return add.reduce(abs(x), axis=axis)
elif ord is None or ord == 2:
# special case for speedup
s = (x.conj() * x).real
return sqrt(add.reduce(s, axis=axis))
else:
try:
ord + 1
except TypeError:
raise ValueError("Invalid norm order for vectors.")
if x.dtype.type is longdouble:
# Convert to a float type, so integer arrays give
# float results. Don't apply asfarray to longdouble arrays,
# because it will downcast to float64.
absx = abs(x)
else:
absx = x if isComplexType(x.dtype.type) else asfarray(x)
if absx.dtype is x.dtype:
absx = abs(absx)
else:
# if the type changed, we can safely overwrite absx
abs(absx, out=absx)
absx **= ord
return add.reduce(absx, axis=axis) ** (1.0 / ord)
elif len(axis) == 2:
row_axis, col_axis = axis
if not (-nd <= row_axis < nd and -nd <= col_axis < nd):
raise ValueError('Invalid axis %r for an array with shape %r' %
(axis, x.shape))
if row_axis % nd == col_axis % nd:
raise ValueError('Duplicate axes given.')
if ord == 2:
return _multi_svd_norm(x, row_axis, col_axis, amax)
elif ord == -2:
return _multi_svd_norm(x, row_axis, col_axis, amin)
elif ord == 1:
if col_axis > row_axis:
col_axis -= 1
return add.reduce(abs(x), axis=row_axis).max(axis=col_axis)
elif ord == Inf:
if row_axis > col_axis:
row_axis -= 1
return add.reduce(abs(x), axis=col_axis).max(axis=row_axis)
elif ord == -1:
if col_axis > row_axis:
col_axis -= 1
return add.reduce(abs(x), axis=row_axis).min(axis=col_axis)
elif ord == -Inf:
if row_axis > col_axis:
row_axis -= 1
return add.reduce(abs(x), axis=col_axis).min(axis=row_axis)
elif ord in [None, 'fro', 'f']:
return sqrt(add.reduce((x.conj() * x).real, axis=axis))
else:
raise ValueError("Invalid norm order for matrices.")
else:
raise ValueError("Improper number of dimensions to norm.")
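# Illustrative sketch (not part of the original module): the fast path at the
# top of norm() flattens the input and returns sqrt(dot(x, x)); for a 2-D real
# array that is exactly the Frobenius norm. The helper name is hypothetical.
def _check_default_norm_is_frobenius():
    import numpy as np
    b = (np.arange(9) - 4).reshape(3, 3).astype(float)
    flat = b.ravel()
    fro = np.linalg.norm(b, 'fro')
    default = np.linalg.norm(b)
    return np.allclose(np.sqrt(np.dot(flat, flat)), fro) and np.allclose(default, fro)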
| mit |