repo_name | path | copies | size | content | license
---|---|---|---|---|---|
Scarzy/LazyWorship
|
python_src/classifier.py
|
1
|
9498
|
from __future__ import division
import collections
import itertools
import json
import matplotlib.axes
import matplotlib.pyplot as plt
import mpl_toolkits.mplot3d
import numpy
import numpy.fft
import sklearn.decomposition
import sklearn.mixture
import struct
import wave
LYRIC_WEIGHT = 250000000
TIME_WEIGHT = 1000000000
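# These large weights scale the context features (the previous lyric indices and
# the relative-time ratio) so they are comparable to the raw FFT magnitudes once
# everything is concatenated into a single feature vector for PCA and the GMM
# (see _data_point_to_feature_coordinates below).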
# Represents a Lyric Section
# index is the index number in the collection
# time_indicies is a tuple of 2-tuples of start and end time of the lyric section in the song
# text is the text to display on the screen
LyricSection = collections.namedtuple('LyricSection', ('index', 'time_indicies', 'text'))
# Represents a DataPoint in our feature space (i.e. a window of the audio we've heard)
DataPoint = collections.namedtuple('DataPoint', ('fft', 'last_window_lyric_index', 'last_actual_lyric_index', 'relative_time_ratio'))
class SongClassifier(object):
def __init__(self, lyrics):
self.lyrics = lyrics
self.last_actual_lyric = -1
self.last_window_lyric = -1
self.first_segment_length = -1
self.current_segment_start = 0
self.pca = None
self.gmm = None
def fit(self, training_data):
lyric_feature_coordinates = {l: list(_data_point_to_feature_coordinates(dp) for dp in data) for l, data in training_data.iteritems()}
pca_data = list(itertools.chain(*lyric_feature_coordinates.values()))
self.pca = sklearn.decomposition.PCA(n_components=20)
self.pca.fit(pca_data)
lyric_gmm_data = {l: self.pca.transform(d) for l, d in lyric_feature_coordinates.iteritems()}
lyric_means = [ld.mean(axis=0) for ld in lyric_gmm_data.values()]
self.gmm = sklearn.mixture.GMM(len(self.lyrics), covariance_type='full')
self.gmm.means_ = lyric_means
gmm_data = numpy.array(list(itertools.chain(*lyric_gmm_data.values())))
self.gmm.fit(gmm_data)
def predict(self, window, start_time):
predict_last_window_lyric = self.last_window_lyric
predict_last_actual_lyric = self.last_actual_lyric
predict_relative_time = start_time - self.current_segment_start
predict_relative_time_ratio = predict_relative_time / (self.first_segment_length if self.first_segment_length != -1 else self.lyrics[0].time_indicies[0][1])
# Mirror the preprocessing used in generate_training_data: magnitude spectrum,
# low bins zeroed, truncated to the first 5000 bins. The weights are applied
# later in _data_point_to_feature_coordinates, as they are for training data.
fft = numpy.absolute(numpy.fft.fft(window))
fft[0:19] = (0,) * 19
fft = fft[:5000]
predict_dp = DataPoint(fft, predict_last_window_lyric, predict_last_actual_lyric, predict_relative_time_ratio)
predict_feature_coordinate = _data_point_to_feature_coordinates(predict_dp)
gmm_data = self.pca.transform(predict_feature_coordinate)
lyric_index = self.gmm.predict(gmm_data)
if lyric_index != self.last_window_lyric and self.last_window_lyric != -1:
self.last_actual_lyric = self.last_window_lyric
if self.first_segment_length == -1:
self.first_segment_length = start_time
self.last_window_lyric = lyric_index
return lyric_index
# A list of 3-tuples of start time, end time and lyrics text
def generate_training_data(wav_file_path, lyrics, generate_fft_images=False, generate_pca_image=False):
window_size_frames = 40000
window_interval = int(window_size_frames / 10)
frame_rate, windows = _load_wav_file(wav_file_path, window_size_frames, window_interval)
window_size_ms = window_size_frames / frame_rate
# Dictionary of lyric to list of data points
lyric_data = {l: [] for l in lyrics}
last_data_point_lyric = None
for i, (absolute_time, window) in enumerate(windows):
# FFT each window to get the frequencies
window_fft = numpy.absolute(numpy.fft.fft(window))
# Remove anything below 20Hz because humans can't hear that
window_fft[0:19] = (0,) * 19
# Remove anything above 5kHz because instruments don't go that high
window_fft = window_fft[:5000]
if generate_fft_images:
plt.subplot(121)
plt.plot(window)
plt.subplot(122)
plt.plot(window_fft)
plt.savefig('fft_{0}.png'.format(i))
plt.show()
plt.clf()
# Get the lyrics
lyric, lyric_start_time, lyric_end_time = _find_lyric(lyrics, absolute_time)
# Compute the time we are at in the current lyric, as a ratio of the length of the first lyric
relative_time = absolute_time - lyric_start_time
relative_time_ratio = relative_time / (lyric_end_time - lyric_start_time)
# Get the verse of the last window and the last verse we were in
verses = ((last_data_point_lyric.index if last_data_point_lyric is not None else 0),
((lyric.index - 1)))
verses = numpy.array(verses)
data_point = DataPoint(window_fft, verses[0], verses[1], relative_time_ratio)
lyric_data[lyric].append(data_point)
last_data_point_lyric = lyric
if generate_pca_image:
# Use Principle Component Analysis to get the 3 dimensions with the greatest variance
lyric_feature_coordinates = {l: list(_data_point_to_feature_coordinates(dp) for dp in data) for l, data in lyric_data.iteritems()}
pca_data = list(itertools.chain(*lyric_feature_coordinates.values()))
pca_3d = sklearn.decomposition.PCA(n_components=3)
pca_3d.fit(pca_data)
# Now graph them
transformed_lyric_data = {l: pca_3d.transform(data).transpose() for l, data in lyric_feature_coordinates.iteritems()}
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
for (l, data), color in zip(transformed_lyric_data.iteritems(), 'cb'):
ax.scatter(*data, c=color)
plt.savefig('pca_3d.png')
return lyric_data
pca = sklearn.decomposition.PCA(n_components=20)
pca.fit(pca_data)
lyric_gmm_data = {l: pca.transform(d) for l, d in lyric_data.iteritems()}
lyric_means = [ld.mean() for ld in lyric_gmm_data.values()]
return lyric_means, list(itertools.chain(*lyric_gmm_data.values()))
gmm = sklearn.mixture.GMM(len(lyrics), covariance_type='full')
gmm.means_ = lyric_means
gmm.fit(itertools.chain(lyric_gmm_data.values()))
last_actual_lyric = -1
last_window_lyric = -1
first_segment_length = -1
current_segment_start = 0
errors = 0
for i, (lyric, fft) in enumerate(ffts):
fft[-3] = (last_window_lyric * LYRIC_WEIGHT)
fft[-2] = last_actual_lyric * LYRIC_WEIGHT
absolute_time = i * windowSizeFrames / frameRate
relative_time = absolute_time - current_segment_start
relative_time_ratio = relative_time / (first_segment_length if first_segment_length != -1 else lyrics[0][2])
fft[-1] = relative_time_ratio * TIME_WEIGHT
gmm_data = pca.transform(fft)
lyric_index = gmm.predict(gmm_data)
if lyrics[lyric_index] != lyric:
print 'Failed to predict {0} (chose {1} instead of {2})'.format(i, lyric_index, lyric[0])
errors += 1
if lyric_index != last_window_lyric and last_window_lyric != -1:
last_actual_lyric = last_window_lyric
if first_segment_length == -1:
first_segment_length = absolute_time
last_window_lyric = lyric_index
print 'Made {0} errors'.format(errors)
def _find_lyric(lyrics, time):
for l in lyrics:
for start, end in l.time_indicies:
if time >= start and time < end:
return l, start, end
else:
raise KeyError('Couldn\'t find a lyric at {0}'.format(time))
def _data_point_to_feature_coordinates(dp):
return numpy.concatenate((dp.fft, numpy.array([dp.last_window_lyric_index * LYRIC_WEIGHT, dp.last_actual_lyric_index * LYRIC_WEIGHT, dp.relative_time_ratio * TIME_WEIGHT])))
def _load_wav_file(wav_file_path, window_size_frames, window_interval):
wave_file = wave.open(wav_file_path, 'r')
try:
length = wave_file.getnframes()
wave_data = wave_file.readframes(length)
wave_data = struct.unpack('<' + ('h' * int(len(wave_data) / 2)), wave_data)
finally:
wave_file.close()
def window_generator():
for i in xrange(0, length - window_size_frames, window_interval):
yield i / frame_rate, wave_data[i:(i + window_size_frames)]
frame_rate = wave_file.getframerate()
return frame_rate, window_generator()
if __name__ == '__main__':
training_file = 'chris_tomlin-amazing_grace-training.wav'
lyrics = [LyricSection(0, ((0, 15.5),), 'Amazing grace how sweet the sound, that saved a wretch like me'),
LyricSection(1, ((15.5, 31),), 'I once was lost, but now am found. Was blind but now I see.')]
td = generate_training_data(training_file, lyrics, generate_pca_image=True)
classifier = SongClassifier(lyrics)
classifier.fit(td)
window_size_frames = 40000
window_interval = int(window_size_frames / 10)
frame_rate, windows = _load_wav_file(training_file, window_size_frames, window_interval)
errors = 0
for i, (absolute_time, w) in enumerate(windows):
correct_lyric, _, _ = _find_lyric(lyrics, absolute_time)
predicted_lyric = classifier.predict(w, absolute_time)
if correct_lyric.index != predicted_lyric:
errors += 1
print 'Failed to correctly predict {0} (guessed {1}, expected {2})'.format(i, predicted_lyric, correct_lyric.index)
print 'Made {0} errors'.format(errors)
|
gpl-3.0
|
shyamalschandra/copperhead
|
samples/mandelbrot.py
|
5
|
1382
|
from copperhead import *
import numpy as np
@cu
def z_square(z):
real, imag = z
return real * real - imag * imag, 2 * real * imag
@cu
def z_magnitude(z):
real, imag = z
return sqrt(real * real + imag * imag)
@cu
def z_add((z0r, z0i), (z1r, z1i)):
return z0r + z1r, z0i + z1i
@cu
def mandelbrot_iteration(z0, z, i, m, t):
z = z_add(z_square(z), z0)
escaped = z_magnitude(z) > m
converged = i > t
done = escaped or converged
if not done:
return mandelbrot_iteration(z0, z, i+1, m, t)
else:
return i
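# Note: the escape-time loop is expressed as tail recursion; each call advances z
# by one Mandelbrot step (z <- z^2 + z0) until |z| exceeds the magnitude bound m
# or the iteration count i exceeds the threshold t, and the count is returned.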
@cu
def mandelbrot(lb, scale, (x, y), m, t):
def mandelbrot_el(zi):
return mandelbrot_iteration(zi, zi, 0, m, t)
def index(i):
scale_x, scale_y = scale
lb_x, lb_y = lb
return float32(i % x) * scale_x + lb_x, float32(i / x) * scale_y + lb_y
two_d_points = map(index, range(x*y))
return map(mandelbrot_el, two_d_points)
lb = (np.float32(-2.5), np.float32(-2.0))
ub = (np.float32(1.5), np.float32(2.0))
x, y = 1000, 1000
scale = ((ub[0]-lb[0])/np.float32(x), (ub[1]-lb[1])/np.float32(y))
max_iterations = 100
diverge_threshold = np.float32(4.0)
print("Calculating...")
result = mandelbrot(lb, scale, (x,y), diverge_threshold, max_iterations)
print("Plotting...")
import matplotlib.pyplot as plt
im_result = to_numpy(result).reshape([x, y])
plt.imshow(im_result)
plt.show()
|
apache-2.0
|
MartinDelzant/scikit-learn
|
sklearn/utils/tests/test_multiclass.py
|
128
|
12853
|
from __future__ import division
import numpy as np
import scipy.sparse as sp
from itertools import product
from sklearn.externals.six.moves import xrange
from sklearn.externals.six import iteritems
from scipy.sparse import issparse
from scipy.sparse import csc_matrix
from scipy.sparse import csr_matrix
from scipy.sparse import coo_matrix
from scipy.sparse import dok_matrix
from scipy.sparse import lil_matrix
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regex
from sklearn.utils.multiclass import unique_labels
from sklearn.utils.multiclass import is_multilabel
from sklearn.utils.multiclass import type_of_target
from sklearn.utils.multiclass import class_distribution
class NotAnArray(object):
"""An object that is convertable to an array. This is useful to
simulate a Pandas timeseries."""
def __init__(self, data):
self.data = data
def __array__(self):
return self.data
EXAMPLES = {
'multilabel-indicator': [
# valid when the data is formatted as sparse or dense, identified
# by CSR format when the testing takes place
csr_matrix(np.random.RandomState(42).randint(2, size=(10, 10))),
csr_matrix(np.array([[0, 1], [1, 0]])),
csr_matrix(np.array([[0, 1], [1, 0]], dtype=np.bool)),
csr_matrix(np.array([[0, 1], [1, 0]], dtype=np.int8)),
csr_matrix(np.array([[0, 1], [1, 0]], dtype=np.uint8)),
csr_matrix(np.array([[0, 1], [1, 0]], dtype=np.float)),
csr_matrix(np.array([[0, 1], [1, 0]], dtype=np.float32)),
csr_matrix(np.array([[0, 0], [0, 0]])),
csr_matrix(np.array([[0, 1]])),
# Only valid when data is dense
np.array([[-1, 1], [1, -1]]),
np.array([[-3, 3], [3, -3]]),
NotAnArray(np.array([[-3, 3], [3, -3]])),
],
'multiclass': [
[1, 0, 2, 2, 1, 4, 2, 4, 4, 4],
np.array([1, 0, 2]),
np.array([1, 0, 2], dtype=np.int8),
np.array([1, 0, 2], dtype=np.uint8),
np.array([1, 0, 2], dtype=np.float),
np.array([1, 0, 2], dtype=np.float32),
np.array([[1], [0], [2]]),
NotAnArray(np.array([1, 0, 2])),
[0, 1, 2],
['a', 'b', 'c'],
np.array([u'a', u'b', u'c']),
np.array([u'a', u'b', u'c'], dtype=object),
np.array(['a', 'b', 'c'], dtype=object),
],
'multiclass-multioutput': [
np.array([[1, 0, 2, 2], [1, 4, 2, 4]]),
np.array([[1, 0, 2, 2], [1, 4, 2, 4]], dtype=np.int8),
np.array([[1, 0, 2, 2], [1, 4, 2, 4]], dtype=np.uint8),
np.array([[1, 0, 2, 2], [1, 4, 2, 4]], dtype=np.float),
np.array([[1, 0, 2, 2], [1, 4, 2, 4]], dtype=np.float32),
np.array([['a', 'b'], ['c', 'd']]),
np.array([[u'a', u'b'], [u'c', u'd']]),
np.array([[u'a', u'b'], [u'c', u'd']], dtype=object),
np.array([[1, 0, 2]]),
NotAnArray(np.array([[1, 0, 2]])),
],
'binary': [
[0, 1],
[1, 1],
[],
[0],
np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1]),
np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1], dtype=np.bool),
np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1], dtype=np.int8),
np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1], dtype=np.uint8),
np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1], dtype=np.float),
np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1], dtype=np.float32),
np.array([[0], [1]]),
NotAnArray(np.array([[0], [1]])),
[1, -1],
[3, 5],
['a'],
['a', 'b'],
['abc', 'def'],
np.array(['abc', 'def']),
[u'a', u'b'],
np.array(['abc', 'def'], dtype=object),
],
'continuous': [
[1e-5],
[0, .5],
np.array([[0], [.5]]),
np.array([[0], [.5]], dtype=np.float32),
],
'continuous-multioutput': [
np.array([[0, .5], [.5, 0]]),
np.array([[0, .5], [.5, 0]], dtype=np.float32),
np.array([[0, .5]]),
],
'unknown': [
[[]],
[()],
# sequence of sequences that weren't supported even before deprecation
np.array([np.array([]), np.array([1, 2, 3])], dtype=object),
[np.array([]), np.array([1, 2, 3])],
[set([1, 2, 3]), set([1, 2])],
[frozenset([1, 2, 3]), frozenset([1, 2])],
# and also confusable as sequences of sequences
[{0: 'a', 1: 'b'}, {0: 'a'}],
# empty second dimension
np.array([[], []]),
# 3d
np.array([[[0, 1], [2, 3]], [[4, 5], [6, 7]]]),
]
}
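# For reference (illustrative, not part of the original test data): with these
# examples, type_of_target([0, 1]) == 'binary' and
# type_of_target(np.array([[0, .5]])) == 'continuous-multioutput'.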
NON_ARRAY_LIKE_EXAMPLES = [
set([1, 2, 3]),
{0: 'a', 1: 'b'},
{0: [5], 1: [5]},
'abc',
frozenset([1, 2, 3]),
None,
]
MULTILABEL_SEQUENCES = [
[[1], [2], [0, 1]],
[(), (2), (0, 1)],
np.array([[], [1, 2]], dtype='object'),
NotAnArray(np.array([[], [1, 2]], dtype='object'))
]
def test_unique_labels():
# Empty iterable
assert_raises(ValueError, unique_labels)
# Multiclass problem
assert_array_equal(unique_labels(xrange(10)), np.arange(10))
assert_array_equal(unique_labels(np.arange(10)), np.arange(10))
assert_array_equal(unique_labels([4, 0, 2]), np.array([0, 2, 4]))
# Multilabel indicator
assert_array_equal(unique_labels(np.array([[0, 0, 1],
[1, 0, 1],
[0, 0, 0]])),
np.arange(3))
assert_array_equal(unique_labels(np.array([[0, 0, 1],
[0, 0, 0]])),
np.arange(3))
# Several arrays passed
assert_array_equal(unique_labels([4, 0, 2], xrange(5)),
np.arange(5))
assert_array_equal(unique_labels((0, 1, 2), (0,), (2, 1)),
np.arange(3))
# Border line case with binary indicator matrix
assert_raises(ValueError, unique_labels, [4, 0, 2], np.ones((5, 5)))
assert_raises(ValueError, unique_labels, np.ones((5, 4)), np.ones((5, 5)))
assert_array_equal(unique_labels(np.ones((4, 5)), np.ones((5, 5))),
np.arange(5))
def test_unique_labels_non_specific():
# Test unique_labels with a variety of collected examples
# Smoke test for all supported formats
for format in ["binary", "multiclass", "multilabel-indicator"]:
for y in EXAMPLES[format]:
unique_labels(y)
# We don't support those formats at the moment
for example in NON_ARRAY_LIKE_EXAMPLES:
assert_raises(ValueError, unique_labels, example)
for y_type in ["unknown", "continuous", 'continuous-multioutput',
'multiclass-multioutput']:
for example in EXAMPLES[y_type]:
assert_raises(ValueError, unique_labels, example)
def test_unique_labels_mixed_types():
# Mix with binary or multiclass and multilabel
mix_clf_format = product(EXAMPLES["multilabel-indicator"],
EXAMPLES["multiclass"] +
EXAMPLES["binary"])
for y_multilabel, y_multiclass in mix_clf_format:
assert_raises(ValueError, unique_labels, y_multiclass, y_multilabel)
assert_raises(ValueError, unique_labels, y_multilabel, y_multiclass)
assert_raises(ValueError, unique_labels, [[1, 2]], [["a", "d"]])
assert_raises(ValueError, unique_labels, ["1", 2])
assert_raises(ValueError, unique_labels, [["1", 2], [1, 3]])
assert_raises(ValueError, unique_labels, [["1", "2"], [2, 3]])
def test_is_multilabel():
for group, group_examples in iteritems(EXAMPLES):
if group in ['multilabel-indicator']:
dense_assert_, dense_exp = assert_true, 'True'
else:
dense_assert_, dense_exp = assert_false, 'False'
for example in group_examples:
# Only mark explicitly defined sparse examples as valid sparse
# multilabel-indicators
if group == 'multilabel-indicator' and issparse(example):
sparse_assert_, sparse_exp = assert_true, 'True'
else:
sparse_assert_, sparse_exp = assert_false, 'False'
if (issparse(example) or
(hasattr(example, '__array__') and
np.asarray(example).ndim == 2 and
np.asarray(example).dtype.kind in 'biuf' and
np.asarray(example).shape[1] > 0)):
examples_sparse = [sparse_matrix(example)
for sparse_matrix in [coo_matrix,
csc_matrix,
csr_matrix,
dok_matrix,
lil_matrix]]
for exmpl_sparse in examples_sparse:
sparse_assert_(is_multilabel(exmpl_sparse),
msg=('is_multilabel(%r)'
' should be %s')
% (exmpl_sparse, sparse_exp))
# Densify sparse examples before testing
if issparse(example):
example = example.toarray()
dense_assert_(is_multilabel(example),
msg='is_multilabel(%r) should be %s'
% (example, dense_exp))
def test_type_of_target():
for group, group_examples in iteritems(EXAMPLES):
for example in group_examples:
assert_equal(type_of_target(example), group,
msg=('type_of_target(%r) should be %r, got %r'
% (example, group, type_of_target(example))))
for example in NON_ARRAY_LIKE_EXAMPLES:
msg_regex = 'Expected array-like \(array or non-string sequence\).*'
assert_raises_regex(ValueError, msg_regex, type_of_target, example)
for example in MULTILABEL_SEQUENCES:
msg = ('You appear to be using a legacy multi-label data '
'representation. Sequence of sequences are no longer supported;'
' use a binary array or sparse matrix instead.')
assert_raises_regex(ValueError, msg, type_of_target, example)
def test_class_distribution():
y = np.array([[1, 0, 0, 1],
[2, 2, 0, 1],
[1, 3, 0, 1],
[4, 2, 0, 1],
[2, 0, 0, 1],
[1, 3, 0, 1]])
# Define the sparse matrix with a mix of implicit and explicit zeros
data = np.array([1, 2, 1, 4, 2, 1, 0, 2, 3, 2, 3, 1, 1, 1, 1, 1, 1])
indices = np.array([0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 5, 0, 1, 2, 3, 4, 5])
indptr = np.array([0, 6, 11, 11, 17])
y_sp = sp.csc_matrix((data, indices, indptr), shape=(6, 4))
classes, n_classes, class_prior = class_distribution(y)
classes_sp, n_classes_sp, class_prior_sp = class_distribution(y_sp)
classes_expected = [[1, 2, 4],
[0, 2, 3],
[0],
[1]]
n_classes_expected = [3, 3, 1, 1]
class_prior_expected = [[3/6, 2/6, 1/6],
[1/3, 1/3, 1/3],
[1.0],
[1.0]]
for k in range(y.shape[1]):
assert_array_almost_equal(classes[k], classes_expected[k])
assert_array_almost_equal(n_classes[k], n_classes_expected[k])
assert_array_almost_equal(class_prior[k], class_prior_expected[k])
assert_array_almost_equal(classes_sp[k], classes_expected[k])
assert_array_almost_equal(n_classes_sp[k], n_classes_expected[k])
assert_array_almost_equal(class_prior_sp[k], class_prior_expected[k])
# Test again with explicit sample weights
(classes,
n_classes,
class_prior) = class_distribution(y, [1.0, 2.0, 1.0, 2.0, 1.0, 2.0])
(classes_sp,
n_classes_sp,
class_prior_sp) = class_distribution(y, [1.0, 2.0, 1.0, 2.0, 1.0, 2.0])
class_prior_expected = [[4/9, 3/9, 2/9],
[2/9, 4/9, 3/9],
[1.0],
[1.0]]
for k in range(y.shape[1]):
assert_array_almost_equal(classes[k], classes_expected[k])
assert_array_almost_equal(n_classes[k], n_classes_expected[k])
assert_array_almost_equal(class_prior[k], class_prior_expected[k])
assert_array_almost_equal(classes_sp[k], classes_expected[k])
assert_array_almost_equal(n_classes_sp[k], n_classes_expected[k])
assert_array_almost_equal(class_prior_sp[k], class_prior_expected[k])
|
bsd-3-clause
|
tareqmalas/girih
|
scripts/sisc/paper_plot_thread_scaling_7_pt_var_coeff.py
|
2
|
7265
|
#!/usr/bin/env python
def main():
import sys
raw_data = load_csv(sys.argv[1])
k_l = set()
for k in raw_data:
k_l.add(get_stencil_num(k))
k_l = list(k_l)
# for ts in ['Naive', 'Dynamic-Intra-Diamond']
for k in k_l:
for is_dp in [1]:
for t in [0, 1]:
plot_lines(raw_data, k, is_dp, t)
def get_stencil_num(k):
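# Map the kernel description fields to a small integer id:
#   0/1 -> constant coefficients (semi-bandwidth 4 vs. other)
#   5   -> variable coefficients, no symmetry
#   3/4 -> variable symmetric coefficients (semi-bandwidth 1 vs. other)
#   2   -> anything else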
# add the stencil operator
if k['Stencil Kernel coefficients'] in 'constant':
if int(k['Stencil Kernel semi-bandwidth'])==4:
stencil = 0
else:
stencil = 1
elif 'no-symmetry' in k['Stencil Kernel coefficients']:
stencil = 5
elif 'sym' in k['Stencil Kernel coefficients']:
if int(k['Stencil Kernel semi-bandwidth'])==1:
stencil = 3
else:
stencil = 4
else:
stencil = 2
return stencil
def plot_lines(raw_data, stencil_kernel, is_dp, t):
from operator import itemgetter
import matplotlib.pyplot as plt
import matplotlib
import pylab
from pylab import arange,pi,sin,cos,sqrt
fig_width = 3.8*0.393701 # inches
fig_height = 1.0*fig_width #* 210.0/280.0#433.62/578.16
fig_size = [fig_width,fig_height]
params = {
'axes.labelsize': 7,
'axes.linewidth': 0.5,
'lines.linewidth': 0.75,
'text.fontsize': 7,
'legend.fontsize': 7,
'xtick.labelsize': 7,
'ytick.labelsize': 7,
'lines.markersize': 3,
'text.usetex': True,
'figure.figsize': fig_size}
pylab.rcParams.update(params)
ts_l = set()
for k in raw_data:
ts_l.add(k['Time stepper orig name'])
ts_l = list(ts_l)
th = set()
for k in raw_data:
th.add(int(k['OpenMP Threads']))
th = list(th)
tb_l = set()
for k in raw_data:
tb_l.add(k['Time unroll'])
tb_l = list(tb_l)
tb_l = map(int,tb_l)
tb_l.sort()
tgs_l = set()
for k in raw_data:
tgs_l.add(k['Thread group size'])
tgs_l = list(tgs_l)
tgs_l = map(int,tgs_l)
tgs_l.sort()
req_fields = [('Thread group size', int), ('WD main-loop RANK0 MStencil/s MAX', float), ('Time stepper orig name', str), ('OpenMP Threads', int), ('MStencil/s MAX', float), ('Time unroll',int), ('Sustained Memory BW', float)]
data = []
for k in raw_data:
tup = {}
# add the general fields
for f in req_fields:
tup[f[0]] = map(f[1], [k[f[0]]] )[0]
# add the stencil operator
# if k['Stencil Kernel coefficients'] in 'constant':
# if int(k['Stencil Kernel semi-bandwidth'])==4:
# stencil = 0
# else:
# stencil = 1
# elif 'no-symmetry' in k['Stencil Kernel coefficients']:
# stencil = 5
# elif 'sym' in k['Stencil Kernel coefficients']:
# if int(k['Stencil Kernel semi-bandwidth'])==1:
# stencil = 3
# else:
# stencil = 4
# else:
# stencil = 2
# tup['stencil'] = stencil
tup['stencil'] = get_stencil_num(k)
# add the precision information
if k['Precision'] in 'DP':
p = 1
else:
p = 0
tup['Precision'] = p
data.append(tup)
data = sorted(data, key=itemgetter('Time stepper orig name', 'Time unroll', 'Thread group size', 'OpenMP Threads'))
# for i in data: print i
max_single = 0
# fig, ax1 = plt.subplots()
# lns = []
marker = 'o'
x = []
y = []
y_m = []
for k in data:
if ( ('Naive' in k['Time stepper orig name']) and (k['stencil']==stencil_kernel) and (k['Precision']==is_dp)):
if k['OpenMP Threads'] == 1 and max_single < k['MStencil/s MAX']/10**3: max_single = k['MStencil/s MAX']/10**3
y_m.append(k['Sustained Memory BW']/10**3)
x.append(k['OpenMP Threads'])
y.append(k['MStencil/s MAX']/10**3)
marker = 'o'
col = 'g'
ts2 = 'Spt.blk.'
if(x) and t==0:
plt.plot(x, y, color=col, marker=marker, linestyle='-', label=ts2)
if(y_m) and t==1:
plt.plot(x, y_m, color=col, marker=marker, linestyle='-', label=ts2)
x = []
y = []
y_m = []
perf_str = 'WD main-loop RANK0 MStencil/s MAX'
for k in data:
if ( ('Diamond' in k['Time stepper orig name']) and (k['Thread group size'] == 10) and (k['stencil']==stencil_kernel) and (k['Precision']==is_dp)):
y_m.append(k['Sustained Memory BW']/10**3)
x.append(k['OpenMP Threads'])
y.append(k[perf_str]/10**3)
marker = '*'
markersize = 12
col = 'm'
ts2 = str(10) + 'WD'
if(x) and t==0:
plt.plot(x, y, color=col, marker=marker, markersize=markersize,linestyle='', label=ts2)
if(y_m) and t==1:
plt.plot(x, y_m, color=col, marker=marker, markersize=markersize,linestyle='', label=ts2)
cols = {0:'y', 1:'k', 2:'b', 4:'c', 5:'r', 8:'m'}
markers = {0:'.', 1:'^', 2:'v', 4:'.', 5:'x', 8:'.'}
for tgs in [1,2, 4, 8, 5]:
marker = markers[tgs]
x = []
y = []
y_m = []
for k in data:
if ( ('Diamond' in k['Time stepper orig name']) and (k['Thread group size'] == tgs) and (k['stencil']==stencil_kernel) and (k['Precision']==is_dp) ):
if k['OpenMP Threads'] == 1 and max_single < k[perf_str]/10**3: max_single = k[perf_str]/10**3
y_m.append(k['Sustained Memory BW']/10**3)
x.append(k['OpenMP Threads'])
y.append(k[perf_str]/10**3)
col = cols[tgs]
ts2 = str(tgs) + 'WD'
if(x) and t==0:
plt.plot(x, y, color=col, marker=marker, linestyle='-', label=ts2)
if(y_m) and t==1:
plt.plot(x, y_m, color=col, marker=marker, linestyle='-', label=ts2)
# add limits
mem_limit=0
# sus_mem_bw = 36500 #SB
sus_mem_bw = 40 #IB
if stencil_kernel == 0:
mem_limit = sus_mem_bw/16
elif stencil_kernel == 1:
mem_limit = sus_mem_bw/12
elif stencil_kernel == 2:
mem_limit = sus_mem_bw/20
if is_dp == 1: mem_limit = mem_limit / 2
if t == 0:
#plt.plot([1, len(th)], [mem_limit, mem_limit], color='g', linestyle='--', label='Spatial blk. limit')
pass
# add ideal scaling
ideal = [i*max_single for i in th]
if t == 0:
plt.plot(th, ideal, color='k', linestyle='--', label='Ideal scaling')
if t == 0:
title = '7_pt_var_all_methods_perf'
# plt.ylabel('GLUP/s')
else:
title = '7_pt_var_all_methods_bw'
# plt.ylabel('GBytes/s')
f_name = title.replace(' ', '_')
plt.xlabel('Threads')
#if t == 0: plt.legend(loc='best')
plt.grid()
pylab.savefig(f_name+'.png', bbox_inches="tight", pad_inches=0.04)
pylab.savefig(f_name+'.pdf', format='pdf', bbox_inches="tight", pad_inches=0)
#plt.show()
plt.clf()
def load_csv(data_file):
from csv import DictReader
with open(data_file, 'rb') as output_file:
data = DictReader(output_file)
data = [k for k in data]
return data
if __name__ == "__main__":
main()
|
bsd-3-clause
|
gdhungana/desispec
|
py/desispec/qa/qa_plots_ql.py
|
2
|
16381
|
"""
This includes routines to make pdf plots on the qa outputs from quicklook.
"""
import numpy as np
from matplotlib import pyplot as plt
def plot_countspectralbins(qa_dict,outfile):
"""Plot count spectral bins.
When reading from the yaml output file, qa_dict is the value of the first top-level key, which is the name of that QA
`qa_dict` example::
{'ARM': 'r',
'EXPID': '00000006',
'MJD': 57578.78098693542,
'PANAME': 'BOXCAR',
'SPECTROGRAPH': 0,
'VALUE': {'NBINS100': array([ 2575., 2611., 2451., 2495., 2357., 2452., 2528., 2501., 2548., 2461.]),
'NBINS100_AMP': array([ 1249.74, 0. , 1198.01, 0. ]),
'NBINS250': array([ 2503., 2539., 2161., 2259., 2077., 2163., 2284., 2268., 2387., 2210.]),
'NBINS250_AMP': array([ 1149.55, 0. , 1095.02, 0. ]),
'NBINS500': array([ 2307., 2448., 229., 1910., 94., 306., 2056., 1941., 2164., 785.]),
'NBINS500_AMP': array([ 688.85, 0. , 648.75, 0. ])}}}
Args:
qa_dict: dictionary of qa outputs from running qa_quicklook.CountSpectralBins
outfile: Name of figure.
"""
arm=qa_dict["ARM"]
spectrograph=qa_dict["SPECTROGRAPH"]
expid=qa_dict["EXPID"]
paname=qa_dict["PANAME"]
bins100=qa_dict["VALUE"]["NBINS100"]
bins250=qa_dict["VALUE"]["NBINS250"]
bins500=qa_dict["VALUE"]["NBINS500"]
bins100_amp=qa_dict["VALUE"]["NBINS100_AMP"]
bins250_amp=qa_dict["VALUE"]["NBINS250_AMP"]
bins500_amp=qa_dict["VALUE"]["NBINS500_AMP"]
index=np.arange(bins100.shape[0])
fig=plt.figure()
plt.suptitle("Count spectral bins after %s, Camera: %s%s, ExpID: %s"%(paname,arm,spectrograph,expid))
ax1=fig.add_subplot(231)
hist_med=ax1.bar(index,bins100,color='b',align='center')
ax1.set_xlabel('Fiber #',fontsize=10)
ax1.set_ylabel('Counts > 100',fontsize=10)
ax1.tick_params(axis='x',labelsize=10)
ax1.tick_params(axis='y',labelsize=10)
ax2=fig.add_subplot(232)
hist_med=ax2.bar(index,bins250,color='r',align='center')
ax2.set_xlabel('Fiber #',fontsize=10)
ax2.set_ylabel('Counts > 250',fontsize=10)
ax2.tick_params(axis='x',labelsize=10)
ax2.tick_params(axis='y',labelsize=10)
ax3=fig.add_subplot(233)
hist_med=ax3.bar(index,bins500,color='g',align='center')
ax3.set_xlabel('Fiber #',fontsize=10)
ax3.set_ylabel('Counts > 500',fontsize=10)
ax3.tick_params(axis='x',labelsize=10)
ax3.tick_params(axis='y',labelsize=10)
ax4=fig.add_subplot(234)
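# Each per-amplifier array has 4 entries; reshaping to 2x2 lets pcolor draw one
# colored cell per amp, and the annotate calls below label the four quadrants.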
heatmap1=ax4.pcolor(bins100_amp.reshape(2,2).T,cmap=plt.cm.coolwarm)
ax4.set_xlabel("Bins above 100 counts (per Amp)",fontsize=10)
ax4.tick_params(axis='x',labelsize=10,labelbottom='off')
ax4.tick_params(axis='y',labelsize=10,labelleft='off')
ax4.annotate("Amp 1\n%.1f"%bins100_amp[0],
xy=(0.4,0.4),
fontsize=10
)
ax4.annotate("Amp 2\n%.1f"%bins100_amp[1],
xy=(1.4,0.4),
fontsize=10
)
ax4.annotate("Amp 3\n%.1f"%bins100_amp[2],
xy=(0.4,1.4),
fontsize=10
)
ax4.annotate("Amp 4\n%.1f"%bins100_amp[3],
xy=(1.4,1.4),
fontsize=10
)
ax5=fig.add_subplot(235)
heatmap2=ax5.pcolor(bins250_amp.reshape(2,2).T,cmap=plt.cm.coolwarm)
ax5.set_xlabel("Bins above 250 counts (per Amp)",fontsize=10)
ax5.tick_params(axis='x',labelsize=10,labelbottom='off')
ax5.tick_params(axis='y',labelsize=10,labelleft='off')
ax5.annotate("Amp 1\n%.1f"%bins250_amp[0],
xy=(0.4,0.4),
fontsize=10
)
ax5.annotate("Amp 2\n%.1f"%bins250_amp[1],
xy=(1.4,0.4),
fontsize=10
)
ax5.annotate("Amp 3\n%.1f"%bins250_amp[2],
xy=(0.4,1.4),
fontsize=10
)
ax5.annotate("Amp 4\n%.1f"%bins250_amp[3],
xy=(1.4,1.4),
fontsize=10
)
ax6=fig.add_subplot(236)
heatmap3=ax6.pcolor(bins500_amp.reshape(2,2).T,cmap=plt.cm.coolwarm)
ax6.set_xlabel("Bins above 500 counts (per Amp)",fontsize=10)
ax6.tick_params(axis='x',labelsize=10,labelbottom='off')
ax6.tick_params(axis='y',labelsize=10,labelleft='off')
ax6.annotate("Amp 1\n%.1f"%bins500_amp[0],
xy=(0.4,0.4),
fontsize=10
)
ax6.annotate("Amp 2\n%.1f"%bins500_amp[1],
xy=(1.4,0.4),
fontsize=10
)
ax6.annotate("Amp 3\n%.1f"%bins500_amp[2],
xy=(0.4,1.4),
fontsize=10
)
ax6.annotate("Amp 4\n%.1f"%bins500_amp[3],
xy=(1.4,1.4),
fontsize=10
)
plt.tight_layout()
fig.savefig(outfile)
def plot_countpix(qa_dict,outfile):
"""
plot pixel counts above some threshold
qa_dict example:
{'ARM': 'r',
'EXPID': '00000006',
'MJD': 57578.780697648355,
'PANAME': 'PREPROC',
'SPECTROGRAPH': 0,
'VALUE': {'NPIX100': 0,
'NPIX100_AMP': [254549, 0, 242623, 0],
'NPIX3SIG': 3713,
'NPIX3SIG_AMP': [128158, 2949, 132594, 3713],
'NPIX500': 0,
'NPIX500_AMP': [1566, 0, 1017, 0]}}}
args: qa_dict : qa dictionary from countpix qa
outfile : pdf file of the plot
"""
spectrograph=qa_dict["SPECTROGRAPH"]
expid=qa_dict["EXPID"]
arm=qa_dict["ARM"]
paname=qa_dict["PANAME"]
count3sig_amp=np.array(qa_dict["VALUE"]["NPIX3SIG_AMP"])
count100_amp=np.array(qa_dict["VALUE"]["NPIX100_AMP"])
count500_amp=np.array(qa_dict["VALUE"]["NPIX500_AMP"])
fig=plt.figure()
plt.suptitle("Count pixels after %s, Camera: %s%s, ExpID: %s"%(paname,arm,spectrograph,expid))
ax1=fig.add_subplot(221)
heatmap1=ax1.pcolor(count3sig_amp.reshape(2,2).T,cmap=plt.cm.coolwarm)
ax1.set_xlabel("Counts above 3sig. (per Amp)",fontsize=10)
ax1.tick_params(axis='x',labelsize=10,labelbottom='off')
ax1.tick_params(axis='y',labelsize=10,labelleft='off')
ax1.annotate("Amp 1\n%.1f"%count3sig_amp[0],
xy=(0.4,0.4),
fontsize=10
)
ax1.annotate("Amp 2\n%.1f"%count3sig_amp[1],
xy=(1.4,0.4),
fontsize=10
)
ax1.annotate("Amp 3\n%.1f"%count3sig_amp[2],
xy=(0.4,1.4),
fontsize=10
)
ax1.annotate("Amp 4\n%.1f"%count3sig_amp[3],
xy=(1.4,1.4),
fontsize=10
)
ax2=fig.add_subplot(222)
heatmap2=ax2.pcolor(count100_amp.reshape(2,2).T,cmap=plt.cm.coolwarm)
ax2.set_xlabel("Counts above 100 (per Amp)",fontsize=10)
ax2.tick_params(axis='x',labelsize=10,labelbottom='off')
ax2.tick_params(axis='y',labelsize=10,labelleft='off')
ax2.annotate("Amp 1\n%.1f"%count100_amp[0],
xy=(0.4,0.4),
fontsize=10
)
ax2.annotate("Amp 2\n%.1f"%count100_amp[1],
xy=(1.4,0.4),
fontsize=10
)
ax2.annotate("Amp 3\n%.1f"%count100_amp[2],
xy=(0.4,1.4),
fontsize=10
)
ax2.annotate("Amp 4\n%.1f"%count100_amp[3],
xy=(1.4,1.4),
fontsize=10
)
ax3=fig.add_subplot(223)
heatmap3=ax3.pcolor(count500_amp.reshape(2,2).T,cmap=plt.cm.coolwarm)
ax3.set_xlabel("Counts above 500 (per Amp)",fontsize=10)
ax3.tick_params(axis='x',labelsize=10,labelbottom='off')
ax3.tick_params(axis='y',labelsize=10,labelleft='off')
ax3.annotate("Amp 1\n%.1f"%count500_amp[0],
xy=(0.4,0.4),
fontsize=10
)
ax3.annotate("Amp 2\n%.1f"%count500_amp[1],
xy=(1.4,0.4),
fontsize=10
)
ax3.annotate("Amp 3\n%.1f"%count500_amp[2],
xy=(0.4,1.4),
fontsize=10
)
ax3.annotate("Amp 4\n%.1f"%count500_amp[3],
xy=(1.4,1.4),
fontsize=10
)
fig.savefig(outfile)
def plot_bias_overscan(qa_dict,outfile):
"""
map of bias from overscan from 4 regions of CCD
qa_dict example:
{'ARM': 'r',
'EXPID': '00000006',
'MJD': 57578.780704701225,
'PANAME': 'PREPROC',
'SPECTROGRAPH': 0,
'VALUE': {'BIAS': -0.0080487558302569373,
'BIAS_AMP': array([-0.01132324, -0.02867701, -0.00277266, 0.0105779 ])}}
args: qa_dict : qa dictionary from countpix qa
outfile : pdf file of the plot
"""
spectrograph=qa_dict["SPECTROGRAPH"]
expid=qa_dict["EXPID"]
arm=qa_dict["ARM"]
paname=qa_dict["PANAME"]
bias_amp=qa_dict["VALUE"]["BIAS_AMP"]
fig=plt.figure()
plt.suptitle("Bias from overscan region after %s, Camera: %s%s, ExpID: %s"%(paname,arm,spectrograph,expid))
ax1=fig.add_subplot(111)
heatmap1=ax1.pcolor(bias_amp.reshape(2,2).T,cmap=plt.cm.coolwarm)
ax1.set_xlabel("Avg. bias value (per Amp)",fontsize=10)
ax1.tick_params(axis='x',labelsize=10,labelbottom='off')
ax1.tick_params(axis='y',labelsize=10,labelleft='off')
ax1.annotate("Amp 1\n%.3f"%bias_amp[0],
xy=(0.4,0.4),
fontsize=10
)
ax1.annotate("Amp 2\n%.3f"%bias_amp[1],
xy=(1.4,0.4),
fontsize=10
)
ax1.annotate("Amp 3\n%.3f"%bias_amp[2],
xy=(0.4,1.4),
fontsize=10
)
ax1.annotate("Amp 4\n%.3f"%bias_amp[3],
xy=(1.4,1.4),
fontsize=10
)
fig.savefig(outfile)
def plot_RMS(qa_dict,outfile):
"""Plot RMS
`qa_dict` example:
{'ARM': 'r',
'EXPID': '00000006',
'MJD': 57581.91467038749,
'PANAME': 'PREPROC',
'SPECTROGRAPH': 0,
'VALUE': {'RMS': 40.218151021598679,
'RMS_AMP': array([ 55.16847779, 2.91397089, 55.26686528, 2.91535373])}}
Args:
qa_dict: dictionary of qa outputs from running qa_quicklook.Get_RMS
outfile: Name of plot output file
"""
rms_amp=qa_dict["VALUE"]["RMS_AMP"]
arm=qa_dict["ARM"]
spectrograph=qa_dict["SPECTROGRAPH"]
expid=qa_dict["EXPID"]
mjd=qa_dict["MJD"]
pa=qa_dict["PANAME"]
fig=plt.figure()
plt.suptitle("RMS image counts per amplifier, Camera: %s%s, ExpID: %s"%(arm,spectrograph,expid))
ax1=fig.add_subplot(111)
heatmap1=ax1.pcolor(rms_amp.reshape(2,2).T,cmap=plt.cm.coolwarm)
ax1.set_xlabel("RMS (per Amp)",fontsize=10)
ax1.tick_params(axis='x',labelsize=10,labelbottom='off')
ax1.tick_params(axis='y',labelsize=10,labelleft='off')
ax1.annotate("Amp 1\n%.3f"%rms_amp[0],
xy=(0.4,0.4),
fontsize=10
)
ax1.annotate("Amp 2\n%.3f"%rms_amp[1],
xy=(1.4,0.4),
fontsize=10
)
ax1.annotate("Amp 3\n%.3f"%rms_amp[2],
xy=(0.4,1.4),
fontsize=10
)
ax1.annotate("Amp 4\n%.3f"%rms_amp[3],
xy=(1.4,1.4),
fontsize=10
)
fig.savefig(outfile)
def plot_sky_continuum(qa_dict,outfile):
"""
plot mean sky continuum from the lower and higher wavelength ranges for each fiber and across amps
example qa_dict:
{'ARM': 'r',
'EXPID': '00000006',
'MJD': 57582.49011861168,
'PANAME': 'APPLY_FIBERFLAT',
'SPECTROGRAPH': 0,
'VALUE': {'SKYCONT': 359.70078667259668,
'SKYCONT_AMP': array([ 374.19163643, 0. , 344.76184662, 0. ]),
'SKYCONT_FIBER': [357.23814787655738, 358.14982775192709, 359.34380640332847, 361.55526717275529,
360.46690568746544, 360.49561926858325, 359.08761654248656, 361.26910267767016],
'SKYFIBERID': [4, 19, 30, 38, 54, 55, 57, 62]}}
args: qa_dict: dictionary from sky continuum QA
outfile: pdf file to save the plot
"""
spectrograph=qa_dict["SPECTROGRAPH"]
expid=qa_dict["EXPID"]
arm=qa_dict["ARM"]
paname=qa_dict["PANAME"]
skycont_fiber=np.array(qa_dict["VALUE"]["SKYCONT_FIBER"])
skycont_amps=np.array(qa_dict["VALUE"]["SKYCONT_AMP"])
index=np.arange(skycont_fiber.shape[0])
fiberid=qa_dict["VALUE"]["SKYFIBERID"]
fig=plt.figure()
plt.suptitle("Mean Sky Continuum after %s, Camera: %s%s, ExpID: %s"%(paname,arm,spectrograph,expid))
ax1=fig.add_subplot(211)
hist_med=ax1.bar(index,skycont_fiber,color='b',align='center')
ax1.set_xlabel('SKY fibers',fontsize=10)
ax1.set_ylabel('Sky Continuum',fontsize=10)
ax1.tick_params(axis='x',labelsize=10)
ax1.tick_params(axis='y',labelsize=10)
ax1.set_xticks(index)
ax1.set_xticklabels(fiberid)
ax2=fig.add_subplot(212)
heatmap1=ax2.pcolor(skycont_amps.reshape(2,2).T,cmap=plt.cm.coolwarm)
ax2.set_xlabel("Avg. sky continuum (per Amp)",fontsize=10)
ax2.tick_params(axis='x',labelsize=10,labelbottom='off')
ax2.tick_params(axis='y',labelsize=10,labelleft='off')
ax2.annotate("Amp 1\n%.1f"%skycont_amps[0],
xy=(0.4,0.4),
fontsize=10
)
ax2.annotate("Amp 2\n%.1f"%skycont_amps[1],
xy=(1.4,0.4),
fontsize=10
)
ax2.annotate("Amp 3\n%.1f"%skycont_amps[2],
xy=(0.4,1.4),
fontsize=10
)
ax2.annotate("Amp 4\n%.1f"%skycont_amps[3],
xy=(1.4,1.4),
fontsize=10
)
fig.savefig(outfile)
def plot_SNR(qa_dict,outfile):
"""Plot SNR
`qa_dict` example::
{'ARM': 'r',
'EXPID': '00000006',
'MJD': 57578.78131121235,
'PANAME': 'SKYSUB',
'SPECTROGRAPH': 0,
'VALUE': {'MEDIAN_AMP_SNR': array([ 11.28466596, 0. , 13.18927372, 0. ]),
'MEDIAN_SNR': array([ 26.29012459, 35.02498105, 3.30635973, 7.69106173,
0.586899 , 3.59830798, 11.75768833, 8.276959 , 16.70907383, 4.82177165])}}}
Args:
qa_dict: dictionary of qa outputs from running qa_quicklook.Calculate_SNR
outfile: Name of figure.
"""
med_snr=qa_dict["VALUE"]["MEDIAN_SNR"]
med_amp_snr=qa_dict["VALUE"]["MEDIAN_AMP_SNR"]
index=np.arange(med_snr.shape[0])
arm=qa_dict["ARM"]
spectrograph=qa_dict["SPECTROGRAPH"]
expid=qa_dict["EXPID"]
paname=qa_dict["PANAME"]
fig=plt.figure()
plt.suptitle("Signal/Noise after %s, Camera: %s%s, ExpID: %s"%(paname,arm,spectrograph,expid))
ax1=fig.add_subplot(211)
hist_med=ax1.bar(index,med_snr)
ax1.set_xlabel('Fiber #',fontsize=10)
ax1.set_ylabel('Median S/N',fontsize=10)
ax1.tick_params(axis='x',labelsize=10)
ax1.tick_params(axis='y',labelsize=10)
ax2=fig.add_subplot(212)
heatmap_med=ax2.pcolor(med_amp_snr.reshape(2,2).T,cmap=plt.cm.coolwarm)
ax2.set_xlabel("Avg. Median S/N (per Amp)",fontsize=10)
ax2.tick_params(axis='x',labelsize=10,labelbottom='off')
ax2.tick_params(axis='y',labelsize=10,labelleft='off')
ax2.annotate("Amp 1\n%.3f"%med_amp_snr[0],
xy=(0.4,0.4), #- Full scale is 2
fontsize=10
)
ax2.annotate("Amp 2\n%.3f"%med_amp_snr[1],
xy=(1.4,0.4),
fontsize=10
)
ax2.annotate("Amp 3\n%.3f"%med_amp_snr[2],
xy=(0.4,1.4),
fontsize=10
)
ax2.annotate("Amp 4\n%.3f"%med_amp_snr[3],
xy=(1.4,1.4),
fontsize=10
)
fig.savefig(outfile)
|
bsd-3-clause
|
xuewei4d/scikit-learn
|
examples/ensemble/plot_stack_predictors.py
|
9
|
9085
|
"""
=================================
Combine predictors using stacking
=================================
.. currentmodule:: sklearn
Stacking refers to a method to blend estimators. In this strategy, some
estimators are individually fitted on some training data while a final
estimator is trained using the stacked predictions of these base estimators.
In this example, we illustrate the use case in which different regressors are
stacked together and a final linear penalized regressor is used to output the
prediction. We compare the performance of each individual regressor with the
stacking strategy. Stacking slightly improves the overall performance.
"""
print(__doc__)
# Authors: Guillaume Lemaitre <[email protected]>
# Maria Telenczuk <https://github.com/maikia>
# License: BSD 3 clause
# %%
# Download the dataset
##############################################################################
#
# We will use the `Ames Housing`_ dataset, which was first compiled by Dean De
# Cock and became better known after it was used in a Kaggle challenge. It is a
# set of 1460 residential homes in Ames, Iowa, each described by 80 features. We
# will use it to predict the final logarithmic price of the houses. In this
# example we will use only the 20 most interesting features chosen using
# GradientBoostingRegressor() and limit the number of entries (here we won't go
# into the details on how to select the most interesting features).
#
# The Ames housing dataset is not shipped with scikit-learn and therefore we
# will fetch it from `OpenML`_.
#
# .. _`Ames Housing`: http://jse.amstat.org/v19n3/decock.pdf
# .. _`OpenML`: https://www.openml.org/d/42165
import numpy as np
from sklearn.datasets import fetch_openml
from sklearn.utils import shuffle
def load_ames_housing():
df = fetch_openml(name="house_prices", as_frame=True)
X = df.data
y = df.target
features = ['YrSold', 'HeatingQC', 'Street', 'YearRemodAdd', 'Heating',
'MasVnrType', 'BsmtUnfSF', 'Foundation', 'MasVnrArea',
'MSSubClass', 'ExterQual', 'Condition2', 'GarageCars',
'GarageType', 'OverallQual', 'TotalBsmtSF', 'BsmtFinSF1',
'HouseStyle', 'MiscFeature', 'MoSold']
X = X[features]
X, y = shuffle(X, y, random_state=0)
X = X[:600]
y = y[:600]
return X, np.log(y)
X, y = load_ames_housing()
# %%
# Make pipeline to preprocess the data
##############################################################################
#
# Before we can use the Ames dataset we still need to do some preprocessing.
# First, the dataset has many missing values. To impute them, we will replace
# categorical missing values with the new category 'missing' and numerical
# missing values with the 'mean' of the column. We will also encode
# the categories with either :class:`~sklearn.preprocessing.OneHotEncoder
# <sklearn.preprocessing.OneHotEncoder>` or
# :class:`~sklearn.preprocessing.OrdinalEncoder
# <sklearn.preprocessing.OrdinalEncoder>` depending for which type of model we
# will use them (linear or non-linear model). To facilitate this preprocessing
# we will make two pipelines.
# You can skip this section if your data is ready to use and does
# not need preprocessing
from sklearn.compose import make_column_transformer
from sklearn.impute import SimpleImputer
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import OrdinalEncoder
from sklearn.preprocessing import StandardScaler
cat_cols = X.columns[X.dtypes == 'O']
num_cols = X.columns[X.dtypes == 'float64']
categories = [
X[column].unique() for column in X[cat_cols]]
for cat in categories:
cat[cat == None] = 'missing' # noqa
cat_proc_nlin = make_pipeline(
SimpleImputer(missing_values=None, strategy='constant',
fill_value='missing'),
OrdinalEncoder(categories=categories)
)
num_proc_nlin = make_pipeline(SimpleImputer(strategy='mean'))
cat_proc_lin = make_pipeline(
SimpleImputer(missing_values=None,
strategy='constant',
fill_value='missing'),
OneHotEncoder(categories=categories)
)
num_proc_lin = make_pipeline(
SimpleImputer(strategy='mean'),
StandardScaler()
)
# transformation to use for non-linear estimators
processor_nlin = make_column_transformer(
(cat_proc_nlin, cat_cols),
(num_proc_nlin, num_cols),
remainder='passthrough')
# transformation to use for linear estimators
processor_lin = make_column_transformer(
(cat_proc_lin, cat_cols),
(num_proc_lin, num_cols),
remainder='passthrough')
# %%
# Stack of predictors on a single data set
##############################################################################
#
# It is sometimes tedious to find the model which will best perform on a given
# dataset. Stacking provides an alternative by combining the outputs of several
# learners, without the need to choose a model specifically. The performance of
# stacking is usually close to the best model and sometimes it can outperform
# the prediction performance of each individual model.
#
# Here, we combine 3 learners (linear and non-linear) and use a ridge regressor
# to combine their outputs together.
#
# Note: although we will make new pipelines with the processors which we wrote
# in the previous section for the 3 learners, the final estimator RidgeCV()
# does not need preprocessing of the data as it will be fed with the already
# preprocessed output from the 3 learners.
from sklearn.experimental import enable_hist_gradient_boosting # noqa
from sklearn.ensemble import HistGradientBoostingRegressor
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import StackingRegressor
from sklearn.linear_model import LassoCV
from sklearn.linear_model import RidgeCV
lasso_pipeline = make_pipeline(processor_lin,
LassoCV())
rf_pipeline = make_pipeline(processor_nlin,
RandomForestRegressor(random_state=42))
gradient_pipeline = make_pipeline(
processor_nlin,
HistGradientBoostingRegressor(random_state=0))
estimators = [('Random Forest', rf_pipeline),
('Lasso', lasso_pipeline),
('Gradient Boosting', gradient_pipeline)]
stacking_regressor = StackingRegressor(estimators=estimators,
final_estimator=RidgeCV())
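# For reference, the stacked model behaves like any other scikit-learn
# estimator (fit/predict); below we evaluate it with cross-validation
# alongside each individual pipeline instead of fitting it directly.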
# %%
# Measure and plot the results
##############################################################################
#
# Now we can use Ames Housing dataset to make the predictions. We check the
# performance of each individual predictor as well as of the stack of the
# regressors.
#
# The function ``plot_regression_results`` is used to plot the predicted and
# true targets.
import time
import matplotlib.pyplot as plt
from sklearn.model_selection import cross_validate, cross_val_predict
def plot_regression_results(ax, y_true, y_pred, title, scores, elapsed_time):
"""Scatter plot of the predicted vs true targets."""
ax.plot([y_true.min(), y_true.max()],
[y_true.min(), y_true.max()],
'--r', linewidth=2)
ax.scatter(y_true, y_pred, alpha=0.2)
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.get_xaxis().tick_bottom()
ax.get_yaxis().tick_left()
ax.spines['left'].set_position(('outward', 10))
ax.spines['bottom'].set_position(('outward', 10))
ax.set_xlim([y_true.min(), y_true.max()])
ax.set_ylim([y_true.min(), y_true.max()])
ax.set_xlabel('Measured')
ax.set_ylabel('Predicted')
extra = plt.Rectangle((0, 0), 0, 0, fc="w", fill=False,
edgecolor='none', linewidth=0)
ax.legend([extra], [scores], loc='upper left')
title = title + '\n Evaluation in {:.2f} seconds'.format(elapsed_time)
ax.set_title(title)
fig, axs = plt.subplots(2, 2, figsize=(9, 7))
axs = np.ravel(axs)
for ax, (name, est) in zip(axs, estimators + [('Stacking Regressor',
stacking_regressor)]):
start_time = time.time()
score = cross_validate(est, X, y,
scoring=['r2', 'neg_mean_absolute_error'],
n_jobs=-1, verbose=0)
elapsed_time = time.time() - start_time
y_pred = cross_val_predict(est, X, y, n_jobs=-1, verbose=0)
plot_regression_results(
ax, y, y_pred,
name,
(r'$R^2={:.2f} \pm {:.2f}$' + '\n' + r'$MAE={:.2f} \pm {:.2f}$')
.format(np.mean(score['test_r2']),
np.std(score['test_r2']),
-np.mean(score['test_neg_mean_absolute_error']),
np.std(score['test_neg_mean_absolute_error'])),
elapsed_time)
plt.suptitle('Single predictors versus stacked predictors')
plt.tight_layout()
plt.subplots_adjust(top=0.9)
plt.show()
# %%
# The stacked regressor will combine the strengths of the different regressors.
# However, we also see that training the stacked regressor is much more
# computationally expensive.
|
bsd-3-clause
|
liyu1990/sklearn
|
examples/linear_model/plot_sgd_penalties.py
|
124
|
1877
|
"""
==============
SGD: Penalties
==============
Plot the contours of the three penalties.
All of the above are supported by
:class:`sklearn.linear_model.stochastic_gradient`.
"""
from __future__ import division
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
def l1(xs):
return np.array([np.sqrt((1 - np.sqrt(x ** 2.0)) ** 2.0) for x in xs])
def l2(xs):
return np.array([np.sqrt(1.0 - x ** 2.0) for x in xs])
def el(xs, z):
return np.array([(2 - 2 * x - 2 * z + 4 * x * z -
(4 * z ** 2
- 8 * x * z ** 2
+ 8 * x ** 2 * z ** 2
- 16 * x ** 2 * z ** 3
+ 8 * x * z ** 3 + 4 * x ** 2 * z ** 4) ** (1. / 2)
- 2 * x * z ** 2) / (2 - 4 * z) for x in xs])
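# l1, l2 and el return the non-negative w_1 branch of the unit-ball boundary for
# the L1, L2 and elastic-net penalties respectively; the four sign combinations
# are plotted below to trace the full contours.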
def cross(ext):
plt.plot([-ext, ext], [0, 0], "k-")
plt.plot([0, 0], [-ext, ext], "k-")
xs = np.linspace(0, 1, 100)
alpha = 0.501  # 0.5 would cause division by zero in el()
cross(1.2)
l1_color = "navy"
l2_color = "c"
elastic_net_color = "darkorange"
lw = 2
plt.plot(xs, l1(xs), color=l1_color, label="L1", lw=lw)
plt.plot(xs, -1.0 * l1(xs), color=l1_color, lw=lw)
plt.plot(-1 * xs, l1(xs), color=l1_color, lw=lw)
plt.plot(-1 * xs, -1.0 * l1(xs), color=l1_color, lw=lw)
plt.plot(xs, l2(xs), color=l2_color, label="L2", lw=lw)
plt.plot(xs, -1.0 * l2(xs), color=l2_color, lw=lw)
plt.plot(-1 * xs, l2(xs), color=l2_color, lw=lw)
plt.plot(-1 * xs, -1.0 * l2(xs), color=l2_color, lw=lw)
plt.plot(xs, el(xs, alpha), color=elastic_net_color, label="Elastic Net", lw=lw)
plt.plot(xs, -1.0 * el(xs, alpha), color=elastic_net_color, lw=lw)
plt.plot(-1 * xs, el(xs, alpha), color=elastic_net_color, lw=lw)
plt.plot(-1 * xs, -1.0 * el(xs, alpha), color=elastic_net_color, lw=lw)
plt.xlabel(r"$w_0$")
plt.ylabel(r"$w_1$")
plt.legend()
plt.axis("equal")
plt.show()
|
bsd-3-clause
|
hainm/scikit-learn
|
examples/neighbors/plot_digits_kde_sampling.py
|
251
|
2022
|
"""
=========================
Kernel Density Estimation
=========================
This example shows how kernel density estimation (KDE), a powerful
non-parametric density estimation technique, can be used to learn
a generative model for a dataset. With this generative model in place,
new samples can be drawn. These new samples reflect the underlying model
of the data.
"""
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import load_digits
from sklearn.neighbors import KernelDensity
from sklearn.decomposition import PCA
from sklearn.grid_search import GridSearchCV
# load the data
digits = load_digits()
data = digits.data
# project the 64-dimensional data to a lower dimension
pca = PCA(n_components=15, whiten=False)
data = pca.fit_transform(digits.data)
# use grid search cross-validation to optimize the bandwidth
params = {'bandwidth': np.logspace(-1, 1, 20)}
grid = GridSearchCV(KernelDensity(), params)
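# GridSearchCV picks the bandwidth whose cross-validated score (the total
# log-likelihood reported by KernelDensity.score) is highest.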
grid.fit(data)
print("best bandwidth: {0}".format(grid.best_estimator_.bandwidth))
# use the best estimator to compute the kernel density estimate
kde = grid.best_estimator_
# sample 44 new points from the data
new_data = kde.sample(44, random_state=0)
new_data = pca.inverse_transform(new_data)
# turn data into a 4x11 grid
new_data = new_data.reshape((4, 11, -1))
real_data = digits.data[:44].reshape((4, 11, -1))
# plot real digits and resampled digits
fig, ax = plt.subplots(9, 11, subplot_kw=dict(xticks=[], yticks=[]))
for j in range(11):
ax[4, j].set_visible(False)
for i in range(4):
im = ax[i, j].imshow(real_data[i, j].reshape((8, 8)),
cmap=plt.cm.binary, interpolation='nearest')
im.set_clim(0, 16)
im = ax[i + 5, j].imshow(new_data[i, j].reshape((8, 8)),
cmap=plt.cm.binary, interpolation='nearest')
im.set_clim(0, 16)
ax[0, 5].set_title('Selection from the input data')
ax[5, 5].set_title('"New" digits drawn from the kernel density model')
plt.show()
|
bsd-3-clause
|
CtraliePubs/SOCGMM2016_SlidingWindowVideo
|
PCADemo/doSwordPCAVideo.py
|
1
|
4966
|
#Code to sample points from a polygon mesh
import sys
sys.path.append("../")
sys.path.append("../S3DGLPy")
from SlidingWindow1D import *
from sklearn.decomposition import PCA
import numpy as np
import scipy
from Primitives3D import *
from PolyMesh import *
from MeshCanvas import *
import matplotlib.pyplot as plt
#########################################################
## UTILITY FUNCTIONS ##
#########################################################
class PCAGLCanvas(BasicMeshCanvas):
def __init__(self, parent, Y, C, angles, stds, prefix):
BasicMeshCanvas.__init__(self, parent)
self.Y = Y #Geometry
self.YMean = np.mean(Y, 0)
self.C = C #Colors
self.YBuf = vbo.VBO(np.array(self.Y, dtype=np.float32))
self.CBuf = vbo.VBO(np.array(self.C, dtype=np.float32))
self.angles = angles #Angles to rotate the camera through as the trajectory is going
self.stds = stds
self.prefix = prefix
self.frameNum = 0
#Initialize sphere mesh
self.bbox = BBox3D()
self.bbox.fromPoints(Y)
self.camera.centerOnBBox(self.bbox, theta = angles[0, 0], phi = angles[0, 1]) #theta = -math.pi/2, phi = math.pi/2)
self.Refresh()
def setupPerspectiveMatrix(self):
glMatrixMode(GL_PROJECTION)
glLoadIdentity()
gluPerspective(180.0*self.camera.yfov/M_PI, float(self.size.x)/self.size.y, 0.001, 100)
def repaint(self):
self.setupPerspectiveMatrix()
glClearColor(0.0, 0.0, 0.0, 0.0)
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
glDisable(GL_LIGHTING)
glEnableClientState(GL_VERTEX_ARRAY)
glEnableClientState(GL_COLOR_ARRAY)
self.YBuf.bind()
glVertexPointerf(self.YBuf)
self.CBuf.bind()
glColorPointerf(self.CBuf)
#Rotate camera
self.camera.theta = self.angles[self.frameNum, 0]
self.camera.phi = self.angles[self.frameNum, 1]
self.camera.updateVecsFromPolar()
#Set up modelview matrix
self.camera.gotoCameraFrame()
glDrawArrays(GL_POINTS, 0, self.Y.shape[0])
#First principal component
if self.frameNum > 200 and self.frameNum < 360:
glLineWidth(10.0)
else:
glLineWidth(2.0)
glColor3f(1, 0, 0)
glBegin(GL_LINES)
glVertex3f(self.YMean[0], self.YMean[1], self.YMean[2])
glVertex3f(self.YMean[0], self.YMean[1]-self.stds[1]*2, self.YMean[2])
glEnd()
if self.frameNum >= 360 and self.frameNum < 470:
glLineWidth(10.0)
else:
glLineWidth(2.0)
glColor3f(0, 1, 0)
glBegin(GL_LINES)
glVertex3f(self.YMean[0], self.YMean[1], self.YMean[2])
glVertex3f(self.YMean[0]-self.stds[0]*3, self.YMean[1], self.YMean[2])
glEnd()
glLineWidth(3.0)
glColor3f(0, 0, 1)
glBegin(GL_LINES)
glVertex3f(self.YMean[0], self.YMean[1], self.YMean[2])
glVertex3f(self.YMean[0], self.YMean[1], self.YMean[2]+self.stds[2]*3)
glEnd()
self.CBuf.unbind()
self.YBuf.unbind()
glDisableClientState(GL_VERTEX_ARRAY)
glDisableClientState(GL_COLOR_ARRAY)
saveImageGL(self, "%s%i.png"%(self.prefix, self.frameNum))
if self.frameNum < self.Y.shape[0] - 1:
self.frameNum += 1
self.Refresh()
else:
self.parent.Destroy()
self.SwapBuffers()
def doPCAGLPlot(Y, C, angles, stds, prefix):
app = wx.PySimpleApp()
frame = wx.Frame(None, wx.ID_ANY, "PCA GL Canvas", DEFAULT_POS, (800, 800))
g = PCAGLCanvas(frame, Y, C, angles, stds, prefix)
frame.canvas = g
frame.Show()
app.MainLoop()
app.Destroy()
if __name__ == '__main__':
NRandSamples = 10000 #You can tweak this number
np.random.seed(100) #For repeatable results randomly sampling
m = PolyMesh()
m.loadFile("sword2.off")
(Y, Ns) = m.randomlySamplePoints(NRandSamples)
Y = Y.T
Y = Y - np.mean(Y, 0)[None, :]
C = np.array(Y)
C = C - np.min(C, 0)[None, :]
C = C/np.max(C, 0)[None, :]
pca = PCA()
Y = pca.fit_transform(Y)
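#Rotate the sampled points into their principal axes so the dominant shape
#directions of the sword align with the coordinate axes before plotting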
Y = Y/np.max(np.abs(Y))
plt.scatter(Y[:, 0], Y[:, 1], 20, C, edgecolors='none')
plt.axes().set_aspect('equal', 'datalim')
plt.xlabel('First Principal Axis')
plt.ylabel('Second Principal Axis')
plt.title('2D PCA of Sword Point Cloud')
plt.savefig("SwordPCA.png", dpi=300, bbox_inches='tight')
Y = Y[:, [1, 0, 2]]
stds = np.std(Y, 0)
#Output rotation video
NFrames = 500
angles = np.pi/1.5*np.ones((NFrames, 2))
angles[:, 0] = np.linspace(0, 2*np.pi, NFrames)
angles[:, 1] = np.linspace(np.pi/1.5, np.pi/1.3, NFrames)
doPCAGLPlot(Y, C, angles, stds, "Points")
|
apache-2.0
|
Winand/pandas
|
pandas/core/indexes/datetimes.py
|
1
|
79180
|
# pylint: disable=E1101
from __future__ import division
import operator
import warnings
from datetime import time, datetime
from datetime import timedelta
import numpy as np
from pandas.core.base import _shared_docs
from pandas.core.dtypes.common import (
_NS_DTYPE, _INT64_DTYPE,
is_object_dtype, is_datetime64_dtype,
is_datetimetz, is_dtype_equal,
is_integer, is_float,
is_integer_dtype,
is_datetime64_ns_dtype,
is_period_dtype,
is_bool_dtype,
is_string_dtype,
is_list_like,
is_scalar,
pandas_dtype,
_ensure_int64)
from pandas.core.dtypes.generic import ABCSeries
from pandas.core.dtypes.dtypes import DatetimeTZDtype
from pandas.core.dtypes.missing import isna
import pandas.core.dtypes.concat as _concat
from pandas.errors import PerformanceWarning
from pandas.core.common import _values_from_object, _maybe_box
from pandas.core.indexes.base import Index, _index_shared_docs
from pandas.core.indexes.numeric import Int64Index, Float64Index
import pandas.compat as compat
from pandas.tseries.frequencies import (
to_offset, get_period_alias,
Resolution)
from pandas.core.indexes.datetimelike import (
DatelikeOps, TimelikeOps, DatetimeIndexOpsMixin)
from pandas.tseries.offsets import DateOffset, generate_range, Tick, CDay
from pandas.core.tools.datetimes import (
parse_time_string, normalize_date, to_time)
from pandas.core.tools.timedeltas import to_timedelta
from pandas.util._decorators import (Appender, cache_readonly,
deprecate_kwarg, Substitution)
import pandas.core.common as com
import pandas.tseries.offsets as offsets
import pandas.core.tools.datetimes as tools
from pandas._libs import (lib, index as libindex, tslib as libts,
algos as libalgos, join as libjoin,
Timestamp, period as libperiod)
from pandas._libs.tslibs import timezones
def _utc():
import pytz
return pytz.utc
# -------- some conversion wrapper functions
def _field_accessor(name, field, docstring=None):
def f(self):
values = self.asi8
if self.tz is not None:
utc = _utc()
if self.tz is not utc:
values = self._local_timestamps()
if field in self._bool_ops:
if field in ['is_month_start', 'is_month_end',
'is_quarter_start', 'is_quarter_end',
'is_year_start', 'is_year_end']:
month_kw = (self.freq.kwds.get('startingMonth',
self.freq.kwds.get('month', 12))
if self.freq else 12)
result = libts.get_start_end_field(values, field, self.freqstr,
month_kw)
else:
result = libts.get_date_field(values, field)
# these return a boolean by-definition
return result
if field in self._object_ops:
result = libts.get_date_name_field(values, field)
result = self._maybe_mask_results(result)
else:
result = libts.get_date_field(values, field)
result = self._maybe_mask_results(result, convert='float64')
return Index(result, name=self.name)
f.__name__ = name
f.__doc__ = docstring
return property(f)
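# Illustrative use of the properties generated by _field_accessor (a sketch,
# not executed here; assumes `import pandas as pd`):
#   >>> idx = pd.DatetimeIndex(['2016-02-29', '2017-03-01'])
#   >>> idx.is_leap_year    # boolean ndarray: array([ True, False])
#   >>> idx.month           # integer Index of month numbers: [2, 3]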
def _dt_index_cmp(opname, nat_result=False):
"""
Wrap comparison operations to convert datetime-like to datetime64
"""
def wrapper(self, other):
func = getattr(super(DatetimeIndex, self), opname)
if (isinstance(other, datetime) or
isinstance(other, compat.string_types)):
other = _to_m8(other, tz=self.tz)
result = func(other)
if isna(other):
result.fill(nat_result)
else:
if isinstance(other, list):
other = DatetimeIndex(other)
elif not isinstance(other, (np.ndarray, Index, ABCSeries)):
other = _ensure_datetime64(other)
result = func(np.asarray(other))
result = _values_from_object(result)
if isinstance(other, Index):
o_mask = other.values.view('i8') == libts.iNaT
else:
o_mask = other.view('i8') == libts.iNaT
if o_mask.any():
result[o_mask] = nat_result
if self.hasnans:
result[self._isnan] = nat_result
# support of bool dtype indexers
if is_bool_dtype(result):
return result
return Index(result)
return wrapper
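# Illustrative behaviour of the wrapped comparisons (a sketch, not executed
# here): string/datetime comparands are coerced via _to_m8, and NaT positions
# receive `nat_result`:
#   >>> idx = pd.DatetimeIndex(['2017-01-01', '2017-01-02', pd.NaT])
#   >>> idx == '2017-01-01'   # -> array([ True, False, False])
#   >>> idx != '2017-01-01'   # -> array([False,  True,  True])  (NaT -> True)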
def _ensure_datetime64(other):
if isinstance(other, np.datetime64):
return other
raise TypeError('%s type object %s' % (type(other), str(other)))
_midnight = time(0, 0)
def _new_DatetimeIndex(cls, d):
""" This is called upon unpickling, rather than the default which doesn't
have arguments and breaks __new__ """
# data are already in UTC
# so need to localize
tz = d.pop('tz', None)
result = cls.__new__(cls, verify_integrity=False, **d)
if tz is not None:
result = result.tz_localize('UTC').tz_convert(tz)
return result
class DatetimeIndex(DatelikeOps, TimelikeOps, DatetimeIndexOpsMixin,
Int64Index):
"""
Immutable ndarray of datetime64 data, represented internally as int64, and
which can be boxed to Timestamp objects that are subclasses of datetime and
carry metadata such as frequency information.
Parameters
----------
data : array-like (1-dimensional), optional
Optional datetime-like data to construct index with
copy : bool
Make a copy of input ndarray
freq : string or pandas offset object, optional
One of pandas date offset strings or corresponding objects
start : starting value, datetime-like, optional
If data is None, start is used as the start point in generating regular
timestamp data.
periods : int, optional, > 0
Number of periods to generate, if generating index. Takes precedence
over end argument
end : end time, datetime-like, optional
If periods is none, generated index will extend to first conforming
time on or just past end argument
closed : string or None, default None
Make the interval closed with respect to the given frequency to
the 'left', 'right', or both sides (None)
tz : pytz.timezone or dateutil.tz.tzfile
ambiguous : 'infer', bool-ndarray, 'NaT', default 'raise'
- 'infer' will attempt to infer fall dst-transition hours based on
order
- bool-ndarray where True signifies a DST time, False signifies a
non-DST time (note that this flag is only applicable for ambiguous
times)
- 'NaT' will return NaT where there are ambiguous times
- 'raise' will raise an AmbiguousTimeError if there are ambiguous times
infer_dst : boolean, default False
.. deprecated:: 0.15.0
Attempt to infer fall dst-transition hours based on order
name : object
Name to be stored in the index
Notes
-----
To learn more about the frequency strings, please see `this link
<http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases>`__.
"""
_typ = 'datetimeindex'
_join_precedence = 10
def _join_i8_wrapper(joinf, **kwargs):
return DatetimeIndexOpsMixin._join_i8_wrapper(joinf, dtype='M8[ns]',
**kwargs)
_inner_indexer = _join_i8_wrapper(libjoin.inner_join_indexer_int64)
_outer_indexer = _join_i8_wrapper(libjoin.outer_join_indexer_int64)
_left_indexer = _join_i8_wrapper(libjoin.left_join_indexer_int64)
_left_indexer_unique = _join_i8_wrapper(
libjoin.left_join_indexer_unique_int64, with_indexers=False)
_arrmap = None
__eq__ = _dt_index_cmp('__eq__')
__ne__ = _dt_index_cmp('__ne__', nat_result=True)
__lt__ = _dt_index_cmp('__lt__')
__gt__ = _dt_index_cmp('__gt__')
__le__ = _dt_index_cmp('__le__')
__ge__ = _dt_index_cmp('__ge__')
_engine_type = libindex.DatetimeEngine
tz = None
offset = None
_comparables = ['name', 'freqstr', 'tz']
_attributes = ['name', 'freq', 'tz']
# define my properties & methods for delegation
_bool_ops = ['is_month_start', 'is_month_end',
'is_quarter_start', 'is_quarter_end', 'is_year_start',
'is_year_end', 'is_leap_year']
_object_ops = ['weekday_name', 'freq', 'tz']
_field_ops = ['year', 'month', 'day', 'hour', 'minute', 'second',
'weekofyear', 'week', 'weekday', 'dayofweek',
'dayofyear', 'quarter', 'days_in_month',
'daysinmonth', 'microsecond',
'nanosecond']
_other_ops = ['date', 'time']
_datetimelike_ops = _field_ops + _object_ops + _bool_ops + _other_ops
_datetimelike_methods = ['to_period', 'tz_localize',
'tz_convert',
'normalize', 'strftime', 'round', 'floor',
'ceil']
_is_numeric_dtype = False
_infer_as_myclass = True
@deprecate_kwarg(old_arg_name='infer_dst', new_arg_name='ambiguous',
mapping={True: 'infer', False: 'raise'})
def __new__(cls, data=None,
freq=None, start=None, end=None, periods=None,
copy=False, name=None, tz=None,
verify_integrity=True, normalize=False,
closed=None, ambiguous='raise', dtype=None, **kwargs):
# This allows to later ensure that the 'copy' parameter is honored:
if isinstance(data, Index):
ref_to_data = data._data
else:
ref_to_data = data
if name is None and hasattr(data, 'name'):
name = data.name
dayfirst = kwargs.pop('dayfirst', None)
yearfirst = kwargs.pop('yearfirst', None)
freq_infer = False
if not isinstance(freq, DateOffset):
# if a passed freq is None, don't infer automatically
if freq != 'infer':
freq = to_offset(freq)
else:
freq_infer = True
freq = None
if periods is not None:
if is_float(periods):
periods = int(periods)
elif not is_integer(periods):
msg = 'periods must be a number, got {periods}'
raise TypeError(msg.format(periods=periods))
if data is None and freq is None:
raise ValueError("Must provide freq argument if no data is "
"supplied")
        # if dtype has an embedded tz, capture it
if dtype is not None:
try:
dtype = DatetimeTZDtype.construct_from_string(dtype)
dtz = getattr(dtype, 'tz', None)
if dtz is not None:
if tz is not None and str(tz) != str(dtz):
raise ValueError("cannot supply both a tz and a dtype"
" with a tz")
tz = dtz
except TypeError:
pass
if data is None:
return cls._generate(start, end, periods, name, freq,
tz=tz, normalize=normalize, closed=closed,
ambiguous=ambiguous)
if not isinstance(data, (np.ndarray, Index, ABCSeries)):
if is_scalar(data):
raise ValueError('DatetimeIndex() must be called with a '
'collection of some kind, %s was passed'
% repr(data))
# other iterable of some kind
if not isinstance(data, (list, tuple)):
data = list(data)
data = np.asarray(data, dtype='O')
elif isinstance(data, ABCSeries):
data = data._values
# data must be Index or np.ndarray here
if not (is_datetime64_dtype(data) or is_datetimetz(data) or
is_integer_dtype(data)):
data = tools.to_datetime(data, dayfirst=dayfirst,
yearfirst=yearfirst)
if issubclass(data.dtype.type, np.datetime64) or is_datetimetz(data):
if isinstance(data, DatetimeIndex):
if tz is None:
tz = data.tz
elif data.tz is None:
data = data.tz_localize(tz, ambiguous=ambiguous)
else:
# the tz's must match
if str(tz) != str(data.tz):
msg = ('data is already tz-aware {0}, unable to '
'set specified tz: {1}')
raise TypeError(msg.format(data.tz, tz))
subarr = data.values
if freq is None:
freq = data.offset
verify_integrity = False
else:
if data.dtype != _NS_DTYPE:
subarr = libts.cast_to_nanoseconds(data)
else:
subarr = data
else:
# must be integer dtype otherwise
if isinstance(data, Int64Index):
raise TypeError('cannot convert Int64Index->DatetimeIndex')
if data.dtype != _INT64_DTYPE:
data = data.astype(np.int64)
subarr = data.view(_NS_DTYPE)
if isinstance(subarr, DatetimeIndex):
if tz is None:
tz = subarr.tz
else:
if tz is not None:
tz = timezones.maybe_get_tz(tz)
if (not isinstance(data, DatetimeIndex) or
getattr(data, 'tz', None) is None):
# Convert tz-naive to UTC
ints = subarr.view('i8')
subarr = libts.tz_localize_to_utc(ints, tz,
ambiguous=ambiguous)
subarr = subarr.view(_NS_DTYPE)
subarr = cls._simple_new(subarr, name=name, freq=freq, tz=tz)
if dtype is not None:
if not is_dtype_equal(subarr.dtype, dtype):
# dtype must be coerced to DatetimeTZDtype above
if subarr.tz is not None:
raise ValueError("cannot localize from non-UTC data")
if verify_integrity and len(subarr) > 0:
if freq is not None and not freq_infer:
inferred = subarr.inferred_freq
if inferred != freq.freqstr:
on_freq = cls._generate(subarr[0], None, len(subarr), None,
freq, tz=tz, ambiguous=ambiguous)
if not np.array_equal(subarr.asi8, on_freq.asi8):
raise ValueError('Inferred frequency {0} from passed '
'dates does not conform to passed '
'frequency {1}'
.format(inferred, freq.freqstr))
if freq_infer:
inferred = subarr.inferred_freq
if inferred:
subarr.offset = to_offset(inferred)
return subarr._deepcopy_if_needed(ref_to_data, copy)
@classmethod
def _generate(cls, start, end, periods, name, offset,
tz=None, normalize=False, ambiguous='raise', closed=None):
if com._count_not_none(start, end, periods) != 2:
raise ValueError('Of the three parameters: start, end, and '
'periods, exactly two must be specified')
_normalized = True
if start is not None:
start = Timestamp(start)
if end is not None:
end = Timestamp(end)
left_closed = False
right_closed = False
if start is None and end is None:
if closed is not None:
raise ValueError("Closed has to be None if not both of start"
"and end are defined")
if closed is None:
left_closed = True
right_closed = True
elif closed == "left":
left_closed = True
elif closed == "right":
right_closed = True
else:
raise ValueError("Closed has to be either 'left', 'right' or None")
try:
inferred_tz = tools._infer_tzinfo(start, end)
except:
raise TypeError('Start and end cannot both be tz-aware with '
'different timezones')
inferred_tz = timezones.maybe_get_tz(inferred_tz)
# these may need to be localized
tz = timezones.maybe_get_tz(tz)
if tz is not None:
date = start or end
if date.tzinfo is not None and hasattr(tz, 'localize'):
tz = tz.localize(date.replace(tzinfo=None)).tzinfo
if tz is not None and inferred_tz is not None:
if not (timezones.get_timezone(inferred_tz) ==
timezones.get_timezone(tz)):
raise AssertionError("Inferred time zone not equal to passed "
"time zone")
elif inferred_tz is not None:
tz = inferred_tz
if start is not None:
if normalize:
start = normalize_date(start)
_normalized = True
else:
_normalized = _normalized and start.time() == _midnight
if end is not None:
if normalize:
end = normalize_date(end)
_normalized = True
else:
_normalized = _normalized and end.time() == _midnight
if hasattr(offset, 'delta') and offset != offsets.Day():
if inferred_tz is None and tz is not None:
# naive dates
if start is not None and start.tz is None:
start = start.tz_localize(tz, ambiguous=False)
if end is not None and end.tz is None:
end = end.tz_localize(tz, ambiguous=False)
if start and end:
if start.tz is None and end.tz is not None:
start = start.tz_localize(end.tz, ambiguous=False)
if end.tz is None and start.tz is not None:
end = end.tz_localize(start.tz, ambiguous=False)
if _use_cached_range(offset, _normalized, start, end):
index = cls._cached_range(start, end, periods=periods,
offset=offset, name=name)
else:
index = _generate_regular_range(start, end, periods, offset)
else:
if tz is not None:
# naive dates
if start is not None and start.tz is not None:
start = start.replace(tzinfo=None)
if end is not None and end.tz is not None:
end = end.replace(tzinfo=None)
if start and end:
if start.tz is None and end.tz is not None:
end = end.replace(tzinfo=None)
if end.tz is None and start.tz is not None:
start = start.replace(tzinfo=None)
if _use_cached_range(offset, _normalized, start, end):
index = cls._cached_range(start, end, periods=periods,
offset=offset, name=name)
else:
index = _generate_regular_range(start, end, periods, offset)
if tz is not None and getattr(index, 'tz', None) is None:
index = libts.tz_localize_to_utc(_ensure_int64(index), tz,
ambiguous=ambiguous)
index = index.view(_NS_DTYPE)
# index is localized datetime64 array -> have to convert
# start/end as well to compare
if start is not None:
start = start.tz_localize(tz).asm8
if end is not None:
end = end.tz_localize(tz).asm8
if not left_closed and len(index) and index[0] == start:
index = index[1:]
if not right_closed and len(index) and index[-1] == end:
index = index[:-1]
index = cls._simple_new(index, name=name, freq=offset, tz=tz)
return index
@property
def _box_func(self):
return lambda x: Timestamp(x, freq=self.offset, tz=self.tz)
def _convert_for_op(self, value):
""" Convert value to be insertable to ndarray """
if self._has_same_tz(value):
return _to_m8(value)
raise ValueError('Passed item and index have different timezone')
def _local_timestamps(self):
utc = _utc()
if self.is_monotonic:
return libts.tz_convert(self.asi8, utc, self.tz)
else:
values = self.asi8
indexer = values.argsort()
result = libts.tz_convert(values.take(indexer), utc, self.tz)
n = len(indexer)
reverse = np.empty(n, dtype=np.int_)
reverse.put(indexer, np.arange(n))
return result.take(reverse)
@classmethod
def _simple_new(cls, values, name=None, freq=None, tz=None,
dtype=None, **kwargs):
"""
        we require that we have a dtype compat for the values
if we are passed a non-dtype compat, then coerce using the constructor
"""
if getattr(values, 'dtype', None) is None:
# empty, but with dtype compat
if values is None:
values = np.empty(0, dtype=_NS_DTYPE)
return cls(values, name=name, freq=freq, tz=tz,
dtype=dtype, **kwargs)
values = np.array(values, copy=False)
if is_object_dtype(values):
return cls(values, name=name, freq=freq, tz=tz,
dtype=dtype, **kwargs).values
elif not is_datetime64_dtype(values):
values = _ensure_int64(values).view(_NS_DTYPE)
result = object.__new__(cls)
result._data = values
result.name = name
result.offset = freq
result.tz = timezones.maybe_get_tz(tz)
result._reset_identity()
return result
@property
def tzinfo(self):
"""
Alias for tz attribute
"""
return self.tz
@cache_readonly
def _timezone(self):
""" Comparable timezone both for pytz / dateutil"""
return timezones.get_timezone(self.tzinfo)
def _has_same_tz(self, other):
zzone = self._timezone
        # vzone shouldn't be None if value is non-datetime like
if isinstance(other, np.datetime64):
# convert to Timestamp as np.datetime64 doesn't have tz attr
other = Timestamp(other)
vzone = timezones.get_timezone(getattr(other, 'tzinfo', '__no_tz__'))
return zzone == vzone
@classmethod
def _cached_range(cls, start=None, end=None, periods=None, offset=None,
name=None):
if start is None and end is None:
# I somewhat believe this should never be raised externally
raise TypeError('Must specify either start or end.')
if start is not None:
start = Timestamp(start)
if end is not None:
end = Timestamp(end)
if (start is None or end is None) and periods is None:
raise TypeError(
'Must either specify period or provide both start and end.')
if offset is None:
# This can't happen with external-facing code
raise TypeError('Must provide offset.')
drc = _daterange_cache
if offset not in _daterange_cache:
xdr = generate_range(offset=offset, start=_CACHE_START,
end=_CACHE_END)
arr = tools.to_datetime(list(xdr), box=False)
cachedRange = DatetimeIndex._simple_new(arr)
cachedRange.offset = offset
cachedRange.tz = None
cachedRange.name = None
drc[offset] = cachedRange
else:
cachedRange = drc[offset]
if start is None:
if not isinstance(end, Timestamp):
raise AssertionError('end must be an instance of Timestamp')
end = offset.rollback(end)
endLoc = cachedRange.get_loc(end) + 1
startLoc = endLoc - periods
elif end is None:
if not isinstance(start, Timestamp):
raise AssertionError('start must be an instance of Timestamp')
start = offset.rollforward(start)
startLoc = cachedRange.get_loc(start)
endLoc = startLoc + periods
else:
if not offset.onOffset(start):
start = offset.rollforward(start)
if not offset.onOffset(end):
end = offset.rollback(end)
startLoc = cachedRange.get_loc(start)
endLoc = cachedRange.get_loc(end) + 1
indexSlice = cachedRange[startLoc:endLoc]
indexSlice.name = name
indexSlice.offset = offset
return indexSlice
def _mpl_repr(self):
# how to represent ourselves to matplotlib
return libts.ints_to_pydatetime(self.asi8, self.tz)
@cache_readonly
def _is_dates_only(self):
from pandas.io.formats.format import _is_dates_only
return _is_dates_only(self.values)
@property
def _formatter_func(self):
from pandas.io.formats.format import _get_format_datetime64
formatter = _get_format_datetime64(is_dates_only=self._is_dates_only)
return lambda x: "'%s'" % formatter(x, tz=self.tz)
def __reduce__(self):
        # we use a special reduce here because we need
# to simply set the .tz (and not reinterpret it)
d = dict(data=self._data)
d.update(self._get_attributes_dict())
return _new_DatetimeIndex, (self.__class__, d), None
def __setstate__(self, state):
"""Necessary for making this object picklable"""
if isinstance(state, dict):
super(DatetimeIndex, self).__setstate__(state)
elif isinstance(state, tuple):
# < 0.15 compat
if len(state) == 2:
nd_state, own_state = state
data = np.empty(nd_state[1], dtype=nd_state[2])
np.ndarray.__setstate__(data, nd_state)
self.name = own_state[0]
self.offset = own_state[1]
self.tz = own_state[2]
# provide numpy < 1.7 compat
if nd_state[2] == 'M8[us]':
new_state = np.ndarray.__reduce__(data.astype('M8[ns]'))
np.ndarray.__setstate__(data, new_state[2])
else: # pragma: no cover
data = np.empty(state)
np.ndarray.__setstate__(data, state)
self._data = data
self._reset_identity()
else:
raise Exception("invalid pickle state")
_unpickle_compat = __setstate__
def _add_datelike(self, other):
# adding a timedeltaindex to a datetimelike
if other is libts.NaT:
return self._nat_new(box=True)
raise TypeError("cannot add a datelike to a DatetimeIndex")
def _sub_datelike(self, other):
# subtract a datetime from myself, yielding a TimedeltaIndex
from pandas import TimedeltaIndex
if isinstance(other, DatetimeIndex):
# require tz compat
if not self._has_same_tz(other):
raise TypeError("DatetimeIndex subtraction must have the same "
"timezones or no timezones")
result = self._sub_datelike_dti(other)
elif isinstance(other, (libts.Timestamp, datetime)):
other = Timestamp(other)
if other is libts.NaT:
result = self._nat_new(box=False)
# require tz compat
elif not self._has_same_tz(other):
raise TypeError("Timestamp subtraction must have the same "
"timezones or no timezones")
else:
i8 = self.asi8
result = i8 - other.value
result = self._maybe_mask_results(result,
fill_value=libts.iNaT)
else:
raise TypeError("cannot subtract DatetimeIndex and {typ}"
.format(typ=type(other).__name__))
return TimedeltaIndex(result, name=self.name, copy=False)
def _sub_datelike_dti(self, other):
"""subtraction of two DatetimeIndexes"""
if not len(self) == len(other):
raise ValueError("cannot add indices of unequal length")
self_i8 = self.asi8
other_i8 = other.asi8
new_values = self_i8 - other_i8
if self.hasnans or other.hasnans:
mask = (self._isnan) | (other._isnan)
new_values[mask] = libts.iNaT
return new_values.view('i8')
def _maybe_update_attributes(self, attrs):
""" Update Index attributes (e.g. freq) depending on op """
freq = attrs.get('freq', None)
if freq is not None:
# no need to infer if freq is None
attrs['freq'] = 'infer'
return attrs
def _add_delta(self, delta):
from pandas import TimedeltaIndex
name = self.name
if isinstance(delta, (Tick, timedelta, np.timedelta64)):
new_values = self._add_delta_td(delta)
elif isinstance(delta, TimedeltaIndex):
new_values = self._add_delta_tdi(delta)
# update name when delta is Index
name = com._maybe_match_name(self, delta)
elif isinstance(delta, DateOffset):
new_values = self._add_offset(delta).asi8
else:
new_values = self.astype('O') + delta
tz = 'UTC' if self.tz is not None else None
result = DatetimeIndex(new_values, tz=tz, name=name, freq='infer')
utc = _utc()
if self.tz is not None and self.tz is not utc:
result = result.tz_convert(self.tz)
return result
def _add_offset(self, offset):
try:
if self.tz is not None:
values = self.tz_localize(None)
else:
values = self
result = offset.apply_index(values)
if self.tz is not None:
result = result.tz_localize(self.tz)
return result
except NotImplementedError:
warnings.warn("Non-vectorized DateOffset being applied to Series "
"or DatetimeIndex", PerformanceWarning)
return self.astype('O') + offset
def _format_native_types(self, na_rep='NaT', date_format=None, **kwargs):
from pandas.io.formats.format import _get_format_datetime64_from_values
format = _get_format_datetime64_from_values(self, date_format)
return libts.format_array_from_datetime(self.asi8,
tz=self.tz,
format=format,
na_rep=na_rep)
def to_datetime(self, dayfirst=False):
return self.copy()
@Appender(_index_shared_docs['astype'])
def astype(self, dtype, copy=True):
dtype = pandas_dtype(dtype)
if is_object_dtype(dtype):
return self.asobject
elif is_integer_dtype(dtype):
return Index(self.values.astype('i8', copy=copy), name=self.name,
dtype='i8')
elif is_datetime64_ns_dtype(dtype):
if self.tz is not None:
return self.tz_convert('UTC').tz_localize(None)
elif copy is True:
return self.copy()
return self
elif is_string_dtype(dtype):
return Index(self.format(), name=self.name, dtype=object)
elif is_period_dtype(dtype):
return self.to_period(freq=dtype.freq)
raise ValueError('Cannot cast DatetimeIndex to dtype %s' % dtype)
def _get_time_micros(self):
utc = _utc()
values = self.asi8
if self.tz is not None and self.tz is not utc:
values = self._local_timestamps()
return libts.get_time_micros(values)
def to_series(self, keep_tz=False):
"""
Create a Series with both index and values equal to the index keys
useful with map for returning an indexer based on an index
Parameters
----------
keep_tz : optional, defaults False.
return the data keeping the timezone.
If keep_tz is True:
If the timezone is not set, the resulting
Series will have a datetime64[ns] dtype.
              Otherwise the Series will have a datetime64[ns, tz] dtype; the
tz will be preserved.
If keep_tz is False:
Series will have a datetime64[ns] dtype. TZ aware
objects will have the tz removed.
Returns
-------
Series
"""
from pandas import Series
return Series(self._to_embed(keep_tz),
index=self._shallow_copy(),
name=self.name)
def _to_embed(self, keep_tz=False):
"""
return an array repr of this object, potentially casting to object
This is for internal compat
"""
if keep_tz and self.tz is not None:
# preserve the tz & copy
return self.copy(deep=True)
return self.values.copy()
def to_pydatetime(self):
"""
Return DatetimeIndex as object ndarray of datetime.datetime objects
Returns
-------
datetimes : ndarray
"""
return libts.ints_to_pydatetime(self.asi8, tz=self.tz)
def to_period(self, freq=None):
"""
Cast to PeriodIndex at a particular frequency
"""
from pandas.core.indexes.period import PeriodIndex
if freq is None:
freq = self.freqstr or self.inferred_freq
if freq is None:
msg = ("You must pass a freq argument as "
"current index has none.")
raise ValueError(msg)
freq = get_period_alias(freq)
return PeriodIndex(self.values, name=self.name, freq=freq, tz=self.tz)
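    # Illustrative usage of to_period (a sketch; assumes `import pandas as pd`):
    #   >>> idx = pd.date_range('2017-01-31', periods=3, freq='M')
    #   >>> idx.to_period()   # freq inferred from the index
    #   -> PeriodIndex of ['2017-01', '2017-02', '2017-03'] with freq='M'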
def snap(self, freq='S'):
"""
Snap time stamps to nearest occurring frequency
"""
# Superdumb, punting on any optimizing
freq = to_offset(freq)
snapped = np.empty(len(self), dtype=_NS_DTYPE)
for i, v in enumerate(self):
s = v
if not freq.onOffset(s):
t0 = freq.rollback(s)
t1 = freq.rollforward(s)
if abs(s - t0) < abs(t1 - s):
s = t0
else:
s = t1
snapped[i] = s
# we know it conforms; skip check
return DatetimeIndex(snapped, freq=freq, verify_integrity=False)
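    # Illustrative behaviour of snap, following the rollback/rollforward logic
    # above (a sketch, not executed here):
    #   >>> idx = pd.DatetimeIndex(['2017-01-01 00:20', '2017-01-01 00:40'])
    #   >>> idx.snap(freq='H')
    #   -> each stamp moves to its nearer hour boundary: 00:00 and 01:00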
def union(self, other):
"""
        Specialized union for DatetimeIndex objects. If combining
        overlapping ranges with the same DateOffset, this will be much
        faster than Index.union
Parameters
----------
other : DatetimeIndex or array-like
Returns
-------
y : Index or DatetimeIndex
"""
self._assert_can_do_setop(other)
if not isinstance(other, DatetimeIndex):
try:
other = DatetimeIndex(other)
except TypeError:
pass
this, other = self._maybe_utc_convert(other)
if this._can_fast_union(other):
return this._fast_union(other)
else:
result = Index.union(this, other)
if isinstance(result, DatetimeIndex):
result.tz = this.tz
if (result.freq is None and
(this.freq is not None or other.freq is not None)):
result.offset = to_offset(result.inferred_freq)
return result
def to_perioddelta(self, freq):
"""
        Calculates TimedeltaIndex of difference between index
values and index converted to PeriodIndex at specified
freq. Used for vectorized offsets
.. versionadded:: 0.17.0
Parameters
----------
freq : Period frequency
Returns
-------
y : TimedeltaIndex
"""
return to_timedelta(self.asi8 - self.to_period(freq)
.to_timestamp().asi8)
def union_many(self, others):
"""
A bit of a hack to accelerate unioning a collection of indexes
"""
this = self
for other in others:
if not isinstance(this, DatetimeIndex):
this = Index.union(this, other)
continue
if not isinstance(other, DatetimeIndex):
try:
other = DatetimeIndex(other)
except TypeError:
pass
this, other = this._maybe_utc_convert(other)
if this._can_fast_union(other):
this = this._fast_union(other)
else:
tz = this.tz
this = Index.union(this, other)
if isinstance(this, DatetimeIndex):
this.tz = tz
if this.freq is None:
this.offset = to_offset(this.inferred_freq)
return this
def join(self, other, how='left', level=None, return_indexers=False,
sort=False):
"""
See Index.join
"""
if (not isinstance(other, DatetimeIndex) and len(other) > 0 and
other.inferred_type not in ('floating', 'mixed-integer',
'mixed-integer-float', 'mixed')):
try:
other = DatetimeIndex(other)
except (TypeError, ValueError):
pass
this, other = self._maybe_utc_convert(other)
return Index.join(this, other, how=how, level=level,
return_indexers=return_indexers, sort=sort)
def _maybe_utc_convert(self, other):
this = self
if isinstance(other, DatetimeIndex):
if self.tz is not None:
if other.tz is None:
raise TypeError('Cannot join tz-naive with tz-aware '
'DatetimeIndex')
elif other.tz is not None:
raise TypeError('Cannot join tz-naive with tz-aware '
'DatetimeIndex')
if self.tz != other.tz:
this = self.tz_convert('UTC')
other = other.tz_convert('UTC')
return this, other
def _wrap_joined_index(self, joined, other):
name = self.name if self.name == other.name else None
if (isinstance(other, DatetimeIndex) and
self.offset == other.offset and
self._can_fast_union(other)):
joined = self._shallow_copy(joined)
joined.name = name
return joined
else:
tz = getattr(other, 'tz', None)
return self._simple_new(joined, name, tz=tz)
def _can_fast_union(self, other):
if not isinstance(other, DatetimeIndex):
return False
offset = self.offset
if offset is None or offset != other.offset:
return False
if not self.is_monotonic or not other.is_monotonic:
return False
if len(self) == 0 or len(other) == 0:
return True
# to make our life easier, "sort" the two ranges
if self[0] <= other[0]:
left, right = self, other
else:
left, right = other, self
right_start = right[0]
left_end = left[-1]
# Only need to "adjoin", not overlap
try:
return (right_start == left_end + offset) or right_start in left
except (ValueError):
# if we are comparing an offset that does not propagate timezones
# this will raise
return False
def _fast_union(self, other):
if len(other) == 0:
return self.view(type(self))
if len(self) == 0:
return other.view(type(self))
# to make our life easier, "sort" the two ranges
if self[0] <= other[0]:
left, right = self, other
else:
left, right = other, self
left_start, left_end = left[0], left[-1]
right_end = right[-1]
if not self.offset._should_cache():
# concatenate dates
if left_end < right_end:
loc = right.searchsorted(left_end, side='right')
right_chunk = right.values[loc:]
dates = _concat._concat_compat((left.values, right_chunk))
return self._shallow_copy(dates)
else:
return left
else:
return type(self)(start=left_start,
end=max(left_end, right_end),
freq=left.offset)
def __iter__(self):
"""
Return an iterator over the boxed values
Returns
-------
Timestamps : ndarray
"""
# convert in chunks of 10k for efficiency
data = self.asi8
l = len(self)
chunksize = 10000
chunks = int(l / chunksize) + 1
for i in range(chunks):
start_i = i * chunksize
end_i = min((i + 1) * chunksize, l)
converted = libts.ints_to_pydatetime(data[start_i:end_i],
tz=self.tz, freq=self.freq,
box=True)
for v in converted:
yield v
def _wrap_union_result(self, other, result):
name = self.name if self.name == other.name else None
if self.tz != other.tz:
raise ValueError('Passed item and index have different timezone')
return self._simple_new(result, name=name, freq=None, tz=self.tz)
def intersection(self, other):
"""
Specialized intersection for DatetimeIndex objects. May be much faster
than Index.intersection
Parameters
----------
other : DatetimeIndex or array-like
Returns
-------
y : Index or DatetimeIndex
"""
self._assert_can_do_setop(other)
if not isinstance(other, DatetimeIndex):
try:
other = DatetimeIndex(other)
except (TypeError, ValueError):
pass
result = Index.intersection(self, other)
if isinstance(result, DatetimeIndex):
if result.freq is None:
result.offset = to_offset(result.inferred_freq)
return result
elif (other.offset is None or self.offset is None or
other.offset != self.offset or
not other.offset.isAnchored() or
(not self.is_monotonic or not other.is_monotonic)):
result = Index.intersection(self, other)
result = self._shallow_copy(result._values, name=result.name,
tz=result.tz, freq=None)
if result.freq is None:
result.offset = to_offset(result.inferred_freq)
return result
if len(self) == 0:
return self
if len(other) == 0:
return other
# to make our life easier, "sort" the two ranges
if self[0] <= other[0]:
left, right = self, other
else:
left, right = other, self
end = min(left[-1], right[-1])
start = right[0]
if end < start:
return type(self)(data=[])
else:
lslice = slice(*left.slice_locs(start, end))
left_chunk = left.values[lslice]
return self._shallow_copy(left_chunk)
def _parsed_string_to_bounds(self, reso, parsed):
"""
Calculate datetime bounds for parsed time string and its resolution.
Parameters
----------
reso : Resolution
Resolution provided by parsed string.
parsed : datetime
Datetime from parsed string.
Returns
-------
lower, upper: pd.Timestamp
"""
if reso == 'year':
return (Timestamp(datetime(parsed.year, 1, 1), tz=self.tz),
Timestamp(datetime(parsed.year, 12, 31, 23,
59, 59, 999999), tz=self.tz))
elif reso == 'month':
d = libts.monthrange(parsed.year, parsed.month)[1]
return (Timestamp(datetime(parsed.year, parsed.month, 1),
tz=self.tz),
Timestamp(datetime(parsed.year, parsed.month, d, 23,
59, 59, 999999), tz=self.tz))
elif reso == 'quarter':
qe = (((parsed.month - 1) + 2) % 12) + 1 # two months ahead
d = libts.monthrange(parsed.year, qe)[1] # at end of month
return (Timestamp(datetime(parsed.year, parsed.month, 1),
tz=self.tz),
Timestamp(datetime(parsed.year, qe, d, 23, 59,
59, 999999), tz=self.tz))
elif reso == 'day':
st = datetime(parsed.year, parsed.month, parsed.day)
return (Timestamp(st, tz=self.tz),
Timestamp(Timestamp(st + offsets.Day(),
tz=self.tz).value - 1))
elif reso == 'hour':
st = datetime(parsed.year, parsed.month, parsed.day,
hour=parsed.hour)
return (Timestamp(st, tz=self.tz),
Timestamp(Timestamp(st + offsets.Hour(),
tz=self.tz).value - 1))
elif reso == 'minute':
st = datetime(parsed.year, parsed.month, parsed.day,
hour=parsed.hour, minute=parsed.minute)
return (Timestamp(st, tz=self.tz),
Timestamp(Timestamp(st + offsets.Minute(),
tz=self.tz).value - 1))
elif reso == 'second':
st = datetime(parsed.year, parsed.month, parsed.day,
hour=parsed.hour, minute=parsed.minute,
second=parsed.second)
return (Timestamp(st, tz=self.tz),
Timestamp(Timestamp(st + offsets.Second(),
tz=self.tz).value - 1))
elif reso == 'microsecond':
st = datetime(parsed.year, parsed.month, parsed.day,
parsed.hour, parsed.minute, parsed.second,
parsed.microsecond)
return (Timestamp(st, tz=self.tz), Timestamp(st, tz=self.tz))
else:
raise KeyError
def _partial_date_slice(self, reso, parsed, use_lhs=True, use_rhs=True):
is_monotonic = self.is_monotonic
if (is_monotonic and reso in ['day', 'hour', 'minute', 'second'] and
self._resolution >= Resolution.get_reso(reso)):
# These resolution/monotonicity validations came from GH3931,
# GH3452 and GH2369.
# See also GH14826
raise KeyError
if reso == 'microsecond':
# _partial_date_slice doesn't allow microsecond resolution, but
# _parsed_string_to_bounds allows it.
raise KeyError
t1, t2 = self._parsed_string_to_bounds(reso, parsed)
stamps = self.asi8
if is_monotonic:
# we are out of range
if (len(stamps) and ((use_lhs and t1.value < stamps[0] and
t2.value < stamps[0]) or
((use_rhs and t1.value > stamps[-1] and
t2.value > stamps[-1])))):
raise KeyError
# a monotonic (sorted) series can be sliced
left = stamps.searchsorted(
t1.value, side='left') if use_lhs else None
right = stamps.searchsorted(
t2.value, side='right') if use_rhs else None
return slice(left, right)
lhs_mask = (stamps >= t1.value) if use_lhs else True
rhs_mask = (stamps <= t2.value) if use_rhs else True
        # try to find the dates
return (lhs_mask & rhs_mask).nonzero()[0]
def _maybe_promote(self, other):
if other.inferred_type == 'date':
other = DatetimeIndex(other)
return self, other
def get_value(self, series, key):
"""
Fast lookup of value from 1-dimensional ndarray. Only use this if you
know what you're doing
"""
if isinstance(key, datetime):
# needed to localize naive datetimes
if self.tz is not None:
key = Timestamp(key, tz=self.tz)
return self.get_value_maybe_box(series, key)
if isinstance(key, time):
locs = self.indexer_at_time(key)
return series.take(locs)
try:
return _maybe_box(self, Index.get_value(self, series, key),
series, key)
except KeyError:
try:
loc = self._get_string_slice(key)
return series[loc]
except (TypeError, ValueError, KeyError):
pass
try:
return self.get_value_maybe_box(series, key)
except (TypeError, ValueError, KeyError):
raise KeyError(key)
def get_value_maybe_box(self, series, key):
# needed to localize naive datetimes
if self.tz is not None:
key = Timestamp(key, tz=self.tz)
elif not isinstance(key, Timestamp):
key = Timestamp(key)
values = self._engine.get_value(_values_from_object(series),
key, tz=self.tz)
return _maybe_box(self, values, series, key)
def get_loc(self, key, method=None, tolerance=None):
"""
Get integer location for requested label
Returns
-------
loc : int
"""
if tolerance is not None:
# try converting tolerance now, so errors don't get swallowed by
# the try/except clauses below
tolerance = self._convert_tolerance(tolerance)
if isinstance(key, datetime):
# needed to localize naive datetimes
key = Timestamp(key, tz=self.tz)
return Index.get_loc(self, key, method, tolerance)
if isinstance(key, time):
if method is not None:
raise NotImplementedError('cannot yet lookup inexact labels '
'when key is a time object')
return self.indexer_at_time(key)
try:
return Index.get_loc(self, key, method, tolerance)
except (KeyError, ValueError, TypeError):
try:
return self._get_string_slice(key)
except (TypeError, KeyError, ValueError):
pass
try:
stamp = Timestamp(key, tz=self.tz)
return Index.get_loc(self, stamp, method, tolerance)
except (KeyError, ValueError):
raise KeyError(key)
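    # Illustrative behaviour of the string fallbacks in get_loc above (a
    # sketch, not executed here):
    #   >>> idx = pd.date_range('2017-01-30', periods=4, freq='D')
    #   >>> idx.get_loc('2017-01-31')   # exact label -> integer position 1
    #   >>> idx.get_loc('2017-01')      # partial string -> slice(0, 2, None)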
def _maybe_cast_slice_bound(self, label, side, kind):
"""
If label is a string, cast it to datetime according to resolution.
Parameters
----------
label : object
side : {'left', 'right'}
kind : {'ix', 'loc', 'getitem'}
Returns
-------
label : object
Notes
-----
Value of `side` parameter should be validated in caller.
"""
assert kind in ['ix', 'loc', 'getitem', None]
if is_float(label) or isinstance(label, time) or is_integer(label):
self._invalid_indexer('slice', label)
if isinstance(label, compat.string_types):
freq = getattr(self, 'freqstr',
getattr(self, 'inferred_freq', None))
_, parsed, reso = parse_time_string(label, freq)
lower, upper = self._parsed_string_to_bounds(reso, parsed)
# lower, upper form the half-open interval:
# [parsed, parsed + 1 freq)
# because label may be passed to searchsorted
            # the bounds need to be swapped if index is reverse sorted and has a
# length > 1 (is_monotonic_decreasing gives True for empty
# and length 1 index)
if self._is_strictly_monotonic_decreasing and len(self) > 1:
return upper if side == 'left' else lower
return lower if side == 'left' else upper
else:
return label
def _get_string_slice(self, key, use_lhs=True, use_rhs=True):
freq = getattr(self, 'freqstr',
getattr(self, 'inferred_freq', None))
_, parsed, reso = parse_time_string(key, freq)
loc = self._partial_date_slice(reso, parsed, use_lhs=use_lhs,
use_rhs=use_rhs)
return loc
def slice_indexer(self, start=None, end=None, step=None, kind=None):
"""
Return indexer for specified label slice.
Index.slice_indexer, customized to handle time slicing.
In addition to functionality provided by Index.slice_indexer, does the
following:
- if both `start` and `end` are instances of `datetime.time`, it
invokes `indexer_between_time`
- if `start` and `end` are both either string or None perform
value-based selection in non-monotonic cases.
"""
# For historical reasons DatetimeIndex supports slices between two
# instances of datetime.time as if it were applying a slice mask to
        # an array of (self.hour, self.minute, self.second, self.microsecond).
if isinstance(start, time) and isinstance(end, time):
if step is not None and step != 1:
raise ValueError('Must have step size of 1 with time slices')
return self.indexer_between_time(start, end)
if isinstance(start, time) or isinstance(end, time):
raise KeyError('Cannot mix time and non-time slice keys')
try:
return Index.slice_indexer(self, start, end, step, kind=kind)
except KeyError:
# For historical reasons DatetimeIndex by default supports
# value-based partial (aka string) slices on non-monotonic arrays,
# let's try that.
if ((start is None or isinstance(start, compat.string_types)) and
(end is None or isinstance(end, compat.string_types))):
mask = True
if start is not None:
start_casted = self._maybe_cast_slice_bound(
start, 'left', kind)
mask = start_casted <= self
if end is not None:
end_casted = self._maybe_cast_slice_bound(
end, 'right', kind)
mask = (self <= end_casted) & mask
indexer = mask.nonzero()[0][::step]
if len(indexer) == len(self):
return slice(None)
else:
return indexer
else:
raise
# alias to offset
def _get_freq(self):
return self.offset
def _set_freq(self, value):
self.offset = value
freq = property(fget=_get_freq, fset=_set_freq,
doc="get/set the frequency of the Index")
year = _field_accessor('year', 'Y', "The year of the datetime")
month = _field_accessor('month', 'M',
"The month as January=1, December=12")
day = _field_accessor('day', 'D', "The days of the datetime")
hour = _field_accessor('hour', 'h', "The hours of the datetime")
minute = _field_accessor('minute', 'm', "The minutes of the datetime")
second = _field_accessor('second', 's', "The seconds of the datetime")
microsecond = _field_accessor('microsecond', 'us',
"The microseconds of the datetime")
nanosecond = _field_accessor('nanosecond', 'ns',
"The nanoseconds of the datetime")
weekofyear = _field_accessor('weekofyear', 'woy',
"The week ordinal of the year")
week = weekofyear
dayofweek = _field_accessor('dayofweek', 'dow',
"The day of the week with Monday=0, Sunday=6")
weekday = dayofweek
weekday_name = _field_accessor(
'weekday_name',
'weekday_name',
"The name of day in a week (ex: Friday)\n\n.. versionadded:: 0.18.1")
dayofyear = _field_accessor('dayofyear', 'doy',
"The ordinal day of the year")
quarter = _field_accessor('quarter', 'q', "The quarter of the date")
days_in_month = _field_accessor(
'days_in_month',
'dim',
"The number of days in the month")
daysinmonth = days_in_month
is_month_start = _field_accessor(
'is_month_start',
'is_month_start',
"Logical indicating if first day of month (defined by frequency)")
is_month_end = _field_accessor(
'is_month_end',
'is_month_end',
"Logical indicating if last day of month (defined by frequency)")
is_quarter_start = _field_accessor(
'is_quarter_start',
'is_quarter_start',
"Logical indicating if first day of quarter (defined by frequency)")
is_quarter_end = _field_accessor(
'is_quarter_end',
'is_quarter_end',
"Logical indicating if last day of quarter (defined by frequency)")
is_year_start = _field_accessor(
'is_year_start',
'is_year_start',
"Logical indicating if first day of year (defined by frequency)")
is_year_end = _field_accessor(
'is_year_end',
'is_year_end',
"Logical indicating if last day of year (defined by frequency)")
is_leap_year = _field_accessor(
'is_leap_year',
'is_leap_year',
"Logical indicating if the date belongs to a leap year")
@property
def time(self):
"""
Returns numpy array of datetime.time. The time part of the Timestamps.
"""
return self._maybe_mask_results(libalgos.arrmap_object(
self.asobject.values,
lambda x: np.nan if x is libts.NaT else x.time()))
@property
def date(self):
"""
Returns numpy array of python datetime.date objects (namely, the date
part of Timestamps without timezone information).
"""
return self._maybe_mask_results(libalgos.arrmap_object(
self.asobject.values, lambda x: x.date()))
def normalize(self):
"""
        Return DatetimeIndex with times set to midnight. Length is unaltered.
Returns
-------
normalized : DatetimeIndex
"""
new_values = libts.date_normalize(self.asi8, self.tz)
return DatetimeIndex(new_values, freq='infer', name=self.name,
tz=self.tz)
@Substitution(klass='DatetimeIndex')
@Appender(_shared_docs['searchsorted'])
@deprecate_kwarg(old_arg_name='key', new_arg_name='value')
def searchsorted(self, value, side='left', sorter=None):
if isinstance(value, (np.ndarray, Index)):
value = np.array(value, dtype=_NS_DTYPE, copy=False)
else:
value = _to_m8(value, tz=self.tz)
return self.values.searchsorted(value, side=side)
def is_type_compatible(self, typ):
return typ == self.inferred_type or typ == 'datetime'
@property
def inferred_type(self):
# b/c datetime is represented as microseconds since the epoch, make
# sure we can't have ambiguous indexing
return 'datetime64'
@cache_readonly
def dtype(self):
if self.tz is None:
return _NS_DTYPE
return DatetimeTZDtype('ns', self.tz)
@property
def is_all_dates(self):
return True
@cache_readonly
def is_normalized(self):
"""
Returns True if all of the dates are at midnight ("no time")
"""
return libts.dates_normalized(self.asi8, self.tz)
@cache_readonly
def _resolution(self):
return libperiod.resolution(self.asi8, self.tz)
def insert(self, loc, item):
"""
Make new Index inserting new item at location
Parameters
----------
loc : int
item : object
            if not either a Python datetime or a numpy integer-like, the
            returned Index dtype will be object rather than datetime.
Returns
-------
new_index : Index
"""
freq = None
if isinstance(item, (datetime, np.datetime64)):
self._assert_can_do_op(item)
if not self._has_same_tz(item):
raise ValueError(
'Passed item and index have different timezone')
# check freq can be preserved on edge cases
if self.size and self.freq is not None:
if ((loc == 0 or loc == -len(self)) and
item + self.freq == self[0]):
freq = self.freq
elif (loc == len(self)) and item - self.freq == self[-1]:
freq = self.freq
item = _to_m8(item, tz=self.tz)
try:
new_dates = np.concatenate((self[:loc].asi8, [item.view(np.int64)],
self[loc:].asi8))
if self.tz is not None:
new_dates = libts.tz_convert(new_dates, 'UTC', self.tz)
return DatetimeIndex(new_dates, name=self.name, freq=freq,
tz=self.tz)
except (AttributeError, TypeError):
# fall back to object index
if isinstance(item, compat.string_types):
return self.asobject.insert(loc, item)
raise TypeError(
"cannot insert DatetimeIndex with incompatible label")
def delete(self, loc):
"""
Make a new DatetimeIndex with passed location(s) deleted.
Parameters
----------
loc: int, slice or array of ints
Indicate which sub-arrays to remove.
Returns
-------
new_index : DatetimeIndex
"""
new_dates = np.delete(self.asi8, loc)
freq = None
if is_integer(loc):
if loc in (0, -len(self), -1, len(self) - 1):
freq = self.freq
else:
if is_list_like(loc):
loc = lib.maybe_indices_to_slice(
_ensure_int64(np.array(loc)), len(self))
if isinstance(loc, slice) and loc.step in (1, None):
if (loc.start in (0, None) or loc.stop in (len(self), None)):
freq = self.freq
if self.tz is not None:
new_dates = libts.tz_convert(new_dates, 'UTC', self.tz)
return DatetimeIndex(new_dates, name=self.name, freq=freq, tz=self.tz)
def tz_convert(self, tz):
"""
Convert tz-aware DatetimeIndex from one time zone to another (using
pytz/dateutil)
Parameters
----------
tz : string, pytz.timezone, dateutil.tz.tzfile or None
Time zone for time. Corresponding timestamps would be converted to
time zone of the TimeSeries.
None will remove timezone holding UTC time.
Returns
-------
normalized : DatetimeIndex
Raises
------
TypeError
If DatetimeIndex is tz-naive.
"""
tz = timezones.maybe_get_tz(tz)
if self.tz is None:
# tz naive, use tz_localize
raise TypeError('Cannot convert tz-naive timestamps, use '
'tz_localize to localize')
# No conversion since timestamps are all UTC to begin with
return self._shallow_copy(tz=tz)
@deprecate_kwarg(old_arg_name='infer_dst', new_arg_name='ambiguous',
mapping={True: 'infer', False: 'raise'})
def tz_localize(self, tz, ambiguous='raise', errors='raise'):
"""
Localize tz-naive DatetimeIndex to given time zone (using
pytz/dateutil), or remove timezone from tz-aware DatetimeIndex
Parameters
----------
tz : string, pytz.timezone, dateutil.tz.tzfile or None
Time zone for time. Corresponding timestamps would be converted to
time zone of the TimeSeries.
None will remove timezone holding local time.
ambiguous : 'infer', bool-ndarray, 'NaT', default 'raise'
- 'infer' will attempt to infer fall dst-transition hours based on
order
- bool-ndarray where True signifies a DST time, False signifies a
non-DST time (note that this flag is only applicable for
ambiguous times)
- 'NaT' will return NaT where there are ambiguous times
- 'raise' will raise an AmbiguousTimeError if there are ambiguous
times
errors : 'raise', 'coerce', default 'raise'
- 'raise' will raise a NonExistentTimeError if a timestamp is not
valid in the specified timezone (e.g. due to a transition from
or to DST time)
- 'coerce' will return NaT if the timestamp can not be converted
into the specified timezone
.. versionadded:: 0.19.0
infer_dst : boolean, default False
.. deprecated:: 0.15.0
Attempt to infer fall dst-transition hours based on order
Returns
-------
localized : DatetimeIndex
Raises
------
TypeError
If the DatetimeIndex is tz-aware and tz is not None.
"""
if self.tz is not None:
if tz is None:
new_dates = libts.tz_convert(self.asi8, 'UTC', self.tz)
else:
raise TypeError("Already tz-aware, use tz_convert to convert.")
else:
tz = timezones.maybe_get_tz(tz)
# Convert to UTC
new_dates = libts.tz_localize_to_utc(self.asi8, tz,
ambiguous=ambiguous,
errors=errors)
new_dates = new_dates.view(_NS_DTYPE)
return self._shallow_copy(new_dates, tz=tz)
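    # Illustrative round trip with tz_localize / tz_convert (a sketch; assumes
    # pytz zone data is available):
    #   >>> naive = pd.date_range('2017-01-01', periods=3, freq='H')
    #   >>> aware = naive.tz_localize('UTC')       # attach a zone, wall times unchanged
    #   >>> aware.tz_convert('US/Eastern')         # same instants, Eastern wall clock
    #   >>> aware.tz_localize(None)                # drop the zone, keep local wall time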
def indexer_at_time(self, time, asof=False):
"""
Select values at particular time of day (e.g. 9:30AM)
Parameters
----------
time : datetime.time or string
Returns
-------
values_at_time : TimeSeries
"""
from dateutil.parser import parse
if asof:
raise NotImplementedError("'asof' argument is not supported")
if isinstance(time, compat.string_types):
time = parse(time).time()
if time.tzinfo:
# TODO
raise NotImplementedError("argument 'time' with timezone info is "
"not supported")
time_micros = self._get_time_micros()
micros = _time_to_micros(time)
return (micros == time_micros).nonzero()[0]
def indexer_between_time(self, start_time, end_time, include_start=True,
include_end=True):
"""
Select values between particular times of day (e.g., 9:00-9:30AM).
Return values of the index between two times. If start_time or
end_time are strings then tseries.tools.to_time is used to convert to
a time object.
Parameters
----------
start_time, end_time : datetime.time, str
datetime.time or string in appropriate format ("%H:%M", "%H%M",
"%I:%M%p", "%I%M%p", "%H:%M:%S", "%H%M%S", "%I:%M:%S%p",
"%I%M%S%p")
include_start : boolean, default True
include_end : boolean, default True
Returns
-------
values_between_time : TimeSeries
"""
start_time = to_time(start_time)
end_time = to_time(end_time)
time_micros = self._get_time_micros()
start_micros = _time_to_micros(start_time)
end_micros = _time_to_micros(end_time)
if include_start and include_end:
lop = rop = operator.le
elif include_start:
lop = operator.le
rop = operator.lt
elif include_end:
lop = operator.lt
rop = operator.le
else:
lop = rop = operator.lt
if start_time <= end_time:
join_op = operator.and_
else:
join_op = operator.or_
mask = join_op(lop(start_micros, time_micros),
rop(time_micros, end_micros))
return mask.nonzero()[0]
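    # Illustrative usage of indexer_between_time (a sketch, not executed here):
    #   >>> idx = pd.date_range('2017-01-02 09:00', periods=5, freq='15T')
    #   >>> idx.indexer_between_time('9:00', '9:30')
    #   -> positions of the 09:00, 09:15 and 09:30 stamps: array([0, 1, 2])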
def to_julian_date(self):
"""
Convert DatetimeIndex to Float64Index of Julian Dates.
0 Julian date is noon January 1, 4713 BC.
http://en.wikipedia.org/wiki/Julian_day
"""
# http://mysite.verizon.net/aesir_research/date/jdalg2.htm
year = np.asarray(self.year)
month = np.asarray(self.month)
day = np.asarray(self.day)
testarr = month < 3
year[testarr] -= 1
month[testarr] += 12
return Float64Index(day +
np.fix((153 * month - 457) / 5) +
365 * year +
np.floor(year / 4) -
np.floor(year / 100) +
np.floor(year / 400) +
1721118.5 +
(self.hour +
self.minute / 60.0 +
self.second / 3600.0 +
self.microsecond / 3600.0 / 1e+6 +
self.nanosecond / 3600.0 / 1e+9
) / 24.0)
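    # Quick sanity check of the formula above (a sketch): noon on 2000-01-01 is
    # the J2000.0 epoch, whose Julian date is 2451545.0:
    #   >>> pd.DatetimeIndex(['2000-01-01 12:00']).to_julian_date()
    #   -> Float64Index([2451545.0])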
DatetimeIndex._add_numeric_methods_disabled()
DatetimeIndex._add_logical_methods_disabled()
DatetimeIndex._add_datetimelike_methods()
def _generate_regular_range(start, end, periods, offset):
if isinstance(offset, Tick):
stride = offset.nanos
if periods is None:
b = Timestamp(start).value
# cannot just use e = Timestamp(end) + 1 because arange breaks when
# stride is too large, see GH10887
e = (b + (Timestamp(end).value - b) // stride * stride +
stride // 2 + 1)
# end.tz == start.tz by this point due to _generate implementation
tz = start.tz
elif start is not None:
b = Timestamp(start).value
e = b + np.int64(periods) * stride
tz = start.tz
elif end is not None:
e = Timestamp(end).value + stride
b = e - np.int64(periods) * stride
tz = end.tz
else:
raise ValueError("at least 'start' or 'end' should be specified "
"if a 'period' is given.")
data = np.arange(b, e, stride, dtype=np.int64)
data = DatetimeIndex._simple_new(data, None, tz=tz)
else:
if isinstance(start, Timestamp):
start = start.to_pydatetime()
if isinstance(end, Timestamp):
end = end.to_pydatetime()
xdr = generate_range(start=start, end=end,
periods=periods, offset=offset)
dates = list(xdr)
# utc = len(dates) > 0 and dates[0].tzinfo is not None
data = tools.to_datetime(dates)
return data
def date_range(start=None, end=None, periods=None, freq='D', tz=None,
normalize=False, name=None, closed=None, **kwargs):
"""
Return a fixed frequency DatetimeIndex, with day (calendar) as the default
frequency
Parameters
----------
start : string or datetime-like, default None
Left bound for generating dates
end : string or datetime-like, default None
Right bound for generating dates
periods : integer, default None
Number of periods to generate
freq : string or DateOffset, default 'D' (calendar daily)
Frequency strings can have multiples, e.g. '5H'
tz : string, default None
Time zone name for returning localized DatetimeIndex, for example
Asia/Hong_Kong
normalize : bool, default False
Normalize start/end dates to midnight before generating date range
name : string, default None
Name of the resulting DatetimeIndex
closed : string, default None
Make the interval closed with respect to the given frequency to
the 'left', 'right', or both sides (None)
Notes
-----
Of the three parameters: ``start``, ``end``, and ``periods``, exactly two
must be specified.
To learn more about the frequency strings, please see `this link
<http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases>`__.
Returns
-------
rng : DatetimeIndex
"""
return DatetimeIndex(start=start, end=end, periods=periods,
freq=freq, tz=tz, normalize=normalize, name=name,
closed=closed, **kwargs)
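# Illustrative usage of date_range (a sketch; assumes `import pandas as pd`):
#   >>> pd.date_range('2017-01-01', periods=3, freq='D')
#   -> DatetimeIndex of 2017-01-01, 2017-01-02 and 2017-01-03 with freq='D'
#   >>> pd.date_range(start='2017-01-01', end='2017-01-10', freq='48H')
#   -> every other day from the 1st to the 9th (five timestamps)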
def bdate_range(start=None, end=None, periods=None, freq='B', tz=None,
normalize=True, name=None, closed=None, **kwargs):
"""
Return a fixed frequency DatetimeIndex, with business day as the default
frequency
Parameters
----------
start : string or datetime-like, default None
Left bound for generating dates
end : string or datetime-like, default None
Right bound for generating dates
periods : integer, default None
Number of periods to generate
freq : string or DateOffset, default 'B' (business daily)
Frequency strings can have multiples, e.g. '5H'
tz : string or None
Time zone name for returning localized DatetimeIndex, for example
Asia/Beijing
    normalize : bool, default True
Normalize start/end dates to midnight before generating date range
name : string, default None
Name of the resulting DatetimeIndex
closed : string, default None
Make the interval closed with respect to the given frequency to
the 'left', 'right', or both sides (None)
Notes
-----
Of the three parameters: ``start``, ``end``, and ``periods``, exactly two
must be specified.
To learn more about the frequency strings, please see `this link
<http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases>`__.
Returns
-------
rng : DatetimeIndex
"""
return DatetimeIndex(start=start, end=end, periods=periods,
freq=freq, tz=tz, normalize=normalize, name=name,
closed=closed, **kwargs)
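# Illustrative usage of bdate_range (a sketch): the business-day frequency
# skips weekends.
#   >>> pd.bdate_range('2017-01-06', periods=3)
#   -> Friday 2017-01-06, then Monday 2017-01-09 and Tuesday 2017-01-10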
def cdate_range(start=None, end=None, periods=None, freq='C', tz=None,
normalize=True, name=None, closed=None, **kwargs):
"""
Return a fixed frequency DatetimeIndex, with CustomBusinessDay as the
default frequency
Parameters
----------
start : string or datetime-like, default None
Left bound for generating dates
end : string or datetime-like, default None
Right bound for generating dates
periods : integer, default None
Number of periods to generate
freq : string or DateOffset, default 'C' (CustomBusinessDay)
Frequency strings can have multiples, e.g. '5H'
tz : string, default None
Time zone name for returning localized DatetimeIndex, for example
Asia/Beijing
    normalize : bool, default True
Normalize start/end dates to midnight before generating date range
name : string, default None
Name of the resulting DatetimeIndex
weekmask : string, Default 'Mon Tue Wed Thu Fri'
weekmask of valid business days, passed to ``numpy.busdaycalendar``
holidays : list
list/array of dates to exclude from the set of valid business days,
passed to ``numpy.busdaycalendar``
closed : string, default None
Make the interval closed with respect to the given frequency to
the 'left', 'right', or both sides (None)
Notes
-----
Of the three parameters: ``start``, ``end``, and ``periods``, exactly two
must be specified.
To learn more about the frequency strings, please see `this link
<http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases>`__.
Returns
-------
rng : DatetimeIndex
"""
if freq == 'C':
holidays = kwargs.pop('holidays', [])
weekmask = kwargs.pop('weekmask', 'Mon Tue Wed Thu Fri')
freq = CDay(holidays=holidays, weekmask=weekmask)
return DatetimeIndex(start=start, end=end, periods=periods, freq=freq,
tz=tz, normalize=normalize, name=name,
closed=closed, **kwargs)
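# Illustrative usage of cdate_range (a sketch; calling the module-level helper
# directly, since only the weekmask/holidays handling differs from bdate_range):
#   >>> cdate_range('2017-01-02', periods=3, weekmask='Mon Wed Fri')
#   -> Monday 2017-01-02, Wednesday 2017-01-04 and Friday 2017-01-06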
def _to_m8(key, tz=None):
"""
Timestamp-like => dt64
"""
if not isinstance(key, Timestamp):
# this also converts strings
key = Timestamp(key, tz=tz)
return np.int64(libts.pydt_to_i8(key)).view(_NS_DTYPE)
_CACHE_START = Timestamp(datetime(1950, 1, 1))
_CACHE_END = Timestamp(datetime(2030, 1, 1))
_daterange_cache = {}
def _naive_in_cache_range(start, end):
if start is None or end is None:
return False
else:
if start.tzinfo is not None or end.tzinfo is not None:
return False
return _in_range(start, end, _CACHE_START, _CACHE_END)
def _in_range(start, end, rng_start, rng_end):
return start > rng_start and end < rng_end
def _use_cached_range(offset, _normalized, start, end):
return (offset._should_cache() and
not (offset._normalize_cache and not _normalized) and
_naive_in_cache_range(start, end))
def _time_to_micros(time):
seconds = time.hour * 60 * 60 + 60 * time.minute + time.second
return 1000000 * seconds + time.microsecond
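# Worked example (added for clarity): for datetime.time(1, 2, 3, 4) the helper above
# returns 1000000 * (1*3600 + 2*60 + 3) + 4 == 3723000004 microseconds.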
|
bsd-3-clause
|
mfxox/ILCC
|
ILCC/pcd_corners_est.py
|
1
|
35494
|
import numpy as np
from numpy import linalg as LA
from numpy.linalg import norm, inv
import shutil, copy
from sklearn.decomposition import PCA
from scipy import spatial
import matplotlib.path as mplPath
import transforms3d
from scipy.optimize import minimize
import cPickle
import os
from ast import literal_eval as make_tuple
from multiprocessing import Pool
import re
import warnings
from scipy import stats
import config
params = config.default_params()
# marker length of long side and short side
marker_size = make_tuple(params["pattern_size"])
marker_l = params["grid_length"] * marker_size[1]
marker_s = params["grid_length"] * marker_size[0]
marker_th_l_max = marker_l * 1.6
marker_th_s_max = marker_s * 1.6
marker_th_l_min = marker_l * 0.8
marker_th_s_min = marker_s * 0.8
# if the point clouds haven't been segmented, they will be processed
# not_segmented = params['not_segmented']
not_segmented = True
debug = False
# get vertical and horizontal scan resolution for jdc and agglomeration
if params['LiDAR_type'] == 'hdl32':
h_coef = 2 * np.sin(np.deg2rad(360. / (70000. / 32.)) / 2)
v_coef = 2 * np.sin(np.deg2rad(41.34 / 31.) / 2.)
elif params['LiDAR_type'] == 'hdl64':
h_coef = 2 * np.sin(np.deg2rad(0.08) / 2)
v_coef = 2 * np.sin(np.deg2rad(0.4 / 2.))
elif params['LiDAR_type'] == 'vlp16_puck':
h_coef = 2 * np.sin(np.deg2rad(0.25) / 2)
v_coef = 2 * np.sin(np.deg2rad(2) / 2.)
elif params['LiDAR_type'] == 'vlp32c':
h_coef = 2 * np.sin(np.deg2rad(0.25) / 2)
v_coef = 2 * np.sin(np.deg2rad(0.333) / 2.)#non-linear [4.667, -4. , -3.667, -3.333, -3. , -2.667, -2.333, -2. ,-1.333, 0.667, 1. , 1.667, 2.333, 3.333, 4.667, 7. , 10.333, 15.]
    #delta_ang= [0.667, 0.333, 0.334, 0.333, 0.333, 0.334, 0.333, 0.667, 2. , 0.333, 0.667, 0.666, 1. ,1.334, 2.333, 3.333, 4.6673]
else:
    raise AssertionError("Please set a supported LiDAR_type in config.yaml")
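# Note (added for clarity): the h_coef/v_coef values above are unit chord lengths,
# 2*sin(delta/2) for the angular spacing delta between neighbouring points or beams.
# Multiplying such a coefficient by a point's range gives the expected spacing to its
# neighbour at that distance, which is what the jdc/agglomeration thresholds scale with.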
# scanline segment class: segments are built scanline by scanline
class jdc_segment:
def __init__(self, laser_id=-1, segment_id=-1, points_xyz=np.array([-1, -1, -1]), points_rgb=np.array([255, 0, 0])):
self.laser_id = laser_id
self.segment_id = segment_id
self.points_xyz = points_xyz
self.points_rgb = points_rgb
self.is_potential = False
self.centroid = np.array([0, 0, 0])
    def update_centroid(self):  # calculate the centroid of a segment as the mean of its points
self.centroid = self.points_xyz.mean(0)
def calc_integrate_len(self):
points_num = self.points_xyz.shape[0]
total_length = 0
for i in xrange(points_num - 1):
total_length = total_length + LA.norm(self.points_xyz[i] - self.points_xyz[i + 1])
return total_length
def get_points_num(self):
return self.points_xyz.shape[0]
def calc_aver_angle_dif(self):
points_num = self.points_xyz.shape[0]
ang_dif_list = list()
if points_num < 3:
ang_dif_list = list([1, 1, 1])
# print "points_num="+str(points_num)
else:
for i in np.arange(1, points_num - 1):
a = (self.points_xyz[i] - self.points_xyz[i - 1]) * 100
b = (self.points_xyz[i + 1] - self.points_xyz[i]) * 100
ang_dif_cos = np.dot(a, b) / (LA.norm(a) * LA.norm(b))
ang_dif_list.append(ang_dif_cos)
if len(ang_dif_list) > 1:
ang_dif_list.remove(min(ang_dif_list))
return np.array(ang_dif_list).mean()
# segment class agglomerated from the scanline segments
class jdc_segments_collection:
def __init__(self):
self.segs_list = list()
self.__plane_detection_points_thre = 30
self.__normals_list = list()
self.__jdc_thre_ratio = params['jdc_thre_ratio']
self.__jdc_angle_thre = 0.5
self.__csv_path = ""
self.__palnar_normal_num = 100
self.ransac_distance_threshold = 0.1
self.__human_length_th_lower = 0
self.__human_length_th_upper = 60
self.__point_in_plane_threshold = 0.01
self.__random_seg_color = True
self.__agglomerative_cluster_th_ratio = params['agglomerative_cluster_th_ratio']
self.l = params['laser_beams_num']
self.__g_th = -1000
self.__r_th = -1000
self.__horizontal_scan_coef = h_coef
self.__vertical_scan_coef = v_coef
def set_random_color(self, t_f):
self.__random_seg_color = t_f
def add_seg(self, jdc_seg):
self.segs_list.append(jdc_seg)
def set_csv_path(self, csv_path):
self.__csv_path = csv_path
def is_point_in_plane(self, points_arr):
planes_num = len(self.__normals_list)
result = False
for i in xrange(planes_num):
plane_normal = self.__normals_list[i]
error = abs(np.asmatrix(np.array(plane_normal)[:3]) * points_arr.T + plane_normal[3]).mean()
# print error
# print "test"
if error < self.__point_in_plane_threshold:
result = True
break
return result
def exact_planar_normals(self):
import pcl
if self.__csv_path == "":
print "csv file path is not spcified!"
raw_data = np.genfromtxt(self.__csv_path, delimiter=",", skip_header=1)
points_xyz_arr = np.array(raw_data[:, :3], dtype=np.float32)
points_cloud = pcl.PointCloud()
points_cloud.from_array(points_xyz_arr)
for i in xrange(self.__palnar_normal_num):
seg = points_cloud.make_segmenter()
seg.set_optimize_coefficients(True)
seg.set_model_type(pcl.SACMODEL_PLANE)
seg.set_method_type(pcl.SAC_RANSAC)
seg.set_distance_threshold(self.ransac_distance_threshold)
indices, model = seg.segment()
if len(indices) < self.__plane_detection_points_thre:
break
            # model returns the Hessian Normal Form of a plane in 3D
# http://mathworld.wolfram.com/HessianNormalForm.html
self.__normals_list.append(model)
tmp = points_cloud.to_array()
tmp = np.delete(tmp, indices, 0)
points_cloud.from_array(tmp)
# show_xyzrgb_points_vtk(tmp)
def get_potential_segments(self):
laser_id_col_ind = 4
with open(self.__csv_path) as f:
first_line = f.readline()
header_itmes = first_line.split(",")
for i in xrange(len(header_itmes)):
if header_itmes[i].find("laser_id") > -1:
print "laser_id is found in ", i, "-th colunm!"
laser_id_col_ind = i
break
else:
                warnings.warn(
                    "laser_id was not found in the header of the csv file. This may cause the point cloud segmentation to fail.",
                    UserWarning)
raw_data = np.genfromtxt(self.__csv_path, delimiter=",", skip_header=1)
self.__r_th = max(raw_data[:, 2])
self.__g_th = min(raw_data[:, 2])
l = self.l
for m in xrange(l):
            order = np.where(raw_data[:, laser_id_col_ind] == m)  # select rows whose laser_id equals m
            temp_data = raw_data[order[0]][:, :3]  # extract XYZ data for points with laser_id == m
data_arr_by_id_list = temp_data
total_points_num = data_arr_by_id_list.shape[0]
start_point_pos = 0
stop_point_pos = 0
first_found = False
first_jdc = -1
least_points_num = 0
for i in xrange(total_points_num):
# print i
a = data_arr_by_id_list[i, :]
if i < total_points_num - 1:
b = data_arr_by_id_list[i + 1, :]
else:
b = data_arr_by_id_list[0, :]
dis = LA.norm(a - b)
current_thre = self.__horizontal_scan_coef * LA.norm(a) * self.__jdc_thre_ratio
if dis >= current_thre:
stop_point_pos = i
jdc_seg = jdc_segment()
jdc_seg.laser_id = m
jdc_seg.segment_id = i
jdc_seg.points_xyz = data_arr_by_id_list[start_point_pos:stop_point_pos + 1, :]
jdc_seg.update_centroid()
jdc_seg.points_rgb = np.random.randint(0, 255, size=3)
if len(jdc_seg.points_xyz) > least_points_num:
if not first_found:
first_jdc = i
first_found = True
else:
self.add_seg(jdc_seg)
del jdc_seg
start_point_pos = i + 1
if i == total_points_num - 1:
jdc_seg = jdc_segment()
jdc_seg.laser_id = m
jdc_seg.segment_id = i
jdc_seg.points_xyz = np.vstack(
[data_arr_by_id_list[start_point_pos:i + 1, :], data_arr_by_id_list[:first_jdc + 1, :]])
if len(jdc_seg.points_xyz) > least_points_num:
self.add_seg(jdc_seg)
del jdc_seg
def return_potential_seg(self):
pot_seg = list()
for i in xrange(len(self.segs_list)):
tmp = self.segs_list[i]
if tmp.is_potential:
tmp.points_rgb = np.array([255, 0, 0])
pot_seg.append(tmp)
return pot_seg
def cluster_seg(self):
clustered_list = list()
print "jdc number: ", len(self.segs_list)
copy_segs_list = copy.deepcopy(self.segs_list)
z_list = []
for segs in copy_segs_list:
z_list.append(segs.centroid[2])
order_z = np.argsort(z_list)
sorted_segs_list = []
for ele in order_z:
sorted_segs_list.append(copy_segs_list[ele])
# g_list = list()
# r_list = list()
# for i in range(len(sorted_segs_list) - 1, -1, -1):
# if sorted_segs_list[i].centroid[2] <= self.__g_th + 0.01:
# g_list.append(sorted_segs_list[i])
# sorted_segs_list.pop(i)
# elif sorted_segs_list[i].centroid[2] >= self.__r_th - 0.01:
# r_list.append(sorted_segs_list[i])
# sorted_segs_list.pop(i)
# print "glist", len(g_list), "r_list", len(r_list)
# clustered_list.append(g_list)
# clustered_list.append(r_list)
#
searchlist = np.arange(len(sorted_segs_list)).tolist()
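        # Added note: greedy agglomeration. Take the last unvisited segment in the
        # z-sorted list as a new cluster seed, then sweep the remaining segments and
        # merge any whose centroid lies within a range-dependent threshold of some
        # cluster member and whose principal axes correlate (calc_vectors_pca_correlation).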
while len(searchlist) > 0:
clustered_seg = list()
search_num = len(searchlist) - 2
this_seg = sorted_segs_list[searchlist[-1]]
color_tup = np.array([np.random.randint(255), np.random.randint(255), np.random.randint(255)])
this_seg.points_rgb = color_tup
clustered_seg.append(this_seg)
for i in np.arange(search_num, -1, -1):
tmp_seg = sorted_segs_list[searchlist[i]]
current_agglo_thre = self.__vertical_scan_coef * LA.norm(
tmp_seg.centroid) * self.__agglomerative_cluster_th_ratio
# print "number of clusters in clustered_seg: "+str(len(clustered_seg))
for each in clustered_seg:
if LA.norm(tmp_seg.centroid - each.centroid) < current_agglo_thre and calc_vectors_pca_correlation(
tmp_seg, each):
tmp_seg.points_rgb = color_tup
clustered_seg.append(tmp_seg)
searchlist.remove(searchlist[i])
break
if len(clustered_seg) > 0:
clustered_list.append(clustered_seg)
searchlist.remove(searchlist[-1])
print "seg_co was segmented into " + str(len(clustered_list))
return clustered_list
# calculate the minimum distance between the points of two segments
def calc_min_len_of_seg_co(a, b):
min_len = 1000000
for i in a:
for j in b:
tmp = LA.norm(i - j)
if tmp < min_len:
min_len = tmp
return min_len
# calculate the correlation of segments according to their PCA decomposition
def calc_pca_correlation(a, b):
a_list = list()
b_list = list()
for tmp in a:
a_list.extend(tmp.points_xyz)
for tmp in b:
b_list.extend(tmp.points_xyz)
a_arr = np.asarray(a_list)
b_arr = np.asarray(b_list)
sim_r_th = 0.5
sim_b_th = 0.5
a_arr = np.asarray(a_arr)
b_arr = np.asarray(b_arr)
# print a_arr.shape
# print b_arr.shape
if a_arr.shape[0] > 5 and b_arr.shape[0] > 5:
pca_a = PCA(n_components=3)
pca_a.fit(a_arr)
pca_b = PCA(n_components=3)
pca_b.fit(b_arr)
# print pca_a.components_
# print pca_b.components_
sim_r = norm(pca_a.explained_variance_ratio_ - pca_b.explained_variance_ratio_)
sim_b = 0
for i in xrange(3):
sim_b = sim_b + abs((pca_a.explained_variance_ratio_[i] + pca_b.explained_variance_ratio_[i]) / 2 * (
np.dot(pca_a.components_[i], pca_b.components_[i])) / (
norm(pca_a.components_[i]) * norm(pca_b.components_[i])))
# print sim_b
if sim_r < sim_r_th and sim_b > sim_b_th:
return True
else:
return False
else:
return False
# calculate the correlation of two scanline segments
def calc_vectors_pca_correlation(a, b):
sim_r_th = 0.2 # 0.5
sim_b_th = 0.9 # 0.5
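    # Added note: sim_r is the Euclidean distance between the two segments'
    # explained-variance-ratio vectors (shape similarity), and sim_b is a
    # variance-weighted sum of absolute cosines between corresponding principal
    # axes (orientation similarity); both thresholds must pass for a merge.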
a_arr = np.asarray(a.points_xyz)
b_arr = np.asarray(b.points_xyz)
if a_arr.shape[0] > 5 and b_arr.shape[0] > 5:
pca_a = PCA(n_components=3)
pca_a.fit(a_arr)
pca_b = PCA(n_components=3)
pca_b.fit(b_arr)
# print pca_a.components_
# print pca_b.components_
sim_r = norm(pca_a.explained_variance_ratio_ - pca_b.explained_variance_ratio_)
sim_b = 0
for i in xrange(3):
sim_b = sim_b + abs((pca_a.explained_variance_ratio_[i] + pca_b.explained_variance_ratio_[i]) / 2 * (
np.dot(pca_a.components_[i], pca_b.components_[i])) / (
np.linalg.norm(pca_a.components_[i]) * np.linalg.norm(pca_b.components_[i])))
if sim_r < sim_r_th and sim_b > sim_b_th:
return True
else:
return False
else:
return False
if debug:
import vtk
def show_pcd_ndarray(array_data, color_arr=[0, 255, 0]):
all_rows = array_data.shape[0]
Colors = vtk.vtkUnsignedCharArray()
Colors.SetNumberOfComponents(3)
Colors.SetName("Colors")
Points = vtk.vtkPoints()
Vertices = vtk.vtkCellArray()
for k in xrange(all_rows):
point = array_data[k, :]
id = Points.InsertNextPoint(point[0], point[1], point[2])
Vertices.InsertNextCell(1)
Vertices.InsertCellPoint(id)
if vtk.VTK_MAJOR_VERSION > 6:
Colors.InsertNextTuple(color_arr)
else:
Colors.InsertNextTupleValue(color_arr)
dis_tmp = np.sqrt((point ** 2).sum(0))
# Colors.InsertNextTupleValue([0,255-dis_tmp/max_dist*255,0])
# Colors.InsertNextTupleValue([255-abs(point[0]/x_max*255),255-abs(point[1]/y_max*255),255-abs(point[2]/z_max*255)])
# Colors.InsertNextTupleValue([255-abs(point[0]/x_max*255),255,255])
polydata = vtk.vtkPolyData()
polydata.SetPoints(Points)
polydata.SetVerts(Vertices)
polydata.GetPointData().SetScalars(Colors)
polydata.Modified()
mapper = vtk.vtkPolyDataMapper()
if vtk.VTK_MAJOR_VERSION <= 5:
mapper.SetInput(polydata)
else:
mapper.SetInputData(polydata)
mapper.SetColorModeToDefault()
actor = vtk.vtkActor()
actor.SetMapper(mapper)
actor.GetProperty().SetPointSize(5)
# Renderer
renderer = vtk.vtkRenderer()
renderer.AddActor(actor)
renderer.SetBackground(.2, .3, .4)
renderer.ResetCamera()
# Render Window
renderWindow = vtk.vtkRenderWindow()
renderWindow.AddRenderer(renderer)
# Interactor
renderWindowInteractor = vtk.vtkRenderWindowInteractor()
renderWindowInteractor.SetRenderWindow(renderWindow)
# Begin Interaction
renderWindow.Render()
renderWindowInteractor.Start()
# determine whether a segment is potentially the chessboard's point cloud
def is_marker(file_full_path, range_res, points_num_th=250):
# result = False
jdcs_collection = cPickle.load(open(file_full_path, 'rb'))
if debug:
print file_full_path
tmp_list = list()
for jdc in jdcs_collection:
tmp_list.extend(jdc)
arr = np.array(tmp_list)
if debug:
show_pcd_ndarray(arr)
if arr.shape[0] < points_num_th:
if debug:
print "points num: ", arr.shape[0]
return False
# use the distance between the marker's center and the lidar to filter
avg = arr.mean(axis=0)
if np.linalg.norm(avg) > range_res:
if debug:
print "avg: ", np.linalg.norm(avg)
return False
# check whether is a plane
pca = PCA(n_components=3)
pca.fit(arr)
if pca.explained_variance_ratio_[2] > params['chessboard_detect_planar_PCA_ratio']:
if debug:
print "pca: ", pca.explained_variance_ratio_
return False
# map to 2D
tmp = np.dot(pca.components_, arr.T).T
points = tmp[:, :2]
    # subtract the mean point
points -= points.mean(axis=0)
bbx = points.max(axis=0) - points.min(axis=0)
if debug:
print "bbx: ", bbx
if (marker_th_l_min < bbx[0] < marker_th_l_max and marker_th_s_min < bbx[1] < marker_th_s_max) or (
marker_th_s_min < bbx[0] < marker_th_s_max and marker_th_l_min < bbx[1] < marker_th_l_max):
# analyse the distribution of the points in four quadrants
x_lin = [points.min(axis=0)[0], (points.min(axis=0)[0] + points.max(axis=0)[0]) / 2, points.max(axis=0)[0]]
y_lin = [points.min(axis=0)[1], (points.min(axis=0)[1] + points.max(axis=0)[1]) / 2, points.max(axis=0)[1]]
num_in_quadrant_ls = []
for i in xrange(2):
x_prd = [x_lin[i], x_lin[i + 1]]
for j in xrange(2):
y_prd = [y_lin[j], y_lin[j + 1]]
num_in_quadrant_ls.append(np.count_nonzero(
(points[:, 0] >= x_prd[0]) & (points[:, 0] <= x_prd[1]) & (points[:, 1] >= y_prd[0]) & (
points[:, 1] <= y_prd[1])))
normed = np.array(num_in_quadrant_ls, dtype=np.float32) / sum(num_in_quadrant_ls)
if normed.max() - normed.min() < 0.15:
print file_full_path
print "passed"
print "pca: ", pca.explained_variance_ratio_
if debug:
show_pcd_ndarray(arr)
return True
else:
return False
else:
# print "over length of diagonal line"
return False
# find the chessboard's point cloud among the segmented results
def find_marker(file_path, csv_path, range_res=params['marker_range_limit']):
file_list = os.listdir(file_path)
res_ls = []
for file in file_list:
# print file_path + file
if is_marker(file_path + file, range_res):
# print file
res_ls.append(file_path + file)
print len(res_ls)
if len(res_ls) == 0:
AssertionError("no marker is found")
if len(res_ls) > 1:
print "one than one candicate of the marker is found!"
print res_ls
print "The segment with most uniform intensity distribution is considered as the marker"
num_ls = []
for file in res_ls:
arr = exact_full_marker_data(csv_path, [file])
intensity_arr = arr[:, 3]
hist, bin_edges = np.histogram(intensity_arr, 100)
if debug:
print hist, bin_edges
num_ls.append(len(np.nonzero(hist)[0]))
res_ls = [res_ls[np.argmax(num_ls)]]
if debug:
print res_ls
assert len(res_ls) == 1
print "marker is found!"
return res_ls
# get the reflectance information of the chessboard's point cloud
def exact_full_marker_data(csv_path, marker_pkl):
all_data_arr = np.genfromtxt(csv_path, delimiter=",", skip_header=1)
marker_jdcs_collection = cPickle.load(open(marker_pkl[0], "rb"))
tmp_list = list()
for jdc in marker_jdcs_collection:
tmp_list.extend(jdc)
marker_pcd_arr = np.array(tmp_list)
tree = spatial.KDTree(all_data_arr[:, :3])
marker_full_data_ls = []
for i in xrange(marker_pcd_arr.shape[0]):
ret = tree.query(marker_pcd_arr[i])
marker_full_data_ls.append(all_data_arr[ret[1]])
marker_full_data_arr = np.array(marker_full_data_ls)
return marker_full_data_arr
# get the Hessian normal form of 3D plane
def get_plane_model(arr):
import pcl
ransac_distance_threshold = 0.05
point_cloud = pcl.PointCloud(arr.astype(np.float32))
seg = point_cloud.make_segmenter_normals(ksearch=50)
seg.set_model_type(pcl.SACMODEL_PLANE)
seg.set_method_type(pcl.SAC_RANSAC)
seg.set_max_iterations(10000)
seg.set_distance_threshold(ransac_distance_threshold)
indices, model = seg.segment()
print "percentage of points in plane model: ", np.float32(len(indices)) / arr.shape[0]
return model
# project a point to an estimated plane
def p2pl_proj(pl_norm, pl_p, p):
v = np.array(p) - np.array(pl_p)
dis = np.dot(v, pl_norm)
res = p - dis * pl_norm
return res
# estimate the mean of the low and high intensity of the chessboard's points
def get_gmm_para(arr):
from sklearn import mixture
gmm = mixture.GaussianMixture(n_components=2, covariance_type="diag", max_iter=10000,
means_init=np.array([[5], [60]])).fit(arr)
return gmm
# transfer the points of the chessboard to the chessboard plane
# for implementation convenience, the vector with the largest explained-variance ratio is mapped to the y-axis and
# the vector with the second-largest ratio is mapped to the x-axis, which differs from the explanation in the paper
def transfer_by_pca(arr):
    # to rotate the arr correctly, the direction of each axis has to be determined (PCA gives the axes but not their directions)
#
pca = PCA(n_components=3)
pca.fit(arr)
####################################################
    # there are three requirements for the coordinate axes of the chessboard's coordinate system
# 1. right-hand rule
# 2. z axis should point to the origin
# 3. the angle between y axis of chessboard and z axis of velodyne less than 90 deg
####################################################
trans_mat = pca.components_
    # switch x and y axes
trans_mat[[0, 1]] = trans_mat[[1, 0]]
    # calculate the z axis to obey the right-hand rule
trans_mat[2] = np.cross(trans_mat[0], trans_mat[1])
# to make angle between y axis of chessboard and z axis of velodyne less than 90 deg
sign2 = np.sign(np.dot(trans_mat[1], np.array([0, 0, 1])))
# print "sign2", sign2
trans_mat[[0, 1]] = sign2 * trans_mat[[0, 1]]
# to make the norm vector point to the side where the origin exists
# the angle between z axis and the vector from one point on the board to the origin should be less than 90 deg
sign = np.sign(np.dot(trans_mat[2], 0 - arr.mean(axis=0).T))
# print "sign", sign
trans_mat[[0, 2]] = sign * trans_mat[[0, 2]]
tmp = np.dot(arr, trans_mat.T)
# print pca.components_
# print "x,y,cross", np.cross(pca.components_[1], pca.components_[2])
return trans_mat, tmp
# cost function for fitting the chessboard model to the chessboard's point cloud
def cost_func_for_opt_mini(theta_t, transed_pcd, marker_full_data_arr, gray_zone, x_res=marker_size[0],
y_res=marker_size[1],
grid_len=params["grid_length"]): # ls: grids coords(not used), arr: markers arr
transed_pcd_for_costf = np.dot(transforms3d.axangles.axangle2mat([0, 0, 1], theta_t[0]),
(transed_pcd + np.array([[theta_t[1], theta_t[2], 0]])).T).T
arr = np.hstack([transed_pcd_for_costf, marker_full_data_arr[:, 3:]])
bound = np.array([[0, 0], [0, y_res], [x_res, y_res], [x_res, 0]]) * grid_len - np.array([x_res,
y_res]) * grid_len / 2
x_grid_arr = (np.array(range(0, x_res + 1)) - float(x_res) / 2) * grid_len
y_grid_arr = (np.array(range(0, y_res + 1)) - float(y_res) / 2) * grid_len
x = range(arr.shape[0])
y = []
polygon_path = mplPath.Path(bound)
cost = 0
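    # Added note: for every point inside the chessboard boundary, the expected square
    # colour is inferred from its grid cell (i, j) parity; gray-zone points are skipped,
    # while points whose intensity-estimated colour disagrees with the expected one, and
    # points outside the boundary, are penalised by their distance to the nearest grid lines.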
for row in arr:
if polygon_path.contains_point(row[:2]):
if gray_zone[0] < row[params['intensity_col_ind']] < gray_zone[1]:
y.append(0.5)
continue
else:
i = int((row[0] + x_res * grid_len / 2) / grid_len)
j = int((row[1] + y_res * grid_len / 2) / grid_len)
if i % 2 == 0:
if j % 2 == 0:
color = 0
else:
color = 1
else:
if j % 2 == 0:
color = 1
else:
color = 0
estimated_color = (np.sign(row[params['intensity_col_ind']] - gray_zone[1]) + 1) / 2
if estimated_color != color:
cost += (min(abs(row[0] - x_grid_arr)) + min(abs(row[1] - y_grid_arr)))
y.append(color)
else:
cost += (min(abs(row[0] - x_grid_arr)) + min(abs(row[1] - y_grid_arr)))
y.append(2)
return cost
# create the chessboard model
def generate_grid_coords(x_res=marker_size[0], y_res=marker_size[1], grid_len=params['grid_length']): # res, resolution
ls = []
for i in xrange(x_res):
for j in xrange(y_res):
orig = np.array([i, j, 0]) * grid_len - np.array([x_res, y_res, 0]) * grid_len / 2
p1 = np.array([i + 1, j, 0]) * grid_len - np.array([x_res, y_res, 0]) * grid_len / 2
p2 = np.array([i, j + 1, 0]) * grid_len - np.array([x_res, y_res, 0]) * grid_len / 2
if i % 2 == params['start_pattern_corner']:
if j % 2 == params['start_pattern_corner']:
color = params['start_pattern_corner']
else:
color = 1 - params['start_pattern_corner']
else:
if j % 2 == params['start_pattern_corner']:
color = 1 - params['start_pattern_corner']
else:
color = params['start_pattern_corner']
ls.append([orig, p1, p2, color])
return ls
# analyze the intensity distribution of the chessboard's point cloud to determine the gray zone
def get_gray_thre(intes_arr):
gray_zone_debug = False
    # find the gray zone of intensity (if a point's intensity falls in this zone, the weight for that point will be zero)
gmm = get_gmm_para(np.expand_dims(intes_arr, axis=1))
tmp_thres = gmm.means_.mean()
if gray_zone_debug:
print "Mean of intensity by GMM: ", tmp_thres
hist, bin_edges = np.histogram(intes_arr, 100)
if gray_zone_debug:
import matplotlib.pyplot as plt
plt.hist(intes_arr, 100)
plt.show()
order = np.argsort(hist)
low_intensity = -1
high_intensity = -1
low_found = False
high_found = False
for i in range(order.shape[0] - 1, -1, -1):
if bin_edges[order[i]] > tmp_thres and not high_found:
high_found = True
high_intensity = bin_edges[order[i]]
if bin_edges[order[i]] < tmp_thres and not low_found:
low_found = True
low_intensity = bin_edges[order[i]]
if high_found and low_found:
break
else:
print "gray zone is not well detected!"
print low_intensity, high_intensity
return low_intensity, high_intensity
# segment a single frame of the point cloud into several segments
def seg_pcd(csv_path, save_folder_path):
import pcl
seg_num_thre = 3
jdc_points_num_thres = 0
seg_count = 0
jdc_collection = jdc_segments_collection()
jdc_collection.set_csv_path(csv_path)
print "csv_file loaded!"
jdc_collection.get_potential_segments()
clustered_seg_list = jdc_collection.cluster_seg()
potential_seg_co_list = list()
for tmp_seg_co in clustered_seg_list:
# if is_human(tmp_seg_co):
# color_tuple=np.array([0,255,0])
potential_seg_co_list.append(tmp_seg_co)
twice_clustered_seg_list = clustered_seg_list
print "twice_clustered_seg num=" + str(len(twice_clustered_seg_list))
parts = csv_path.split("/")
if os.path.isdir(save_folder_path + parts[-1].split(".")[0]):
shutil.rmtree(save_folder_path + parts[-1].split(".")[0])
os.makedirs(save_folder_path + parts[-1].split(".")[0])
count_after_filter = 0
for tmp_seg_co in twice_clustered_seg_list:
if len(tmp_seg_co) > seg_num_thre:
count_after_filter += 1
list_for_pedestrians_pcd = list()
list_for_jdcs = list()
for j in xrange(len(tmp_seg_co)):
tmp_seg = tmp_seg_co[j]
list_for_jdcs.append(tmp_seg.points_xyz.tolist())
for k in xrange(tmp_seg.points_xyz.shape[0]):
point = tmp_seg.points_xyz[k, :]
list_for_pedestrians_pcd.append(point)
arr_for_pedestrians_pcd = np.asarray(list_for_pedestrians_pcd, dtype=np.float32)
if arr_for_pedestrians_pcd.shape[0] > 0:
pcd_pedestrian = pcl.PointCloud(arr_for_pedestrians_pcd)
parts = csv_path.split("/")
if pcd_pedestrian.size > jdc_points_num_thres:
save_path_for_pedestrian_txt = save_folder_path + "/" + parts[-1].split(".")[0] + "/" + \
parts[-1].split(".")[0] + "block" + str(
seg_count) + ".txt"
seg_count += 1
cPickle.dump(list_for_jdcs, open(save_path_for_pedestrian_txt, 'wb'))
del arr_for_pedestrians_pcd
del list_for_pedestrians_pcd
del list_for_jdcs
del jdc_collection
# optimize to find the pose solution that makes the chessboard model fit the detected chessboard's point cloud best
def opt_min(param_ls, initial_guess=np.zeros(3).tolist()):
method = param_ls[0]
try:
res = minimize(cost_func_for_opt_mini, initial_guess, args=param_ls[1],
method=method, tol=1e-10, options={"maxiter": 10000000}) # , "disp": True
print method, ": ", res.fun, " ", res.x
return res.fun, [method, res]
except:
print method, ": could not be applied"
return None
# utilize the defined functions to get the chessboard's corners for a single frame of the point cloud
def run(csv_path, save_folder_path=os.path.join(params['base_dir'], "output/pcd_seg/"), size=marker_size):
if not_segmented:
seg_pcd(csv_path, save_folder_path)
parts = csv_path.split("/")
find_marker_path = save_folder_path + parts[-1].split(".")[0] + "/"
marker_pkl = find_marker(file_path=os.path.abspath(find_marker_path) + "/", csv_path=csv_path)
marker_full_data_arr = exact_full_marker_data(csv_path, marker_pkl)
# fit the points to the plane model
model = get_plane_model(marker_full_data_arr[:, :3])
pl_p = np.array([0, 0, -model[3] / model[2]]) # a point on the plane of the model
normal = np.array(model[:3])
fitted_list = []
for i in marker_full_data_arr[:, :3]:
b = p2pl_proj(normal, pl_p, i)
fitted_list.append(b)
marker_data_arr_fitted = np.array(fitted_list)
marker_full_data_arr_fitted = np.hstack([marker_data_arr_fitted, marker_full_data_arr[:, 3:]])
# trans chessboard
if 1:
# render for model of checkerboard
rot1, transed_pcd = transfer_by_pca(marker_data_arr_fitted)
t1 = transed_pcd.mean(axis=0)
transed_pcd = transed_pcd - t1
    # calculate the rotation angle in the xoy plane around the z axis
if 1:
low_intes, high_intens = get_gray_thre(marker_full_data_arr_fitted[:, params['intensity_col_ind']])
print "low_intes,high_intes:", low_intes, high_intens
rate = 2
gray_zone = np.array([((rate - 1) * low_intes + high_intens), (low_intes + (rate - 1) * high_intens)]) / rate
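        # Added note: with rate = 2 the two bounds coincide, so the gray zone collapses
        # to the midpoint of the low/high intensities; larger values of rate would widen it.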
methods = ['Powell']
res_dict = {}
# for parallel processing
args = (transed_pcd, marker_full_data_arr, gray_zone,)
param_ls = [[method, args] for method in methods]
res_ls = map(opt_min, param_ls)
for item in res_ls:
if item is not None:
res_dict[item[0]] = item[1]
res = res_dict[min(res_dict)][1]
print res_dict[min(res_dict)][0]
print res
rot2 = transforms3d.axangles.axangle2mat([0, 0, 1], res.x[0])
t2 = np.array([res.x[1], res.x[2], 0])
if 1:
transed_pcd = np.dot(transforms3d.axangles.axangle2mat([0, 0, 1], res.x[0]),
(transed_pcd + np.array([[res.x[1], res.x[2], 0]])).T).T
gird_coords = generate_grid_coords()
grid_ls = [(p[0]).flatten()[:2] for p in gird_coords]
corner_arr = np.transpose(np.array(grid_ls).reshape(size[0], size[1], 2)[1:, 1:], (1, 0, 2))
return [rot1, t1, rot2, t2, corner_arr, res.x, os.path.relpath(marker_pkl[0])]
# for multiprocessing
def main_for_pool(i):
pcd_file = os.path.join(params['base_dir'], "pcd/") + str(i).zfill(params["file_name_digits"]) + ".csv"
print pcd_file
try:
result = run(csv_path=pcd_file)
print result
save_file_path = os.path.join(params['base_dir'], "output/pcd_seg/") + str(i).zfill(
params["file_name_digits"]) + "_pcd_result.pkl"
with open(os.path.abspath(save_file_path), 'w') as file:
file.truncate()
cPickle.dump(result, file)
print "pkl file was saved to " + save_file_path + " successfully!"
print
print
except AssertionError:
print "marker cannot be found"
print "skip " + pcd_file
# main function for detecting corners from pcd files in the folder
def detect_pcd_corners():
file_ls = os.listdir(os.path.join(params['base_dir'], "pcd"))
pcd_ls = []
for file in file_ls:
if file.find("csv") > -1:
pcd_ls.append(int(re.findall(r'\d+', file)[0]))
if params["multi_proc"]:
pool = Pool(params["proc_num"])
pool.map(main_for_pool, pcd_ls)
else:
for ind in pcd_ls:
main_for_pool(ind)
if __name__ == "__main__":
detect_pcd_corners()
# main_for_pool(1)
|
bsd-2-clause
|
Srisai85/scikit-learn
|
examples/manifold/plot_swissroll.py
|
330
|
1446
|
"""
===================================
Swiss Roll reduction with LLE
===================================
An illustration of Swiss Roll reduction
with locally linear embedding
"""
# Author: Fabian Pedregosa -- <[email protected]>
# License: BSD 3 clause (C) INRIA 2011
print(__doc__)
import matplotlib.pyplot as plt
# This import is needed to modify the way figure behaves
from mpl_toolkits.mplot3d import Axes3D
Axes3D
#----------------------------------------------------------------------
# Locally linear embedding of the swiss roll
from sklearn import manifold, datasets
X, color = datasets.samples_generator.make_swiss_roll(n_samples=1500)
print("Computing LLE embedding")
X_r, err = manifold.locally_linear_embedding(X, n_neighbors=12,
n_components=2)
print("Done. Reconstruction error: %g" % err)
#----------------------------------------------------------------------
# Plot result
fig = plt.figure()
try:
# compatibility matplotlib < 1.0
ax = fig.add_subplot(211, projection='3d')
ax.scatter(X[:, 0], X[:, 1], X[:, 2], c=color, cmap=plt.cm.Spectral)
except:
ax = fig.add_subplot(211)
ax.scatter(X[:, 0], X[:, 2], c=color, cmap=plt.cm.Spectral)
ax.set_title("Original data")
ax = fig.add_subplot(212)
ax.scatter(X_r[:, 0], X_r[:, 1], c=color, cmap=plt.cm.Spectral)
plt.axis('tight')
plt.xticks([]), plt.yticks([])
plt.title('Projected data')
plt.show()
|
bsd-3-clause
|
danielemichilli/LSPs
|
src/Pulses.py
|
1
|
1299
|
import numpy as np
import pandas as pd
from Parameters import *
def Generator(events):
#-------------------------------
# Create a table with the pulses
#-------------------------------
gb = events.groupby('Pulse',sort=False)
pulses = events.loc[gb.Sigma.idxmax()]
pulses.index = pulses.Pulse
pulses.index.name = None
pulses = pulses.loc[:,['SAP','BEAM','DM','Sigma','Time','Duration','Sample','Time_org','Downfact']]
pulses.index.name = 'idx'
pulses['Pulse'] = 0
pulses.Pulse = pulses.Pulse.astype(np.int8)
pulses['Candidate'] = -1
pulses.Candidate = pulses.Candidate.astype(np.int32)
pulses['dDM'] = (gb.DM.max() - gb.DM.min()) / 2.
pulses.dDM=pulses.dDM.astype(np.float32)
pulses['dTime'] = (gb.Time.max() - gb.Time.min()) / 2.
pulses.dTime=pulses.dTime.astype(np.float32)
#pulses['dSample'] = (gb.Sample.max() - gb.Sample.min()) / 2.
#pulses.dSample = pulses.dSample.astype(np.float32)
pulses['DM_c'] = (gb.DM.max() + gb.DM.min()) / 2.
pulses.DM_c=pulses.DM_c.astype(np.float32)
pulses['Time_c'] = (gb.Time.max() + gb.Time.min()) / 2.
pulses.Time_c=pulses.Time_c.astype(np.float32)
pulses['N_events'] = gb.DM.count()
pulses.N_events = pulses.N_events.astype(np.int16)
pulses = pulses[pulses.N_events>4]
return pulses
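# Illustrative sketch (added; not part of the original module): a minimal, made-up
# events table with the columns Generator expects, holding one pulse of five events
# so it passes the N_events > 4 cut. All values are arbitrary and only serve as an
# example; running it assumes the module-level imports above resolve.
def _example_generator_usage():  # hypothetical helper, for illustration only
  n = 5
  events = pd.DataFrame({
    'Pulse': [1] * n,
    'SAP': [0] * n,
    'BEAM': [12] * n,
    'DM': [12.0, 12.1, 12.2, 12.3, 12.4],
    'Sigma': [5.0, 6.5, 8.0, 6.0, 5.5],
    'Time': [100.00, 100.01, 100.02, 100.03, 100.04],
    'Duration': [0.001] * n,
    'Sample': [1000, 1001, 1002, 1003, 1004],
    'Time_org': [100.00, 100.01, 100.02, 100.03, 100.04],
    'Downfact': [3] * n})
  return Generator(events)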
|
mit
|
chenxulong/quanteco
|
examples/qs.py
|
7
|
1456
|
import matplotlib.pyplot as plt
import numpy as np
from scipy.stats import norm
from matplotlib import cm
xmin, xmax = -4, 12
x = 10
alpha = 0.5
m, v = x, 10
xgrid = np.linspace(xmin, xmax, 200)
fig, ax = plt.subplots()
ax.spines['right'].set_color('none')
ax.spines['top'].set_color('none')
ax.spines['left'].set_color('none')
ax.xaxis.set_ticks_position('bottom')
ax.spines['bottom'].set_position(('data', 0))
ax.set_ylim(-0.05, 0.5)
ax.set_xticks((x,))
ax.set_xticklabels((r'$x$',), fontsize=18)
ax.set_yticks(())
K = 3
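# Added note (reading of the code below): each pass updates the mean by m -> alpha*m and
# the variance by v -> alpha**2 * v + 1, i.e. the recursion of an AR(1)-style process
# X' = alpha*X + W with W ~ N(0, 1); the resulting normal densities are then plotted.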
for i in range(K):
m = alpha * m
v = alpha * alpha * v + 1
f = norm(loc=m, scale=np.sqrt(v))
k = (i + 0.5) / K
ax.plot(xgrid, f.pdf(xgrid), lw=1, color='black', alpha=0.4)
ax.fill_between(xgrid, 0 * xgrid, f.pdf(xgrid), color=cm.jet(k), alpha=0.4)
ax.annotate(r'$Q(x,\cdot)$', xy=(6.6, 0.2), xycoords='data',
xytext=(20, 90), textcoords='offset points', fontsize=16,
arrowprops=dict(arrowstyle="->", connectionstyle="arc3,rad=-0.2"))
ax.annotate(r'$Q^2(x,\cdot)$', xy=(3.6, 0.24), xycoords='data',
xytext=(20, 90), textcoords='offset points', fontsize=16,
arrowprops=dict(arrowstyle="->", connectionstyle="arc3,rad=-0.2"))
ax.annotate(r'$Q^3(x,\cdot)$', xy=(-0.2, 0.28), xycoords='data',
xytext=(-90, 90), textcoords='offset points', fontsize=16,
arrowprops=dict(arrowstyle="->", connectionstyle="arc3,rad=0.2"))
fig.show()
|
bsd-3-clause
|
YeoLab/gscripts
|
gscripts/output_parsers/parseMiso.py
|
1
|
10081
|
import numpy as np
import itertools
from collections import Counter
import sys
import pandas as pd
def max_csv(x):
'''
Of integers separated by commas, take the max
e.g. 75112684,75112684 would return 75112684
or 75112684,75112689 would return 75112689
'''
return max(map(int, x.split(',')))
def min_csv(x):
'''
Of integers separated by commas, take the minimum
e.g. 75112684,75112684 would return 75112684
or 75112684,75112689 would return 75112684
'''
return min(map(int, x.split(',')))
def read_miso_summary(filename):
'''
Reads a MISO summary file as a pandas dataframe, and adds these columns:
1. a copy-paste-able genome location at the end, based on the minimum
mRNA_starts and maximum mRNA_ends. (df.genome_location)
2. The difference between df.ci_high and df.ci_low (df.ci_diff)
3. The left and right halves of the confidence interval, e.g. the right
half is df.ci_high - df.miso_posterior_mean. (df.ci_left_half and
df.ci_right_half)
4. The max of the two left and right confidence interval halves
(df.ci_halves_max)
'''
df = pd.read_table(filename)
genome_location = pd.DataFrame(
['%s:%d-%d' % (chrom, min_csv(starts), max_csv(stops))
for chrom, starts, stops in zip(df.chrom,
df.mRNA_starts,
df.mRNA_ends)],
columns=['genome_location'], index=df.index)
ci_diff = pd.DataFrame(df.ci_high - df.ci_low, columns=['ci_diff'],
index=df.index)
ci_halves = pd.DataFrame(
{'ci_left_half': (df.ci_high - df.miso_posterior_mean),
'ci_right_half': (df.miso_posterior_mean - df.ci_low)},
index=df.index)
ci_halves_max = pd.DataFrame(ci_halves.max(axis=1),
columns=['ci_halves_max'])
return pd.concat([df, genome_location, ci_diff, ci_halves,
ci_halves_max], axis=1)
def uncertainty(msmts):
""" calculate combined uncertainty of tuples [(xi, xi-ui, xi+ui), (xj, xj-uj, xi+uj), ...]"""
msmts = np.array(msmts)
if msmts.shape[0] == 1:
return msmts.ravel()
sumMean = np.sum(msmts[:, 0])
means = msmts[:, 0]
low = msmts[:, 1]
high = msmts[:, 2]
stdevs = np.max(np.c_[np.abs(means - low), np.abs(means - high)],
axis=1)# std of msmts
term1 = np.sum(stdevs ** 2)
return np.array([sumMean, max(0, sumMean - np.sqrt(term1)),
min(1, sumMean + np.sqrt(term1))])
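# Worked example (added for clarity): uncertainty([(0.5, 0.4, 0.6), (0.3, 0.25, 0.35)])
# sums the means to 0.8, takes per-measurement stdevs 0.1 and 0.05, combines them as
# sqrt(0.1**2 + 0.05**2) ~= 0.112, and so returns approximately [0.8, 0.688, 0.912].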
def parseMisoComparison(line):
event_name, miso_posterior_mean1, ci_low1, ci_high1, miso_posterior_mean2, ci_low2, ci_high2, diff, bf, \
isoforms, counts1, assigned_counts1, counts2, assigned_counts2, chrom, strand, mRNA_starts, \
mRNA_ends = line.split("\t")
counts1 = tuple(
[(tuple(map(int, i.split(":")[0].split(","))), int(i.split(":")[1])) \
for i in
[i.replace("(", "").replace(")", "") for i in counts1.split(",(")]])
counts2 = tuple(
[(tuple(map(int, i.split(":")[0].split(","))), int(i.split(":")[1])) \
for i in
[i.replace("(", "").replace(")", "") for i in counts2.split(",(")]])
means1 = map(float, miso_posterior_mean1.split(","))
low1 = map(float, ci_low1.split(","))
high1 = map(float, ci_high1.split(","))
means2 = map(float, miso_posterior_mean2.split(","))
low2 = map(float, ci_low2.split(","))
high2 = map(float, ci_high2.split(","))
isoformLabels = isoforms.split(",")
isoformTypes = np.array([len(z.split("_")) - 2 for z in isoformLabels])
type1_1 = uncertainty(np.c_[
np.array(means1)[isoformTypes == 0], np.array(low1)[isoformTypes == 0], \
np.array(high1)[isoformTypes == 0]])
type2_1 = uncertainty(np.c_[
np.array(means1)[isoformTypes == 1], np.array(low1)[isoformTypes == 1], \
np.array(high1)[isoformTypes == 1]])
type1_2 = uncertainty(np.c_[
np.array(means2)[isoformTypes == 0], np.array(low2)[isoformTypes == 0], \
np.array(high2)[isoformTypes == 0]])
type2_2 = uncertainty(np.c_[
np.array(means2)[isoformTypes == 1], np.array(low2)[isoformTypes == 1], \
np.array(high2)[isoformTypes == 1]])
assigned_counts1 = map(int,
[l for sublist in assigned_counts1.split(",") for l
in sublist.split(":")])[1::2]
assigned_counts2 = map(int,
[l for sublist in assigned_counts2.split(",") for l
in sublist.split(":")])[1::2]
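    # Added note: the assigned_counts strings look like "0:123,1:456"; splitting on
    # "," and then ":" flattens them to ["0", "123", "1", "456"], and the [1::2]
    # slice above keeps only the per-isoform counts.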
asCts1 = Counter()
for iType, ct in zip(isoformTypes, assigned_counts1):
asCts1[str(iType)] += ct
Nassigned_counts1 = "0:%d,1:%d" % (asCts1['0'], asCts1['1'])
NCts1 = Counter()
for cat, ct in counts1:
cat = np.array(cat, dtype=bool)
iso1Type = np.array(isoformTypes, dtype=bool)
iso0Type = np.invert(iso1Type)
countType = str((
int(sum(cat * iso0Type) > 0), int(sum(cat * iso1Type) > 0))).replace(
" ", "")
NCts1[countType] += ct
NCounts1 = ",".join([k + ":" + str(NCts1[k]) for k in sorted(NCts1)])
asCts2 = Counter()
for iType, ct in zip(isoformTypes, assigned_counts2):
asCts2[str(iType)] += ct
Nassigned_counts2 = "0:%d,1:%d" % (asCts2['0'], asCts2['1'])
NCts2 = Counter()
for cat, ct in counts2:
cat = np.array(cat, dtype=bool)
iso1Type = np.array(isoformTypes, dtype=bool)
iso0Type = np.invert(iso1Type)
countType = str((
int(sum(cat * iso0Type) > 0), int(sum(cat * iso1Type) > 0))).replace(
" ", "")
NCts2[countType] += ct
NCounts2 = ",".join([k + ":" + str(NCts2[k]) for k in sorted(NCts2)])
repExIsoform = np.array(isoformLabels)[isoformTypes == 0][0][:-1] + "*'"
repInIsoform = np.array(isoformLabels)[isoformTypes == 1][0][:-1] + "*'"
repExmRNA_start = np.array(mRNA_starts.split(","))[isoformTypes == 0][0]
repExmRNA_end = np.array(mRNA_ends.split(","))[isoformTypes == 0][0]
repInmRNA_start = np.array(mRNA_starts.split(","))[isoformTypes == 1][0]
repInmRNA_end = np.array(mRNA_ends.split(","))[isoformTypes == 1][0]
repStarts = ",".join(map(str, [repExmRNA_start, repInmRNA_start]))
repEnds = ",".join(map(str, [repExmRNA_end, repInmRNA_end]))
diff = type2_1[0] - type2_2[0]
bf = "%.2f" % np.mean(np.array(map(float, bf.split(","))))
return "\t".join(
map(str, [event_name, "\t".join(["%.2f" % i for i in type2_1.ravel()]),
"\t".join(["%.2f" % i for i in type2_2.ravel()]),
diff, bf,
",".join([repExIsoform, repInIsoform]),
NCounts1, Nassigned_counts1,
NCounts2, Nassigned_counts2,
chrom, strand, repStarts, repEnds]))
def parseMisoSummary(line):
try:
event_name, miso_posterior_mean, ci_low, ci_high, isoforms, counts, assigned_counts, \
chrom, strand, mRNA_starts, mRNA_ends = line.split("\t")
except:
raise
counts = tuple(
[(tuple(map(int, i.split(":")[0].split(","))), int(i.split(":")[1])) \
for i in
[i.replace("(", "").replace(")", "") for i in counts.split(",(")]])
means = map(float, miso_posterior_mean.split(","))
low = map(float, ci_low.split(","))
high = map(float, ci_high.split(","))
isoformLabels = isoforms.split(",")
isoformTypes = np.array([len(z.split("_")) - 2 for z in isoformLabels])
type1 = uncertainty(np.c_[
np.array(means)[isoformTypes == 0], np.array(low)[isoformTypes == 0], \
np.array(high)[isoformTypes == 0]])
type2 = uncertainty(np.c_[
np.array(means)[isoformTypes == 1], np.array(low)[isoformTypes == 1], \
np.array(high)[isoformTypes == 1]])
assigned_counts = map(int,
[l for sublist in assigned_counts.split(",") for l in
sublist.split(":")])[1::2]
asCts = Counter()
for iType, ct in zip(isoformTypes, assigned_counts):
asCts[str(iType)] += ct
Nassigned_counts = "0:%d,1:%d" % (asCts['0'], asCts['1'])
NCts = Counter()
for cat, ct in counts:
cat = np.array(cat, dtype=bool)
iso1Type = np.array(isoformTypes, dtype=bool)
iso0Type = np.invert(iso1Type)
countType = str((
int(sum(cat * iso0Type) > 0), int(sum(cat * iso1Type) > 0))).replace(
" ", "")
NCts[countType] += ct
NCounts = ",".join([k + ":" + str(NCts[k]) for k in sorted(NCts)])
repExIsoform = np.array(isoformLabels)[isoformTypes == 0][0] + "*"
repInIsoform = np.array(isoformLabels)[isoformTypes == 1][0] + "*"
repExmRNA_start = np.array(mRNA_starts.split(","))[isoformTypes == 0][0]
repExmRNA_end = np.array(mRNA_ends.split(","))[isoformTypes == 0][0]
repInmRNA_start = np.array(mRNA_starts.split(","))[isoformTypes == 1][0]
repInmRNA_end = np.array(mRNA_ends.split(","))[isoformTypes == 1][0]
repStarts = ",".join(map(str, [repExmRNA_start, repInmRNA_start]))
repEnds = ",".join(map(str, [repExmRNA_end, repInmRNA_end]))
return "\t".join(map(str, [event_name,
"\t".join(["%.2f" % i for i in type2.ravel()]),
",".join([repExIsoform, repInIsoform]), \
NCounts, Nassigned_counts, chrom, strand,
repStarts, repEnds]))
if __name__ == "__main__":
for misoFile in sys.argv[1:]:
with open(misoFile) as f:
print f.readline().strip() #header
for line in f.readlines():
line = line.strip()
if "," in line.split("\t")[1]:
if len(line.split("\t")) < 18:
print parseMisoSummary(line)
else:
print parseMisoComparison(line)
else:
print line
|
mit
|
f3r/scikit-learn
|
examples/plot_digits_pipe.py
|
70
|
1813
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Pipelining: chaining a PCA and a logistic regression
=========================================================
The PCA does an unsupervised dimensionality reduction, while the logistic
regression does the prediction.
We use a GridSearchCV to set the dimensionality of the PCA
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model, decomposition, datasets
from sklearn.pipeline import Pipeline
from sklearn.model_selection import GridSearchCV
logistic = linear_model.LogisticRegression()
pca = decomposition.PCA()
pipe = Pipeline(steps=[('pca', pca), ('logistic', logistic)])
digits = datasets.load_digits()
X_digits = digits.data
y_digits = digits.target
###############################################################################
# Plot the PCA spectrum
pca.fit(X_digits)
plt.figure(1, figsize=(4, 3))
plt.clf()
plt.axes([.2, .2, .7, .7])
plt.plot(pca.explained_variance_, linewidth=2)
plt.axis('tight')
plt.xlabel('n_components')
plt.ylabel('explained_variance_')
###############################################################################
# Prediction
n_components = [20, 40, 64]
Cs = np.logspace(-4, 4, 3)
#Parameters of pipelines can be set using ‘__’ separated parameter names:
estimator = GridSearchCV(pipe,
dict(pca__n_components=n_components,
logistic__C=Cs))
estimator.fit(X_digits, y_digits)
plt.axvline(estimator.best_estimator_.named_steps['pca'].n_components,
linestyle=':', label='n_components chosen')
plt.legend(prop=dict(size=12))
plt.show()
|
bsd-3-clause
|
seckcoder/lang-learn
|
python/sklearn/examples/ensemble/plot_gradient_boosting_regression.py
|
3
|
2480
|
"""
============================
Gradient Boosting regression
============================
Demonstrate Gradient Boosting on the boston housing dataset.
This example fits a Gradient Boosting model with least squares loss and
500 regression trees of depth 4.
"""
print __doc__
# Author: Peter Prettenhofer <[email protected]>
#
# License: BSD
import numpy as np
import pylab as pl
from sklearn import ensemble
from sklearn import datasets
from sklearn.utils import shuffle
from sklearn.metrics import mean_squared_error
###############################################################################
# Load data
boston = datasets.load_boston()
X, y = shuffle(boston.data, boston.target, random_state=13)
X = X.astype(np.float32)
offset = int(X.shape[0] * 0.9)
X_train, y_train = X[:offset], y[:offset]
X_test, y_test = X[offset:], y[offset:]
###############################################################################
# Fit regression model
params = {'n_estimators': 500, 'max_depth': 4, 'min_samples_split': 1,
'learning_rate': 0.01, 'loss': 'ls'}
clf = ensemble.GradientBoostingRegressor(**params)
clf.fit(X_train, y_train)
mse = mean_squared_error(y_test, clf.predict(X_test))
print("MSE: %.4f" % mse)
###############################################################################
# Plot training deviance
# compute test set deviance
test_score = np.zeros((params['n_estimators'],), dtype=np.float64)
for i, y_pred in enumerate(clf.staged_decision_function(X_test)):
test_score[i] = clf.loss_(y_test, y_pred)
pl.figure(figsize=(12, 6))
pl.subplot(1, 2, 1)
pl.title('Deviance')
pl.plot(np.arange(params['n_estimators']) + 1, clf.train_score_, 'b-',
label='Training Set Deviance')
pl.plot(np.arange(params['n_estimators']) + 1, test_score, 'r-',
label='Test Set Deviance')
pl.legend(loc='upper right')
pl.xlabel('Boosting Iterations')
pl.ylabel('Deviance')
###############################################################################
# Plot feature importance
feature_importance = clf.feature_importances_
# make importances relative to max importance
feature_importance = 100.0 * (feature_importance / feature_importance.max())
sorted_idx = np.argsort(feature_importance)
pos = np.arange(sorted_idx.shape[0]) + .5
pl.subplot(1, 2, 2)
pl.barh(pos, feature_importance[sorted_idx], align='center')
pl.yticks(pos, boston.feature_names[sorted_idx])
pl.xlabel('Relative Importance')
pl.title('Variable Importance')
pl.show()
|
unlicense
|
marcsans/cnn-physics-perception
|
phy/lib/python2.7/site-packages/matplotlib/cbook.py
|
4
|
81449
|
"""
A collection of utility functions and classes. Originally, many
(but not all) were from the Python Cookbook -- hence the name cbook.
This module is safe to import from anywhere within matplotlib;
it imports matplotlib only at runtime.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from matplotlib.externals import six
from matplotlib.externals.six.moves import xrange, zip
from itertools import repeat
import collections
import datetime
import errno
from functools import reduce
import glob
import gzip
import io
import locale
import os
import re
import sys
import time
import traceback
import types
import warnings
from weakref import ref, WeakKeyDictionary
import numpy as np
import numpy.ma as ma
class MatplotlibDeprecationWarning(UserWarning):
"""
A class for issuing deprecation warnings for Matplotlib users.
In light of the fact that Python builtin DeprecationWarnings are ignored
by default as of Python 2.7 (see link below), this class was put in to
allow for the signaling of deprecation, but via UserWarnings which are not
ignored by default.
http://docs.python.org/dev/whatsnew/2.7.html#the-future-for-python-2-x
"""
pass
mplDeprecation = MatplotlibDeprecationWarning
def _generate_deprecation_message(since, message='', name='',
alternative='', pending=False,
obj_type='attribute'):
if not message:
altmessage = ''
if pending:
message = (
'The %(func)s %(obj_type)s will be deprecated in a '
'future version.')
else:
message = (
'The %(func)s %(obj_type)s was deprecated in version '
'%(since)s.')
if alternative:
altmessage = ' Use %s instead.' % alternative
message = ((message % {
'func': name,
'name': name,
'alternative': alternative,
'obj_type': obj_type,
'since': since}) +
altmessage)
return message
def warn_deprecated(
since, message='', name='', alternative='', pending=False,
obj_type='attribute'):
"""
Used to display deprecation warning in a standard way.
Parameters
------------
since : str
The release at which this API became deprecated.
message : str, optional
Override the default deprecation message. The format
specifier `%(func)s` may be used for the name of the function,
and `%(alternative)s` may be used in the deprecation message
to insert the name of an alternative to the deprecated
function. `%(obj_type)` may be used to insert a friendly name
for the type of object being deprecated.
name : str, optional
The name of the deprecated function; if not provided the name
is automatically determined from the passed in function,
though this is useful in the case of renamed functions, where
the new function is just assigned to the name of the
deprecated function. For example::
def new_function():
...
oldFunction = new_function
alternative : str, optional
An alternative function that the user may use in place of the
deprecated function. The deprecation warning will tell the user about
this alternative if provided.
pending : bool, optional
If True, uses a PendingDeprecationWarning instead of a
DeprecationWarning.
obj_type : str, optional
The object type being deprecated.
Examples
--------
Basic example::
# To warn of the deprecation of "matplotlib.name_of_module"
warn_deprecated('1.4.0', name='matplotlib.name_of_module',
obj_type='module')
"""
message = _generate_deprecation_message(
since, message, name, alternative, pending, obj_type)
warnings.warn(message, mplDeprecation, stacklevel=1)
def deprecated(since, message='', name='', alternative='', pending=False,
obj_type='function'):
"""
Decorator to mark a function as deprecated.
Parameters
------------
since : str
The release at which this API became deprecated. This is
required.
message : str, optional
Override the default deprecation message. The format
specifier `%(func)s` may be used for the name of the function,
and `%(alternative)s` may be used in the deprecation message
to insert the name of an alternative to the deprecated
function. `%(obj_type)` may be used to insert a friendly name
for the type of object being deprecated.
name : str, optional
The name of the deprecated function; if not provided the name
is automatically determined from the passed in function,
though this is useful in the case of renamed functions, where
the new function is just assigned to the name of the
deprecated function. For example::
def new_function():
...
oldFunction = new_function
alternative : str, optional
An alternative function that the user may use in place of the
deprecated function. The deprecation warning will tell the user about
this alternative if provided.
pending : bool, optional
If True, uses a PendingDeprecationWarning instead of a
DeprecationWarning.
Examples
--------
Basic example::
@deprecated('1.4.0')
def the_function_to_deprecate():
pass
"""
def deprecate(func, message=message, name=name, alternative=alternative,
pending=pending):
import functools
import textwrap
if isinstance(func, classmethod):
try:
func = func.__func__
except AttributeError:
# classmethods in Python2.6 and below lack the __func__
# attribute so we need to hack around to get it
method = func.__get__(None, object)
if hasattr(method, '__func__'):
func = method.__func__
elif hasattr(method, 'im_func'):
func = method.im_func
else:
# Nothing we can do really... just return the original
# classmethod
return func
is_classmethod = True
else:
is_classmethod = False
if not name:
name = func.__name__
message = _generate_deprecation_message(
since, message, name, alternative, pending, obj_type)
@functools.wraps(func)
def deprecated_func(*args, **kwargs):
warnings.warn(message, mplDeprecation, stacklevel=2)
return func(*args, **kwargs)
old_doc = deprecated_func.__doc__
if not old_doc:
old_doc = ''
old_doc = textwrap.dedent(old_doc).strip('\n')
message = message.strip()
new_doc = (('\n.. deprecated:: %(since)s'
'\n %(message)s\n\n' %
{'since': since, 'message': message}) + old_doc)
if not old_doc:
            # This is to prevent a spurious 'unexpected unindent' warning from
# docutils when the original docstring was blank.
new_doc += r'\ '
deprecated_func.__doc__ = new_doc
if is_classmethod:
deprecated_func = classmethod(deprecated_func)
return deprecated_func
return deprecate
# On some systems, locale.getpreferredencoding returns None,
# which can break unicode; and the sage project reports that
# some systems have incorrect locale specifications, e.g.,
# an encoding instead of a valid locale name. Another
# pathological case that has been reported is an empty string.
# On some systems, getpreferredencoding sets the locale, which has
# side effects. Passing False eliminates those side effects.
def unicode_safe(s):
import matplotlib
if isinstance(s, bytes):
try:
preferredencoding = locale.getpreferredencoding(
matplotlib.rcParams['axes.formatter.use_locale']).strip()
if not preferredencoding:
preferredencoding = None
except (ValueError, ImportError, AttributeError):
preferredencoding = None
if preferredencoding is None:
return six.text_type(s)
else:
return six.text_type(s, preferredencoding)
return s
class converter(object):
"""
Base class for handling string -> python type with support for
missing values
"""
def __init__(self, missing='Null', missingval=None):
self.missing = missing
self.missingval = missingval
def __call__(self, s):
if s == self.missing:
return self.missingval
return s
def is_missing(self, s):
return not s.strip() or s == self.missing
class tostr(converter):
'convert to string or None'
def __init__(self, missing='Null', missingval=''):
converter.__init__(self, missing=missing, missingval=missingval)
class todatetime(converter):
'convert to a datetime or None'
def __init__(self, fmt='%Y-%m-%d', missing='Null', missingval=None):
'use a :func:`time.strptime` format string for conversion'
converter.__init__(self, missing, missingval)
self.fmt = fmt
def __call__(self, s):
if self.is_missing(s):
return self.missingval
tup = time.strptime(s, self.fmt)
return datetime.datetime(*tup[:6])
class todate(converter):
'convert to a date or None'
def __init__(self, fmt='%Y-%m-%d', missing='Null', missingval=None):
'use a :func:`time.strptime` format string for conversion'
converter.__init__(self, missing, missingval)
self.fmt = fmt
def __call__(self, s):
if self.is_missing(s):
return self.missingval
tup = time.strptime(s, self.fmt)
return datetime.date(*tup[:3])
class tofloat(converter):
'convert to a float or None'
def __init__(self, missing='Null', missingval=None):
converter.__init__(self, missing)
self.missingval = missingval
def __call__(self, s):
if self.is_missing(s):
return self.missingval
return float(s)
class toint(converter):
'convert to an int or None'
def __init__(self, missing='Null', missingval=None):
converter.__init__(self, missing)
def __call__(self, s):
if self.is_missing(s):
return self.missingval
return int(s)
class _BoundMethodProxy(object):
'''
Our own proxy object which enables weak references to bound and unbound
methods and arbitrary callables. Pulls information about the function,
class, and instance out of a bound method. Stores a weak reference to the
instance to support garbage collection.
@organization: IBM Corporation
@copyright: Copyright (c) 2005, 2006 IBM Corporation
@license: The BSD License
Minor bugfixes by Michael Droettboom
'''
def __init__(self, cb):
self._hash = hash(cb)
self._destroy_callbacks = []
try:
try:
if six.PY3:
self.inst = ref(cb.__self__, self._destroy)
else:
self.inst = ref(cb.im_self, self._destroy)
except TypeError:
self.inst = None
if six.PY3:
self.func = cb.__func__
self.klass = cb.__self__.__class__
else:
self.func = cb.im_func
self.klass = cb.im_class
except AttributeError:
self.inst = None
self.func = cb
self.klass = None
def add_destroy_callback(self, callback):
self._destroy_callbacks.append(_BoundMethodProxy(callback))
def _destroy(self, wk):
for callback in self._destroy_callbacks:
try:
callback(self)
except ReferenceError:
pass
def __getstate__(self):
d = self.__dict__.copy()
# de-weak reference inst
inst = d['inst']
if inst is not None:
d['inst'] = inst()
return d
def __setstate__(self, statedict):
self.__dict__ = statedict
inst = statedict['inst']
# turn inst back into a weakref
if inst is not None:
self.inst = ref(inst)
def __call__(self, *args, **kwargs):
'''
Proxy for a call to the weak referenced object. Take
arbitrary params to pass to the callable.
Raises `ReferenceError`: When the weak reference refers to
a dead object
'''
if self.inst is not None and self.inst() is None:
raise ReferenceError
elif self.inst is not None:
# build a new instance method with a strong reference to the
# instance
mtd = types.MethodType(self.func, self.inst())
else:
# not a bound method, just return the func
mtd = self.func
# invoke the callable and return the result
return mtd(*args, **kwargs)
def __eq__(self, other):
'''
Compare the held function and instance with that held by
another proxy.
'''
try:
if self.inst is None:
return self.func == other.func and other.inst is None
else:
return self.func == other.func and self.inst() == other.inst()
except Exception:
return False
def __ne__(self, other):
'''
Inverse of __eq__.
'''
return not self.__eq__(other)
def __hash__(self):
return self._hash
class CallbackRegistry(object):
"""
Handle registering and disconnecting for a set of signals and
callbacks:
>>> def oneat(x):
... print('eat', x)
>>> def ondrink(x):
... print('drink', x)
>>> from matplotlib.cbook import CallbackRegistry
>>> callbacks = CallbackRegistry()
>>> id_eat = callbacks.connect('eat', oneat)
>>> id_drink = callbacks.connect('drink', ondrink)
>>> callbacks.process('drink', 123)
drink 123
>>> callbacks.process('eat', 456)
eat 456
>>> callbacks.process('be merry', 456) # nothing will be called
>>> callbacks.disconnect(id_eat)
>>> callbacks.process('eat', 456) # nothing will be called
In practice, one should always disconnect all callbacks when they
are no longer needed to avoid dangling references (and thus memory
leaks). However, real code in matplotlib rarely does so, and due
to its design, it is rather difficult to place this kind of code.
To get around this, and prevent this class of memory leaks, we
instead store weak references to bound methods only, so when the
destination object needs to die, the CallbackRegistry won't keep
it alive. The Python stdlib weakref module can not create weak
references to bound methods directly, so we need to create a proxy
object to handle weak references to bound methods (or regular free
functions). This technique was shared by Peter Parente on his
`"Mindtrove" blog
<http://mindtrove.info/python-weak-references/>`_.
"""
def __init__(self):
self.callbacks = dict()
self._cid = 0
self._func_cid_map = {}
def __getstate__(self):
# We cannot currently pickle the callables in the registry, so
# return an empty dictionary.
return {}
def __setstate__(self, state):
# re-initialise an empty callback registry
self.__init__()
def connect(self, s, func):
"""
        Register *func* to be called when signal *s* is generated;
        returns a connection id that can be passed to :meth:`disconnect`.
"""
self._func_cid_map.setdefault(s, WeakKeyDictionary())
# Note proxy not needed in python 3.
# TODO rewrite this when support for python2.x gets dropped.
proxy = _BoundMethodProxy(func)
if proxy in self._func_cid_map[s]:
return self._func_cid_map[s][proxy]
proxy.add_destroy_callback(self._remove_proxy)
self._cid += 1
cid = self._cid
self._func_cid_map[s][proxy] = cid
self.callbacks.setdefault(s, dict())
self.callbacks[s][cid] = proxy
return cid
def _remove_proxy(self, proxy):
for signal, proxies in list(six.iteritems(self._func_cid_map)):
try:
del self.callbacks[signal][proxies[proxy]]
except KeyError:
pass
if len(self.callbacks[signal]) == 0:
del self.callbacks[signal]
del self._func_cid_map[signal]
def disconnect(self, cid):
"""
disconnect the callback registered with callback id *cid*
"""
for eventname, callbackd in list(six.iteritems(self.callbacks)):
try:
del callbackd[cid]
except KeyError:
continue
else:
for signal, functions in list(
six.iteritems(self._func_cid_map)):
for function, value in list(six.iteritems(functions)):
if value == cid:
del functions[function]
return
def process(self, s, *args, **kwargs):
"""
process signal *s*. All of the functions registered to receive
callbacks on *s* will be called with *\*args* and *\*\*kwargs*
"""
if s in self.callbacks:
for cid, proxy in list(six.iteritems(self.callbacks[s])):
try:
proxy(*args, **kwargs)
except ReferenceError:
self._remove_proxy(proxy)
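# Hedged usage sketch (not part of the original module): illustrates the
# weak-reference behaviour described in the CallbackRegistry docstring above.
# The Listener class, the signal name and the values are made up.
def _callback_registry_weakref_demo():
    class Listener(object):
        def on_eat(self, x):
            print('eat', x)
    registry = CallbackRegistry()
    listener = Listener()
    registry.connect('eat', listener.on_eat)
    registry.process('eat', 1)   # calls Listener.on_eat -> prints 'eat 1'
    del listener                 # under CPython refcounting the proxy's
                                 # destroy callback removes the dead entry
    registry.process('eat', 2)   # nothing is called; the registry never
                                 # kept the listener alive
    return registry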
class silent_list(list):
"""
override repr when returning a list of matplotlib artists to
prevent long, meaningless output. This is meant to be used for a
homogeneous list of a given type
"""
def __init__(self, type, seq=None):
self.type = type
if seq is not None:
self.extend(seq)
def __repr__(self):
return '<a list of %d %s objects>' % (len(self), self.type)
def __str__(self):
return repr(self)
def __getstate__(self):
# store a dictionary of this SilentList's state
return {'type': self.type, 'seq': self[:]}
def __setstate__(self, state):
self.type = state['type']
self.extend(state['seq'])
class IgnoredKeywordWarning(UserWarning):
"""
A class for issuing warnings about keyword arguments that will be ignored
by matplotlib
"""
pass
def local_over_kwdict(local_var, kwargs, *keys):
"""
Enforces the priority of a local variable over potentially conflicting
argument(s) from a kwargs dict. The following possible output values are
considered in order of priority:
local_var > kwargs[keys[0]] > ... > kwargs[keys[-1]]
The first of these whose value is not None will be returned. If all are
None then None will be returned. Each key in keys will be removed from the
kwargs dict in place.
Parameters
------------
local_var: any object
The local variable (highest priority)
kwargs: dict
Dictionary of keyword arguments; modified in place
keys: str(s)
Name(s) of keyword arguments to process, in descending order of
priority
Returns
---------
out: any object
Either local_var or one of kwargs[key] for key in keys
Raises
--------
IgnoredKeywordWarning
For each key in keys that is removed from kwargs but not used as
the output value
"""
out = local_var
for key in keys:
kwarg_val = kwargs.pop(key, None)
if kwarg_val is not None:
if out is None:
out = kwarg_val
else:
warnings.warn('"%s" keyword argument will be ignored' % key,
IgnoredKeywordWarning)
return out
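# Hedged usage sketch (not part of the original module): demonstrates the
# priority order documented for local_over_kwdict above.  The 'linewidth'
# and 'lw' key names are only illustrative.
def _local_over_kwdict_demo():
    kwargs = {'lw': 2, 'linewidth': None}
    # the explicit local value wins; both keys are popped from kwargs and an
    # IgnoredKeywordWarning is issued for the discarded 'lw' value
    value = local_over_kwdict(3, kwargs, 'linewidth', 'lw')
    assert value == 3 and kwargs == {}
    # with no local value, the highest-priority non-None kwarg is used
    assert local_over_kwdict(None, {'lw': 2}, 'linewidth', 'lw') == 2
    return value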
def strip_math(s):
'remove latex formatting from mathtext'
remove = (r'\mathdefault', r'\rm', r'\cal', r'\tt', r'\it', '\\', '{', '}')
s = s[1:-1]
for r in remove:
s = s.replace(r, '')
return s
class Bunch(object):
"""
Often we want to just collect a bunch of stuff together, naming each
    item of the bunch; a dictionary's OK for that, but a small do-nothing
class is even handier, and prettier to use. Whenever you want to
group a few variables::
>>> point = Bunch(datum=2, squared=4, coord=12)
>>> point.datum
By: Alex Martelli
From: http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/52308
"""
def __init__(self, **kwds):
self.__dict__.update(kwds)
def __repr__(self):
keys = six.iterkeys(self.__dict__)
return 'Bunch(%s)' % ', '.join(['%s=%s' % (k, self.__dict__[k])
for k
in keys])
def unique(x):
'Return a list of unique elements of *x*'
return list(six.iterkeys(dict([(val, 1) for val in x])))
def iterable(obj):
'return true if *obj* is iterable'
try:
iter(obj)
except TypeError:
return False
return True
def is_string_like(obj):
'Return True if *obj* looks like a string'
if isinstance(obj, six.string_types):
return True
# numpy strings are subclass of str, ma strings are not
if ma.isMaskedArray(obj):
if obj.ndim == 0 and obj.dtype.kind in 'SU':
return True
else:
return False
try:
obj + ''
except:
return False
return True
def is_sequence_of_strings(obj):
"""
Returns true if *obj* is iterable and contains strings
"""
if not iterable(obj):
return False
if is_string_like(obj) and not isinstance(obj, np.ndarray):
try:
obj = obj.values
except AttributeError:
# not pandas
return False
for o in obj:
if not is_string_like(o):
return False
return True
def is_hashable(obj):
"""
Returns true if *obj* can be hashed
"""
try:
hash(obj)
except TypeError:
return False
return True
def is_writable_file_like(obj):
'return true if *obj* looks like a file object with a *write* method'
return hasattr(obj, 'write') and six.callable(obj.write)
def file_requires_unicode(x):
"""
Returns `True` if the given writable file-like object requires Unicode
to be written to it.
"""
try:
x.write(b'')
except TypeError:
return True
else:
return False
def is_scalar(obj):
'return true if *obj* is not string like and is not iterable'
return not is_string_like(obj) and not iterable(obj)
def is_numlike(obj):
'return true if *obj* looks like a number'
try:
obj + 1
except:
return False
else:
return True
def to_filehandle(fname, flag='rU', return_opened=False):
"""
*fname* can be a filename or a file handle. Support for gzipped
files is automatic, if the filename ends in .gz. *flag* is a
read/write flag for :func:`file`
"""
if is_string_like(fname):
if fname.endswith('.gz'):
# get rid of 'U' in flag for gzipped files.
flag = flag.replace('U', '')
fh = gzip.open(fname, flag)
elif fname.endswith('.bz2'):
# get rid of 'U' in flag for bz2 files
flag = flag.replace('U', '')
import bz2
fh = bz2.BZ2File(fname, flag)
else:
fh = open(fname, flag)
opened = True
elif hasattr(fname, 'seek'):
fh = fname
opened = False
else:
raise ValueError('fname must be a string or file handle')
if return_opened:
return fh, opened
return fh
def is_scalar_or_string(val):
"""Return whether the given object is a scalar or string like."""
return is_string_like(val) or not iterable(val)
def _string_to_bool(s):
if not is_string_like(s):
return s
if s == 'on':
return True
if s == 'off':
return False
raise ValueError("string argument must be either 'on' or 'off'")
def get_sample_data(fname, asfileobj=True):
"""
Return a sample data file. *fname* is a path relative to the
`mpl-data/sample_data` directory. If *asfileobj* is `True`
return a file object, otherwise just a file path.
Set the rc parameter examples.directory to the directory where we should
look, if sample_data files are stored in a location different than
    default (which is 'mpl-data/sample_data' at the same level as the
    'matplotlib' Python module files).
If the filename ends in .gz, the file is implicitly ungzipped.
"""
import matplotlib
if matplotlib.rcParams['examples.directory']:
root = matplotlib.rcParams['examples.directory']
else:
root = os.path.join(os.path.dirname(__file__),
"mpl-data", "sample_data")
path = os.path.join(root, fname)
if asfileobj:
if (os.path.splitext(fname)[-1].lower() in
('.csv', '.xrc', '.txt')):
mode = 'r'
else:
mode = 'rb'
base, ext = os.path.splitext(fname)
if ext == '.gz':
return gzip.open(path, mode)
else:
return open(path, mode)
else:
return path
def flatten(seq, scalarp=is_scalar_or_string):
"""
Returns a generator of flattened nested containers
For example:
>>> from matplotlib.cbook import flatten
>>> l = (('John', ['Hunter']), (1, 23), [[([42, (5, 23)], )]])
>>> print(list(flatten(l)))
['John', 'Hunter', 1, 23, 42, 5, 23]
By: Composite of Holger Krekel and Luther Blissett
From: http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/121294
and Recipe 1.12 in cookbook
"""
for item in seq:
if scalarp(item):
yield item
else:
for subitem in flatten(item, scalarp):
yield subitem
class Sorter(object):
"""
Sort by attribute or item
Example usage::
sort = Sorter()
list = [(1, 2), (4, 8), (0, 3)]
dict = [{'a': 3, 'b': 4}, {'a': 5, 'b': 2}, {'a': 0, 'b': 0},
{'a': 9, 'b': 9}]
sort(list) # default sort
sort(list, 1) # sort by index 1
sort(dict, 'a') # sort a list of dicts by key 'a'
"""
def _helper(self, data, aux, inplace):
aux.sort()
result = [data[i] for junk, i in aux]
if inplace:
data[:] = result
return result
def byItem(self, data, itemindex=None, inplace=1):
if itemindex is None:
if inplace:
data.sort()
result = data
else:
result = data[:]
result.sort()
return result
else:
aux = [(data[i][itemindex], i) for i in range(len(data))]
return self._helper(data, aux, inplace)
def byAttribute(self, data, attributename, inplace=1):
aux = [(getattr(data[i], attributename), i) for i in range(len(data))]
return self._helper(data, aux, inplace)
# a couple of handy synonyms
sort = byItem
__call__ = byItem
class Xlator(dict):
"""
All-in-one multiple-string-substitution class
Example usage::
text = "Larry Wall is the creator of Perl"
adict = {
"Larry Wall" : "Guido van Rossum",
"creator" : "Benevolent Dictator for Life",
"Perl" : "Python",
}
print(multiple_replace(adict, text))
xlat = Xlator(adict)
print(xlat.xlat(text))
"""
def _make_regex(self):
""" Build re object based on the keys of the current dictionary """
return re.compile("|".join(map(re.escape, list(six.iterkeys(self)))))
def __call__(self, match):
""" Handler invoked for each regex *match* """
return self[match.group(0)]
def xlat(self, text):
""" Translate *text*, returns the modified text. """
return self._make_regex().sub(self, text)
def soundex(name, len=4):
""" soundex module conforming to Odell-Russell algorithm """
# digits holds the soundex values for the alphabet
soundex_digits = '01230120022455012623010202'
sndx = ''
fc = ''
# Translate letters in name to soundex digits
for c in name.upper():
if c.isalpha():
if not fc:
fc = c # Remember first letter
d = soundex_digits[ord(c) - ord('A')]
# Duplicate consecutive soundex digits are skipped
if not sndx or (d != sndx[-1]):
sndx += d
# Replace first digit with first letter
sndx = fc + sndx[1:]
# Remove all 0s from the soundex code
sndx = sndx.replace('0', '')
# Return soundex code truncated or 0-padded to len characters
return (sndx + (len * '0'))[:len]
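# Hedged sketch (not part of the original module): the classic worked example
# for the Odell-Russell soundex code implemented above.
def _soundex_demo():
    # 'Robert' -> first letter R, digits 6(R) 0(O) 1(B) 0(E) 6(R) 3(T);
    # zeros are dropped and the code is padded/truncated to four characters
    assert soundex('Robert') == 'R163'
    assert soundex('Rupert') == 'R163'
    return soundex('Robert')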
class Null(object):
""" Null objects always and reliably "do nothing." """
def __init__(self, *args, **kwargs):
pass
def __call__(self, *args, **kwargs):
return self
def __str__(self):
return "Null()"
def __repr__(self):
return "Null()"
if six.PY3:
        def __bool__(self):
            return False
else:
def __nonzero__(self):
return 0
def __getattr__(self, name):
return self
def __setattr__(self, name, value):
return self
def __delattr__(self, name):
return self
def mkdirs(newdir, mode=0o777):
"""
make directory *newdir* recursively, and set *mode*. Equivalent to ::
> mkdir -p NEWDIR
> chmod MODE NEWDIR
"""
# this functionality is now in core python as of 3.2
# LPY DROP
if six.PY3:
os.makedirs(newdir, mode=mode, exist_ok=True)
else:
try:
os.makedirs(newdir, mode=mode)
except OSError as exception:
if exception.errno != errno.EEXIST:
raise
class GetRealpathAndStat(object):
def __init__(self):
self._cache = {}
def __call__(self, path):
result = self._cache.get(path)
if result is None:
realpath = os.path.realpath(path)
if sys.platform == 'win32':
stat_key = realpath
else:
stat = os.stat(realpath)
stat_key = (stat.st_ino, stat.st_dev)
result = realpath, stat_key
self._cache[path] = result
return result
get_realpath_and_stat = GetRealpathAndStat()
def dict_delall(d, keys):
'delete all of the *keys* from the :class:`dict` *d*'
for key in keys:
try:
del d[key]
except KeyError:
pass
class RingBuffer(object):
""" class that implements a not-yet-full buffer """
def __init__(self, size_max):
self.max = size_max
self.data = []
    class __Full(object):
""" class that implements a full buffer """
def append(self, x):
""" Append an element overwriting the oldest one. """
self.data[self.cur] = x
self.cur = (self.cur + 1) % self.max
def get(self):
""" return list of elements in correct order """
return self.data[self.cur:] + self.data[:self.cur]
def append(self, x):
"""append an element at the end of the buffer"""
self.data.append(x)
if len(self.data) == self.max:
self.cur = 0
# Permanently change self's class from non-full to full
            self.__class__ = RingBuffer.__Full
def get(self):
""" Return a list of elements from the oldest to the newest. """
return self.data
    def __getitem__(self, i):
return self.data[i % len(self.data)]
def get_split_ind(seq, N):
"""
*seq* is a list of words. Return the index into seq such that::
        len(' '.join(seq[:ind])) <= N
"""
sLen = 0
# todo: use Alex's xrange pattern from the cbook for efficiency
for (word, ind) in zip(seq, xrange(len(seq))):
sLen += len(word) + 1 # +1 to account for the len(' ')
if sLen >= N:
return ind
return len(seq)
def wrap(prefix, text, cols):
'wrap *text* with *prefix* at length *cols*'
pad = ' ' * len(prefix.expandtabs())
available = cols - len(pad)
seq = text.split(' ')
Nseq = len(seq)
ind = 0
lines = []
while ind < Nseq:
lastInd = ind
ind += get_split_ind(seq[ind:], available)
lines.append(seq[lastInd:ind])
# add the prefix to the first line, pad with spaces otherwise
ret = prefix + ' '.join(lines[0]) + '\n'
for line in lines[1:]:
ret += pad + ' '.join(line) + '\n'
return ret
# A regular expression used to determine the amount of space to
# remove. It looks for the first sequence of spaces immediately
# following the first newline, or at the beginning of the string.
_find_dedent_regex = re.compile("(?:(?:\n\r?)|^)( *)\S")
# A cache to hold the regexs that actually remove the indent.
_dedent_regex = {}
def dedent(s):
"""
Remove excess indentation from docstring *s*.
Discards any leading blank lines, then removes up to n whitespace
characters from each line, where n is the number of leading
whitespace characters in the first line. It differs from
textwrap.dedent in its deletion of leading blank lines and its use
of the first non-blank line to determine the indentation.
It is also faster in most cases.
"""
# This implementation has a somewhat obtuse use of regular
# expressions. However, this function accounted for almost 30% of
# matplotlib startup time, so it is worthy of optimization at all
# costs.
if not s: # includes case of s is None
return ''
match = _find_dedent_regex.match(s)
if match is None:
return s
# This is the number of spaces to remove from the left-hand side.
nshift = match.end(1) - match.start(1)
if nshift == 0:
return s
# Get a regex that will remove *up to* nshift spaces from the
# beginning of each line. If it isn't in the cache, generate it.
unindent = _dedent_regex.get(nshift, None)
if unindent is None:
unindent = re.compile("\n\r? {0,%d}" % nshift)
_dedent_regex[nshift] = unindent
result = unindent.sub("\n", s).strip()
return result
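# Hedged sketch (not part of the original module): shows the behaviour
# described in the dedent docstring above on a made-up docstring.
def _dedent_demo():
    doc = """
        Leading blank line and common indentation are removed.
            Extra indentation relative to the first line is preserved.
    """
    cleaned = dedent(doc)
    assert cleaned.startswith('Leading blank line')
    # the second line keeps its indentation relative to the first line
    assert 'Extra indentation' in cleaned
    return cleaned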
def listFiles(root, patterns='*', recurse=1, return_folders=0):
"""
Recursively list files
from Parmar and Martelli in the Python Cookbook
"""
import os.path
import fnmatch
# Expand patterns from semicolon-separated string to list
pattern_list = patterns.split(';')
results = []
for dirname, dirs, files in os.walk(root):
# Append to results all relevant files (and perhaps folders)
for name in files:
fullname = os.path.normpath(os.path.join(dirname, name))
if return_folders or os.path.isfile(fullname):
for pattern in pattern_list:
if fnmatch.fnmatch(name, pattern):
results.append(fullname)
break
# Block recursion if recursion was disallowed
if not recurse:
break
return results
def get_recursive_filelist(args):
"""
Recurse all the files and dirs in *args* ignoring symbolic links
and return the files as a list of strings
"""
files = []
for arg in args:
if os.path.isfile(arg):
files.append(arg)
continue
if os.path.isdir(arg):
newfiles = listFiles(arg, recurse=1, return_folders=1)
files.extend(newfiles)
return [f for f in files if not os.path.islink(f)]
def pieces(seq, num=2):
"Break up the *seq* into *num* tuples"
start = 0
while 1:
item = seq[start:start + num]
if not len(item):
break
yield item
start += num
def exception_to_str(s=None):
if six.PY3:
sh = io.StringIO()
else:
sh = io.BytesIO()
if s is not None:
print(s, file=sh)
traceback.print_exc(file=sh)
return sh.getvalue()
def allequal(seq):
"""
Return *True* if all elements of *seq* compare equal. If *seq* is
0 or 1 length, return *True*
"""
if len(seq) < 2:
return True
val = seq[0]
for i in xrange(1, len(seq)):
thisval = seq[i]
if thisval != val:
return False
return True
def alltrue(seq):
"""
Return *True* if all elements of *seq* evaluate to *True*. If
*seq* is empty, return *False*.
"""
if not len(seq):
return False
for val in seq:
if not val:
return False
return True
def onetrue(seq):
"""
    Return *True* if one element of *seq* is *True*.  If *seq* is
empty, return *False*.
"""
if not len(seq):
return False
for val in seq:
if val:
return True
return False
def allpairs(x):
"""
return all possible pairs in sequence *x*
Condensed by Alex Martelli from this thread_ on c.l.python
.. _thread: http://groups.google.com/groups?q=all+pairs+group:*python*&hl=en&lr=&ie=UTF-8&selm=mailman.4028.1096403649.5135.python-list%40python.org&rnum=1
"""
return [(s, f) for i, f in enumerate(x) for s in x[i + 1:]]
class maxdict(dict):
"""
A dictionary with a maximum size; this doesn't override all the
    relevant methods to constrain its size, just __setitem__, so use
    with caution.
"""
def __init__(self, maxsize):
dict.__init__(self)
self.maxsize = maxsize
self._killkeys = []
def __setitem__(self, k, v):
if k not in self:
if len(self) >= self.maxsize:
del self[self._killkeys[0]]
del self._killkeys[0]
self._killkeys.append(k)
dict.__setitem__(self, k, v)
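# Hedged sketch (not part of the original module): shows the simple
# insertion-order eviction performed by maxdict.__setitem__ above.
def _maxdict_demo():
    cache = maxdict(2)
    cache['a'] = 1
    cache['b'] = 2
    cache['c'] = 3          # exceeds maxsize, so the oldest key 'a' is evicted
    assert 'a' not in cache and set(cache) == {'b', 'c'}
    return cache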
class Stack(object):
"""
Implement a stack where elements can be pushed on and you can move
back and forth. But no pop. Should mimic home / back / forward
in a browser
"""
def __init__(self, default=None):
self.clear()
self._default = default
def __call__(self):
'return the current element, or None'
if not len(self._elements):
return self._default
else:
return self._elements[self._pos]
def __len__(self):
return self._elements.__len__()
def __getitem__(self, ind):
return self._elements.__getitem__(ind)
def forward(self):
'move the position forward and return the current element'
N = len(self._elements)
if self._pos < N - 1:
self._pos += 1
return self()
def back(self):
'move the position back and return the current element'
if self._pos > 0:
self._pos -= 1
return self()
def push(self, o):
"""
push object onto stack at current position - all elements
occurring later than the current position are discarded
"""
self._elements = self._elements[:self._pos + 1]
self._elements.append(o)
self._pos = len(self._elements) - 1
return self()
def home(self):
'push the first element onto the top of the stack'
if not len(self._elements):
return
self.push(self._elements[0])
return self()
def empty(self):
return len(self._elements) == 0
def clear(self):
'empty the stack'
self._pos = -1
self._elements = []
def bubble(self, o):
"""
raise *o* to the top of the stack and return *o*. *o* must be
in the stack
"""
if o not in self._elements:
raise ValueError('Unknown element o')
old = self._elements[:]
self.clear()
bubbles = []
for thiso in old:
if thiso == o:
bubbles.append(thiso)
else:
self.push(thiso)
for thiso in bubbles:
self.push(o)
return o
def remove(self, o):
'remove element *o* from the stack'
if o not in self._elements:
raise ValueError('Unknown element o')
old = self._elements[:]
self.clear()
for thiso in old:
if thiso == o:
continue
else:
self.push(thiso)
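# Hedged sketch (not part of the original module): browser-style back/forward
# navigation with the Stack class defined above; the page names are made up.
def _stack_demo():
    s = Stack()
    s.push('home')
    s.push('page1')
    s.push('page2')
    assert s.back() == 'page1'      # step back one element
    assert s.forward() == 'page2'   # and forward again
    s.back()
    s.push('page3')                 # discards 'page2', like a browser history
    assert s() == 'page3' and list(s) == ['home', 'page1', 'page3']
    return s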
def popall(seq):
'empty a list'
for i in xrange(len(seq)):
seq.pop()
def finddir(o, match, case=False):
"""
    Return all attributes of *o* which match the string *match*.  If *case*
    is True, require an exact case match.
"""
if case:
names = [(name, name) for name in dir(o) if is_string_like(name)]
else:
names = [(name.lower(), name) for name in dir(o)
if is_string_like(name)]
match = match.lower()
return [orig for name, orig in names if name.find(match) >= 0]
def reverse_dict(d):
'reverse the dictionary -- may lose data if values are not unique!'
return dict([(v, k) for k, v in six.iteritems(d)])
def restrict_dict(d, keys):
"""
Return a dictionary that contains those keys that appear in both
d and keys, with values from d.
"""
return dict([(k, v) for (k, v) in six.iteritems(d) if k in keys])
def report_memory(i=0): # argument may go away
'return the memory consumed by process'
from matplotlib.compat.subprocess import Popen, PIPE
pid = os.getpid()
if sys.platform == 'sunos5':
try:
a2 = Popen('ps -p %d -o osz' % pid, shell=True,
stdout=PIPE).stdout.readlines()
except OSError:
raise NotImplementedError(
"report_memory works on Sun OS only if "
"the 'ps' program is found")
mem = int(a2[-1].strip())
elif sys.platform.startswith('linux'):
try:
a2 = Popen('ps -p %d -o rss,sz' % pid, shell=True,
stdout=PIPE).stdout.readlines()
except OSError:
raise NotImplementedError(
"report_memory works on Linux only if "
"the 'ps' program is found")
mem = int(a2[1].split()[1])
elif sys.platform.startswith('darwin'):
try:
a2 = Popen('ps -p %d -o rss,vsz' % pid, shell=True,
stdout=PIPE).stdout.readlines()
except OSError:
raise NotImplementedError(
"report_memory works on Mac OS only if "
"the 'ps' program is found")
mem = int(a2[1].split()[0])
elif sys.platform.startswith('win'):
try:
a2 = Popen(["tasklist", "/nh", "/fi", "pid eq %d" % pid],
stdout=PIPE).stdout.read()
except OSError:
raise NotImplementedError(
"report_memory works on Windows only if "
"the 'tasklist' program is found")
mem = int(a2.strip().split()[-2].replace(',', ''))
else:
raise NotImplementedError(
"We don't have a memory monitor for %s" % sys.platform)
return mem
_safezip_msg = 'In safezip, len(args[0])=%d but len(args[%d])=%d'
def safezip(*args):
'make sure *args* are equal len before zipping'
Nx = len(args[0])
for i, arg in enumerate(args[1:]):
if len(arg) != Nx:
raise ValueError(_safezip_msg % (Nx, i + 1, len(arg)))
return list(zip(*args))
def issubclass_safe(x, klass):
'return issubclass(x, klass) and return False on a TypeError'
try:
return issubclass(x, klass)
except TypeError:
return False
def safe_masked_invalid(x, copy=False):
x = np.array(x, subok=True, copy=copy)
try:
xm = np.ma.masked_invalid(x, copy=False)
xm.shrink_mask()
except TypeError:
return x
return xm
class MemoryMonitor(object):
def __init__(self, nmax=20000):
self._nmax = nmax
self._mem = np.zeros((self._nmax,), np.int32)
self.clear()
def clear(self):
self._n = 0
self._overflow = False
def __call__(self):
mem = report_memory()
if self._n < self._nmax:
self._mem[self._n] = mem
self._n += 1
else:
self._overflow = True
return mem
def report(self, segments=4):
n = self._n
segments = min(n, segments)
dn = int(n / segments)
ii = list(xrange(0, n, dn))
ii[-1] = n - 1
print()
print('memory report: i, mem, dmem, dmem/nloops')
print(0, self._mem[0])
for i in range(1, len(ii)):
di = ii[i] - ii[i - 1]
if di == 0:
continue
dm = self._mem[ii[i]] - self._mem[ii[i - 1]]
print('%5d %5d %3d %8.3f' % (ii[i], self._mem[ii[i]],
dm, dm / float(di)))
if self._overflow:
print("Warning: array size was too small for the number of calls.")
def xy(self, i0=0, isub=1):
x = np.arange(i0, self._n, isub)
return x, self._mem[i0:self._n:isub]
def plot(self, i0=0, isub=1, fig=None):
if fig is None:
from .pylab import figure
fig = figure()
ax = fig.add_subplot(111)
ax.plot(*self.xy(i0, isub))
fig.canvas.draw()
def print_cycles(objects, outstream=sys.stdout, show_progress=False):
"""
*objects*
A list of objects to find cycles in. It is often useful to
pass in gc.garbage to find the cycles that are preventing some
objects from being garbage collected.
*outstream*
The stream for output.
*show_progress*
If True, print the number of objects reached as they are found.
"""
import gc
from types import FrameType
def print_path(path):
for i, step in enumerate(path):
# next "wraps around"
next = path[(i + 1) % len(path)]
outstream.write(" %s -- " % str(type(step)))
if isinstance(step, dict):
for key, val in six.iteritems(step):
if val is next:
outstream.write("[%s]" % repr(key))
break
if key is next:
outstream.write("[key] = %s" % repr(val))
break
elif isinstance(step, list):
outstream.write("[%d]" % step.index(next))
elif isinstance(step, tuple):
outstream.write("( tuple )")
else:
outstream.write(repr(step))
outstream.write(" ->\n")
outstream.write("\n")
def recurse(obj, start, all, current_path):
if show_progress:
outstream.write("%d\r" % len(all))
all[id(obj)] = None
referents = gc.get_referents(obj)
for referent in referents:
# If we've found our way back to the start, this is
# a cycle, so print it out
if referent is start:
print_path(current_path)
# Don't go back through the original list of objects, or
# through temporary references to the object, since those
# are just an artifact of the cycle detector itself.
elif referent is objects or isinstance(referent, FrameType):
continue
# We haven't seen this object before, so recurse
elif id(referent) not in all:
recurse(referent, start, all, current_path + [obj])
for obj in objects:
outstream.write("Examining: %r\n" % (obj,))
recurse(obj, obj, {}, [])
class Grouper(object):
"""
This class provides a lightweight way to group arbitrary objects
together into disjoint sets when a full-blown graph data structure
would be overkill.
Objects can be joined using :meth:`join`, tested for connectedness
    using :meth:`joined`, and all disjoint sets can be retrieved by
using the object as an iterator.
The objects being joined must be hashable and weak-referenceable.
For example:
>>> from matplotlib.cbook import Grouper
>>> class Foo(object):
... def __init__(self, s):
... self.s = s
... def __repr__(self):
... return self.s
...
>>> a, b, c, d, e, f = [Foo(x) for x in 'abcdef']
>>> grp = Grouper()
>>> grp.join(a, b)
>>> grp.join(b, c)
>>> grp.join(d, e)
>>> sorted(map(tuple, grp))
[(a, b, c), (d, e)]
>>> grp.joined(a, b)
True
>>> grp.joined(a, c)
True
>>> grp.joined(a, d)
False
"""
def __init__(self, init=()):
mapping = self._mapping = {}
for x in init:
mapping[ref(x)] = [ref(x)]
def __contains__(self, item):
return ref(item) in self._mapping
def clean(self):
"""
Clean dead weak references from the dictionary
"""
mapping = self._mapping
to_drop = [key for key in mapping if key() is None]
for key in to_drop:
val = mapping.pop(key)
val.remove(key)
def join(self, a, *args):
"""
Join given arguments into the same set. Accepts one or more
arguments.
"""
mapping = self._mapping
set_a = mapping.setdefault(ref(a), [ref(a)])
for arg in args:
set_b = mapping.get(ref(arg))
if set_b is None:
set_a.append(ref(arg))
mapping[ref(arg)] = set_a
elif set_b is not set_a:
if len(set_b) > len(set_a):
set_a, set_b = set_b, set_a
set_a.extend(set_b)
for elem in set_b:
mapping[elem] = set_a
self.clean()
def joined(self, a, b):
"""
Returns True if *a* and *b* are members of the same set.
"""
self.clean()
mapping = self._mapping
try:
return mapping[ref(a)] is mapping[ref(b)]
except KeyError:
return False
def remove(self, a):
self.clean()
mapping = self._mapping
seta = mapping.pop(ref(a), None)
if seta is not None:
seta.remove(ref(a))
def __iter__(self):
"""
Iterate over each of the disjoint sets as a list.
The iterator is invalid if interleaved with calls to join().
"""
self.clean()
class Token:
pass
token = Token()
        # Mark each group as we come across it by appending a token,
# and don't yield it twice
for group in six.itervalues(self._mapping):
if not group[-1] is token:
yield [x() for x in group]
group.append(token)
# Cleanup the tokens
for group in six.itervalues(self._mapping):
if group[-1] is token:
del group[-1]
def get_siblings(self, a):
"""
Returns all of the items joined with *a*, including itself.
"""
self.clean()
siblings = self._mapping.get(ref(a), [ref(a)])
return [x() for x in siblings]
def simple_linear_interpolation(a, steps):
if steps == 1:
return a
steps = int(np.floor(steps))
new_length = ((len(a) - 1) * steps) + 1
new_shape = list(a.shape)
new_shape[0] = new_length
result = np.zeros(new_shape, a.dtype)
result[0] = a[0]
a0 = a[0:-1]
a1 = a[1:]
delta = ((a1 - a0) / steps)
for i in range(1, steps):
result[i::steps] = delta * i + a0
result[steps::steps] = a1
return result
def recursive_remove(path):
if os.path.isdir(path):
for fname in (glob.glob(os.path.join(path, '*')) +
glob.glob(os.path.join(path, '.*'))):
if os.path.isdir(fname):
recursive_remove(fname)
os.removedirs(fname)
else:
os.remove(fname)
#os.removedirs(path)
else:
os.remove(path)
def delete_masked_points(*args):
"""
Find all masked and/or non-finite points in a set of arguments,
and return the arguments with only the unmasked points remaining.
Arguments can be in any of 5 categories:
1) 1-D masked arrays
2) 1-D ndarrays
3) ndarrays with more than one dimension
4) other non-string iterables
5) anything else
The first argument must be in one of the first four categories;
any argument with a length differing from that of the first
argument (and hence anything in category 5) then will be
passed through unchanged.
Masks are obtained from all arguments of the correct length
in categories 1, 2, and 4; a point is bad if masked in a masked
array or if it is a nan or inf. No attempt is made to
extract a mask from categories 2, 3, and 4 if :meth:`np.isfinite`
does not yield a Boolean array.
All input arguments that are not passed unchanged are returned
as ndarrays after removing the points or rows corresponding to
masks in any of the arguments.
A vastly simpler version of this function was originally
written as a helper for Axes.scatter().
"""
if not len(args):
return ()
if (is_string_like(args[0]) or not iterable(args[0])):
raise ValueError("First argument must be a sequence")
nrecs = len(args[0])
margs = []
seqlist = [False] * len(args)
for i, x in enumerate(args):
if (not is_string_like(x)) and iterable(x) and len(x) == nrecs:
seqlist[i] = True
if ma.isMA(x):
if x.ndim > 1:
raise ValueError("Masked arrays must be 1-D")
else:
x = np.asarray(x)
margs.append(x)
masks = [] # list of masks that are True where good
for i, x in enumerate(margs):
if seqlist[i]:
if x.ndim > 1:
continue # Don't try to get nan locations unless 1-D.
if ma.isMA(x):
masks.append(~ma.getmaskarray(x)) # invert the mask
xd = x.data
else:
xd = x
try:
mask = np.isfinite(xd)
if isinstance(mask, np.ndarray):
masks.append(mask)
except: # Fixme: put in tuple of possible exceptions?
pass
if len(masks):
mask = reduce(np.logical_and, masks)
igood = mask.nonzero()[0]
if len(igood) < nrecs:
for i, x in enumerate(margs):
if seqlist[i]:
margs[i] = x.take(igood, axis=0)
for i, x in enumerate(margs):
if seqlist[i] and ma.isMA(x):
margs[i] = x.filled()
return margs
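# Hedged sketch (not part of the original module): masked entries and
# non-finite values are dropped consistently across both arguments.  Relies
# on the module-level np/ma imports used throughout this file.
def _delete_masked_points_demo():
    x = ma.array([0., 1., 2., 3.], mask=[False, True, False, False])
    y = np.array([10., 11., np.nan, 13.])
    xc, yc = delete_masked_points(x, y)
    # index 1 is masked in x and index 2 is nan in y, so both rows are removed
    assert list(xc) == [0., 3.] and list(yc) == [10., 13.]
    return xc, yc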
def boxplot_stats(X, whis=1.5, bootstrap=None, labels=None,
autorange=False):
"""
Returns list of dictionaries of statistics used to draw a series
of box and whisker plots. The `Returns` section enumerates the
required keys of the dictionary. Users can skip this function and
pass a user-defined set of dictionaries to the new `axes.bxp` method
instead of relying on MPL to do the calculations.
Parameters
----------
X : array-like
Data that will be represented in the boxplots. Should have 2 or
fewer dimensions.
whis : float, string, or sequence (default = 1.5)
As a float, determines the reach of the whiskers past the first
        and third quartiles (e.g., Q3 + whis*IQR, where IQR is the
        interquartile range, Q3 - Q1). Beyond the whiskers, data are
        considered outliers and are plotted as individual points. This can
        be set to an ascending sequence of percentiles (e.g., [5, 95]) to set the
whiskers at specific percentiles of the data. Finally, `whis`
can be the string ``'range'`` to force the whiskers to the
minimum and maximum of the data. In the edge case that the 25th
and 75th percentiles are equivalent, `whis` can be automatically
set to ``'range'`` via the `autorange` option.
bootstrap : int, optional
Number of times the confidence intervals around the median
should be bootstrapped (percentile method).
labels : array-like, optional
Labels for each dataset. Length must be compatible with
dimensions of `X`.
autorange : bool, optional (False)
When `True` and the data are distributed such that the 25th and
75th percentiles are equal, ``whis`` is set to ``'range'`` such
that the whisker ends are at the minimum and maximum of the
data.
Returns
-------
bxpstats : list of dict
A list of dictionaries containing the results for each column
of data. Keys of each dictionary are the following:
        ========   ===================================
        Key        Value Description
        ========   ===================================
        label      tick label for the boxplot
        mean       arithmetic mean value
        med        50th percentile
        q1         first quartile (25th percentile)
        q3         third quartile (75th percentile)
        cilo       lower notch around the median
        cihi       upper notch around the median
        whislo     end of the lower whisker
        whishi     end of the upper whisker
        fliers     outliers
        ========   ===================================
Notes
-----
Non-bootstrapping approach to confidence interval uses Gaussian-
based asymptotic approximation:
.. math::
\mathrm{med} \pm 1.57 \\times \\frac{\mathrm{iqr}}{\sqrt{N}}
General approach from:
McGill, R., Tukey, J.W., and Larsen, W.A. (1978) "Variations of
Boxplots", The American Statistician, 32:12-16.
"""
def _bootstrap_median(data, N=5000):
# determine 95% confidence intervals of the median
M = len(data)
percentiles = [2.5, 97.5]
ii = np.random.randint(M, size=(N, M))
        bsData = data[ii]
estimate = np.median(bsData, axis=1, overwrite_input=True)
CI = np.percentile(estimate, percentiles)
return CI
def _compute_conf_interval(data, med, iqr, bootstrap):
if bootstrap is not None:
# Do a bootstrap estimate of notch locations.
# get conf. intervals around median
CI = _bootstrap_median(data, N=bootstrap)
notch_min = CI[0]
notch_max = CI[1]
else:
N = len(data)
notch_min = med - 1.57 * iqr / np.sqrt(N)
notch_max = med + 1.57 * iqr / np.sqrt(N)
return notch_min, notch_max
# output is a list of dicts
bxpstats = []
# convert X to a list of lists
X = _reshape_2D(X)
ncols = len(X)
if labels is None:
labels = repeat(None)
elif len(labels) != ncols:
raise ValueError("Dimensions of labels and X must be compatible")
input_whis = whis
for ii, (x, label) in enumerate(zip(X, labels), start=0):
# empty dict
stats = {}
if label is not None:
stats['label'] = label
# restore whis to the input values in case it got changed in the loop
whis = input_whis
# note tricksyness, append up here and then mutate below
bxpstats.append(stats)
# if empty, bail
if len(x) == 0:
stats['fliers'] = np.array([])
stats['mean'] = np.nan
stats['med'] = np.nan
stats['q1'] = np.nan
stats['q3'] = np.nan
stats['cilo'] = np.nan
stats['cihi'] = np.nan
stats['whislo'] = np.nan
stats['whishi'] = np.nan
continue
# up-convert to an array, just to be safe
x = np.asarray(x)
# arithmetic mean
stats['mean'] = np.mean(x)
# medians and quartiles
q1, med, q3 = np.percentile(x, [25, 50, 75])
# interquartile range
stats['iqr'] = q3 - q1
if stats['iqr'] == 0 and autorange:
whis = 'range'
# conf. interval around median
stats['cilo'], stats['cihi'] = _compute_conf_interval(
x, med, stats['iqr'], bootstrap
)
# lowest/highest non-outliers
if np.isscalar(whis):
if np.isreal(whis):
loval = q1 - whis * stats['iqr']
hival = q3 + whis * stats['iqr']
elif whis in ['range', 'limit', 'limits', 'min/max']:
loval = np.min(x)
hival = np.max(x)
else:
whismsg = ('whis must be a float, valid string, or '
'list of percentiles')
raise ValueError(whismsg)
else:
loval = np.percentile(x, whis[0])
hival = np.percentile(x, whis[1])
# get high extreme
wiskhi = np.compress(x <= hival, x)
if len(wiskhi) == 0 or np.max(wiskhi) < q3:
stats['whishi'] = q3
else:
stats['whishi'] = np.max(wiskhi)
# get low extreme
wisklo = np.compress(x >= loval, x)
if len(wisklo) == 0 or np.min(wisklo) > q1:
stats['whislo'] = q1
else:
stats['whislo'] = np.min(wisklo)
# compute a single array of outliers
stats['fliers'] = np.hstack([
np.compress(x < stats['whislo'], x),
np.compress(x > stats['whishi'], x)
])
# add in the remaining stats
stats['q1'], stats['med'], stats['q3'] = q1, med, q3
return bxpstats
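# Hedged sketch (not part of the original module): computes the summary
# dictionary for one small, made-up sample.
def _boxplot_stats_demo():
    sample = np.array([1., 2., 3., 4., 5., 100.])
    (stats,) = boxplot_stats(sample, whis=1.5)
    # q1/q3 come from np.percentile, and 100. lies beyond q3 + 1.5*IQR, so it
    # is reported in 'fliers' instead of stretching the upper whisker
    assert stats['med'] == np.percentile(sample, 50)
    assert 100. in stats['fliers']
    return stats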
# FIXME I don't think this is used anywhere
def unmasked_index_ranges(mask, compressed=True):
"""
Find index ranges where *mask* is *False*.
*mask* will be flattened if it is not already 1-D.
Returns Nx2 :class:`numpy.ndarray` with each row the start and stop
indices for slices of the compressed :class:`numpy.ndarray`
corresponding to each of *N* uninterrupted runs of unmasked
values. If optional argument *compressed* is *False*, it returns
the start and stop indices into the original :class:`numpy.ndarray`,
not the compressed :class:`numpy.ndarray`. Returns *None* if there
are no unmasked values.
Example::
y = ma.array(np.arange(5), mask = [0,0,1,0,0])
ii = unmasked_index_ranges(ma.getmaskarray(y))
# returns array [[0,2,] [2,4,]]
y.compressed()[ii[1,0]:ii[1,1]]
# returns array [3,4,]
ii = unmasked_index_ranges(ma.getmaskarray(y), compressed=False)
# returns array [[0, 2], [3, 5]]
y.filled()[ii[1,0]:ii[1,1]]
# returns array [3,4,]
Prior to the transforms refactoring, this was used to support
masked arrays in Line2D.
"""
mask = mask.reshape(mask.size)
m = np.concatenate(((1,), mask, (1,)))
indices = np.arange(len(mask) + 1)
mdif = m[1:] - m[:-1]
i0 = np.compress(mdif == -1, indices)
i1 = np.compress(mdif == 1, indices)
assert len(i0) == len(i1)
if len(i1) == 0:
return None # Maybe this should be np.zeros((0,2), dtype=int)
if not compressed:
return np.concatenate((i0[:, np.newaxis], i1[:, np.newaxis]), axis=1)
seglengths = i1 - i0
breakpoints = np.cumsum(seglengths)
ic0 = np.concatenate(((0,), breakpoints[:-1]))
ic1 = breakpoints
return np.concatenate((ic0[:, np.newaxis], ic1[:, np.newaxis]), axis=1)
# a dict to cross-map linestyle arguments
_linestyles = [('-', 'solid'),
('--', 'dashed'),
('-.', 'dashdot'),
(':', 'dotted')]
ls_mapper = dict(_linestyles)
# The ls_mapper maps short codes for line style to their full name used
# by backends
# The reverse mapper is for mapping full names to short ones
ls_mapper_r = dict([(ls[1], ls[0]) for ls in _linestyles])
def align_iterators(func, *iterables):
"""
This generator takes a bunch of iterables that are ordered by func
It sends out ordered tuples::
(func(row), [rows from all iterators matching func(row)])
It is used by :func:`matplotlib.mlab.recs_join` to join record arrays
"""
class myiter:
def __init__(self, it):
self.it = it
self.key = self.value = None
self.iternext()
def iternext(self):
try:
self.value = next(self.it)
self.key = func(self.value)
except StopIteration:
self.value = self.key = None
def __call__(self, key):
retval = None
if key == self.key:
retval = self.value
self.iternext()
elif self.key and key > self.key:
raise ValueError("Iterator has been left behind")
return retval
# This can be made more efficient by not computing the minimum key for each
# iteration
iters = [myiter(it) for it in iterables]
minvals = minkey = True
while 1:
minvals = ([_f for _f in [it.key for it in iters] if _f])
if minvals:
minkey = min(minvals)
yield (minkey, [it(minkey) for it in iters])
else:
break
def is_math_text(s):
# Did we find an even number of non-escaped dollar signs?
    # If so, treat it as math text.
try:
s = six.text_type(s)
except UnicodeDecodeError:
raise ValueError(
"matplotlib display text must have all code points < 128 or use "
"Unicode strings")
dollar_count = s.count(r'$') - s.count(r'\$')
even_dollars = (dollar_count > 0 and dollar_count % 2 == 0)
return even_dollars
def _check_1d(x):
'''
Converts a sequence of less than 1 dimension, to an array of 1
dimension; leaves everything else untouched.
'''
if not hasattr(x, 'shape') or len(x.shape) < 1:
return np.atleast_1d(x)
else:
try:
x[:, None]
return x
except (IndexError, TypeError):
return np.atleast_1d(x)
def _reshape_2D(X):
"""
Converts a non-empty list or an ndarray of two or fewer dimensions
into a list of iterable objects so that in
for v in _reshape_2D(X):
v is iterable and can be used to instantiate a 1D array.
"""
if hasattr(X, 'shape'):
# one item
if len(X.shape) == 1:
if hasattr(X[0], 'shape'):
X = list(X)
else:
X = [X, ]
# several items
elif len(X.shape) == 2:
nrows, ncols = X.shape
if nrows == 1:
X = [X]
elif ncols == 1:
X = [X.ravel()]
else:
X = [X[:, i] for i in xrange(ncols)]
else:
raise ValueError("input `X` must have 2 or fewer dimensions")
if not hasattr(X[0], '__len__'):
X = [X]
else:
X = [np.ravel(x) for x in X]
return X
def violin_stats(X, method, points=100):
'''
Returns a list of dictionaries of data which can be used to draw a series
of violin plots. See the `Returns` section below to view the required keys
of the dictionary. Users can skip this function and pass a user-defined set
of dictionaries to the `axes.vplot` method instead of using MPL to do the
calculations.
Parameters
----------
X : array-like
Sample data that will be used to produce the gaussian kernel density
estimates. Must have 2 or fewer dimensions.
method : callable
The method used to calculate the kernel density estimate for each
column of data. When called via `method(v, coords)`, it should
return a vector of the values of the KDE evaluated at the values
specified in coords.
points : scalar, default = 100
Defines the number of points to evaluate each of the gaussian kernel
density estimates at.
Returns
-------
A list of dictionaries containing the results for each column of data.
The dictionaries contain at least the following:
- coords: A list of scalars containing the coordinates this particular
kernel density estimate was evaluated at.
- vals: A list of scalars containing the values of the kernel density
estimate at each of the coordinates given in `coords`.
- mean: The mean value for this column of data.
- median: The median value for this column of data.
- min: The minimum value for this column of data.
- max: The maximum value for this column of data.
'''
# List of dictionaries describing each of the violins.
vpstats = []
# Want X to be a list of data sequences
X = _reshape_2D(X)
for x in X:
# Dictionary of results for this distribution
stats = {}
# Calculate basic stats for the distribution
min_val = np.min(x)
max_val = np.max(x)
# Evaluate the kernel density estimate
coords = np.linspace(min_val, max_val, points)
stats['vals'] = method(x, coords)
stats['coords'] = coords
# Store additional statistics for this distribution
stats['mean'] = np.mean(x)
stats['median'] = np.median(x)
stats['min'] = min_val
stats['max'] = max_val
# Append to output
vpstats.append(stats)
return vpstats
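# Hedged sketch (not part of the original module): any callable with the
# method(v, coords) signature works; a SciPy Gaussian KDE is assumed to be
# available here purely for illustration.
def _violin_stats_demo():
    from scipy.stats import gaussian_kde

    def kde_method(v, coords):
        return gaussian_kde(v)(coords)

    data = np.random.normal(size=(100, 2))
    vpstats = violin_stats(data, kde_method, points=50)
    assert len(vpstats) == 2 and len(vpstats[0]['coords']) == 50
    return vpstats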
class _NestedClassGetter(object):
# recipe from http://stackoverflow.com/a/11493777/741316
"""
When called with the containing class as the first argument,
and the name of the nested class as the second argument,
returns an instance of the nested class.
"""
def __call__(self, containing_class, class_name):
nested_class = getattr(containing_class, class_name)
# make an instance of a simple object (this one will do), for which we
# can change the __class__ later on.
nested_instance = _NestedClassGetter()
# set the class of the instance, the __init__ will never be called on
# the class but the original state will be set later on by pickle.
nested_instance.__class__ = nested_class
return nested_instance
class _InstanceMethodPickler(object):
"""
Pickle cannot handle instancemethod saving. _InstanceMethodPickler
provides a solution to this.
"""
def __init__(self, instancemethod):
"""Takes an instancemethod as its only argument."""
if six.PY3:
self.parent_obj = instancemethod.__self__
self.instancemethod_name = instancemethod.__func__.__name__
else:
self.parent_obj = instancemethod.im_self
self.instancemethod_name = instancemethod.im_func.__name__
def get_instancemethod(self):
return getattr(self.parent_obj, self.instancemethod_name)
def _step_validation(x, *args):
"""
Helper function of `pts_to_*step` functions
    This function does all of the normalization required on the
    input and generates the template for the output.
"""
args = tuple(np.asanyarray(y) for y in args)
x = np.asanyarray(x)
if x.ndim != 1:
raise ValueError("x must be 1 dimenional")
if len(args) == 0:
raise ValueError("At least one Y value must be passed")
return np.vstack((x, ) + args)
def pts_to_prestep(x, *args):
"""
    Convert a continuous line to pre-steps.
    Given a set of N points, convert to 2N - 1 points which, when connected
    linearly, give a step function that changes values at the beginning of
    the intervals.
Parameters
----------
x : array
The x location of the steps
y1, y2, ... : array
Any number of y arrays to be turned into steps.
All must be the same length as ``x``
Returns
-------
x, y1, y2, .. : array
The x and y values converted to steps in the same order
as the input. If the input is length ``N``, each of these arrays
        will be length ``2N - 1``
Examples
--------
>> x_s, y1_s, y2_s = pts_to_prestep(x, y1, y2)
"""
# do normalization
vertices = _step_validation(x, *args)
# create the output array
steps = np.zeros((vertices.shape[0], 2 * len(x) - 1), np.float)
# do the to step conversion logic
steps[0, 0::2], steps[0, 1::2] = vertices[0, :], vertices[0, :-1]
steps[1:, 0::2], steps[1:, 1:-1:2] = vertices[1:, :], vertices[1:, 1:]
# convert 2D array back to tuple
return tuple(steps)
def pts_to_poststep(x, *args):
"""
    Convert a continuous line to post-steps.
    Given a set of N points, convert to 2N - 1 points which, when connected
    linearly, give a step function that changes values at the end of
    the intervals.
Parameters
----------
x : array
The x location of the steps
y1, y2, ... : array
Any number of y arrays to be turned into steps.
All must be the same length as ``x``
Returns
-------
x, y1, y2, .. : array
The x and y values converted to steps in the same order
as the input. If the input is length ``N``, each of these arrays
        will be length ``2N - 1``
Examples
--------
    >> x_s, y1_s, y2_s = pts_to_poststep(x, y1, y2)
"""
# do normalization
vertices = _step_validation(x, *args)
# create the output array
steps = ma.zeros((vertices.shape[0], 2 * len(x) - 1), np.float)
# do the to step conversion logic
steps[0, ::2], steps[0, 1:-1:2] = vertices[0, :], vertices[0, 1:]
steps[1:, 0::2], steps[1:, 1::2] = vertices[1:, :], vertices[1:, :-1]
# convert 2D array back to tuple
return tuple(steps)
def pts_to_midstep(x, *args):
"""
    Convert a continuous line to mid-steps.
    Given a set of N points, convert to 2N points which, when connected
    linearly, give a step function that changes values at the middle of
    the intervals.
Parameters
----------
x : array
The x location of the steps
y1, y2, ... : array
Any number of y arrays to be turned into steps.
All must be the same length as ``x``
Returns
-------
x, y1, y2, .. : array
The x and y values converted to steps in the same order
as the input. If the input is length ``N``, each of these arrays
        will be length ``2N``
Examples
--------
    >> x_s, y1_s, y2_s = pts_to_midstep(x, y1, y2)
"""
# do normalization
vertices = _step_validation(x, *args)
# create the output array
steps = ma.zeros((vertices.shape[0], 2 * len(x)), np.float)
steps[0, 1:-1:2] = 0.5 * (vertices[0, :-1] + vertices[0, 1:])
steps[0, 2::2] = 0.5 * (vertices[0, :-1] + vertices[0, 1:])
steps[0, 0] = vertices[0, 0]
steps[0, -1] = vertices[0, -1]
steps[1:, 0::2], steps[1:, 1::2] = vertices[1:, :], vertices[1:, :]
# convert 2D array back to tuple
return tuple(steps)
STEP_LOOKUP_MAP = {'pre': pts_to_prestep,
'post': pts_to_poststep,
'mid': pts_to_midstep,
'step-pre': pts_to_prestep,
'step-post': pts_to_poststep,
'step-mid': pts_to_midstep}
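# Hedged sketch (not part of the original module): pre-step conversion of a
# three-point line, matching the construction in pts_to_prestep above.
def _steps_demo():
    x = np.array([1., 2., 3.])
    y = np.array([4., 5., 6.])
    x_s, y_s = pts_to_prestep(x, y)
    # N points become 2N - 1 points; the y value changes at the start of each
    # x interval: (1, 4), (1, 5), (2, 5), (2, 6), (3, 6)
    assert list(x_s) == [1., 1., 2., 2., 3.]
    assert list(y_s) == [4., 5., 5., 6., 6.]
    # STEP_LOOKUP_MAP picks the converter from a drawstyle-like key
    x_p, y_p = STEP_LOOKUP_MAP['post'](x, y)
    return x_s, y_s, x_p, y_p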
def index_of(y):
"""
A helper function to get the index of an input to plot
against if x values are not explicitly given.
Tries to get `y.index` (works if this is a pd.Series), if that
fails, return np.arange(y.shape[0]).
This will be extended in the future to deal with more types of
labeled data.
Parameters
----------
y : scalar or array-like
The proposed y-value
Returns
-------
x, y : ndarray
The x and y values to plot.
"""
try:
return y.index.values, y.values
except AttributeError:
y = np.atleast_1d(y)
return np.arange(y.shape[0], dtype=float), y
def safe_first_element(obj):
if isinstance(obj, collections.Iterator):
raise RuntimeError("matplotlib does not support generators "
"as input")
return next(iter(obj))
def normalize_kwargs(kw, alias_mapping=None, required=(), forbidden=(),
allowed=None):
"""Helper function to normalize kwarg inputs
    The order in which they are resolved is:
1. aliasing
2. required
3. forbidden
4. allowed
This order means that only the canonical names need appear in
`allowed`, `forbidden`, `required`
Parameters
----------
    alias_mapping : dict, optional
A mapping between a canonical name to a list of
aliases, in order of precedence from lowest to highest.
If the canonical value is not in the list it is assumed to have
the highest priority.
required : iterable, optional
A tuple of fields that must be in kwargs.
forbidden : iterable, optional
A list of keys which may not be in kwargs
allowed : tuple, optional
        A tuple of allowed fields.  If this is not None, then raise if
`kw` contains any keys not in the union of `required`
and `allowed`. To allow only the required fields pass in
``()`` for `allowed`
Raises
------
TypeError
To match what python raises if invalid args/kwargs are passed to
a callable.
"""
# deal with default value of alias_mapping
if alias_mapping is None:
alias_mapping = dict()
# make a local so we can pop
kw = dict(kw)
# output dictionary
ret = dict()
# hit all alias mappings
for canonical, alias_list in six.iteritems(alias_mapping):
# the alias lists are ordered from lowest to highest priority
# so we know to use the last value in this list
tmp = []
seen = []
for a in alias_list:
try:
tmp.append(kw.pop(a))
seen.append(a)
except KeyError:
pass
# if canonical is not in the alias_list assume highest priority
if canonical not in alias_list:
try:
tmp.append(kw.pop(canonical))
seen.append(canonical)
except KeyError:
pass
# if we found anything in this set of aliases put it in the return
# dict
if tmp:
ret[canonical] = tmp[-1]
if len(tmp) > 1:
warnings.warn("Saw kwargs {seen!r} which are all aliases for "
"{canon!r}. Kept value from {used!r}".format(
seen=seen, canon=canonical, used=seen[-1]))
# at this point we know that all keys which are aliased are removed, update
# the return dictionary from the cleaned local copy of the input
ret.update(kw)
fail_keys = [k for k in required if k not in ret]
if fail_keys:
raise TypeError("The required keys {keys!r} "
"are not in kwargs".format(keys=fail_keys))
fail_keys = [k for k in forbidden if k in ret]
if fail_keys:
raise TypeError("The forbidden keys {keys!r} "
"are in kwargs".format(keys=fail_keys))
if allowed is not None:
allowed_set = set(required) | set(allowed)
fail_keys = [k for k in ret if k not in allowed_set]
if fail_keys:
raise TypeError("kwargs contains {keys!r} which are not in "
"the required {req!r} or "
"allowed {allow!r} keys".format(
keys=fail_keys, req=required,
allow=allowed))
return ret
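# Hedged sketch (not part of the original module): 'lw' is treated as a
# lower-priority alias of 'linewidth', mirroring the resolution order
# documented above; the key names themselves are only illustrative.
def _normalize_kwargs_demo():
    alias_map = {'linewidth': ['lw']}
    out = normalize_kwargs({'lw': 2, 'color': 'r'}, alias_map)
    assert out == {'linewidth': 2, 'color': 'r'}
    # passing both spellings keeps the canonical one and warns about the alias
    out = normalize_kwargs({'lw': 2, 'linewidth': 3}, alias_map)
    assert out == {'linewidth': 3}
    return out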
def get_label(y, default_name):
try:
return y.name
except AttributeError:
return default_name
# Numpy > 1.6.x deprecates putmask in favor of the new copyto.
# So long as we support versions 1.6.x and less, we need the
# following local version of putmask. We choose to make a
# local version of putmask rather than of copyto because the
# latter includes more functionality than the former. Therefore
# it is easy to make a local version that gives full putmask
# behavior, but duplicating the full copyto behavior would be
# more difficult.
try:
np.copyto
except AttributeError:
_putmask = np.putmask
else:
def _putmask(a, mask, values):
return np.copyto(a, values, where=mask)
|
mit
|
dagophil/vigra
|
vigranumpy/examples/non_local_mean_2d_color.py
|
6
|
1450
|
from __future__ import print_function
import vigra
from vigra import numpy
from matplotlib import pylab
from time import time
import multiprocessing
path = "69015.jpg"
#path = "12074.jpg"
path = "100075.jpg"
path = "12003.jpg"
data = vigra.impex.readImage(path).astype(numpy.float32)
cpus = multiprocessing.cpu_count()
print("nCpus",cpus)
t0 = time()
#for c in range(3):
# cimg=data[:,:,c]
# cimg-=cimg.min()
# cimg/=cimg.max()
iters = 10
#policy = vigra.filters.RatioPolicy(sigma=10.0, meanRatio=0.95, varRatio=0.5)
policy = vigra.filters.NormPolicy(sigma=50.0, meanDist=50, varRatio=0.5)
#data-=100.0
res = vigra.filters.nonLocalMean2d(data,policy=policy,searchRadius=5,patchRadius=1,nThreads=cpus+1,stepSize=2,verbose=True,sigmaMean=10.0)
for i in range(iters-1):
res = vigra.filters.nonLocalMean2d(res,policy=policy,searchRadius=5,patchRadius=2,nThreads=cpus+1,stepSize=2,verbose=True,sigmaMean=10.0)
t1 = time()
res = vigra.taggedView(res,'xyc')
gma = vigra.filters.gaussianGradientMagnitude(res,4.0)
gmb = vigra.filters.gaussianGradientMagnitude(data,4.0)
#data+=100.0
print(t1-t0)
imgs = [data,res,gma,gmb]
for img in imgs:
for c in range(img.shape[2]):
cimg=img[:,:,c]
cimg-=cimg.min()
cimg/=cimg.max()
f = pylab.figure()
for n, arr in enumerate(imgs):
arr = arr.squeeze()
f.add_subplot(1, len(imgs), n+1)
pylab.imshow(arr.swapaxes(0,1))
pylab.title('denoised')
pylab.show()
|
mit
|
m11s/MissionPlanner
|
Lib/site-packages/scipy/signal/fir_filter_design.py
|
53
|
18572
|
"""Functions for FIR filter design."""
from math import ceil, log
import numpy as np
from numpy.fft import irfft
from scipy.special import sinc
import sigtools
# Some notes on function parameters:
#
# `cutoff` and `width` are given as numbers between 0 and 1.  These
# are relative frequencies, expressed as a fraction of the Nyquist rate.
# For example, if the Nyquist rate is 2KHz, then width=0.15 is a width
# of 300 Hz.
#
# The `order` of a FIR filter is one less than the number of taps.
# This is a potential source of confusion, so in the following code,
# we will always use the number of taps as the parameterization of
# the 'size' of the filter. The "number of taps" means the number
# of coefficients, which is the same as the length of the impulse
# response of the filter.
def kaiser_beta(a):
"""Compute the Kaiser parameter `beta`, given the attenuation `a`.
Parameters
----------
a : float
The desired attenuation in the stopband and maximum ripple in
the passband, in dB. This should be a *positive* number.
Returns
-------
beta : float
The `beta` parameter to be used in the formula for a Kaiser window.
References
----------
Oppenheim, Schafer, "Discrete-Time Signal Processing", p.475-476.
"""
if a > 50:
beta = 0.1102 * (a - 8.7)
elif a > 21:
beta = 0.5842 * (a - 21)**0.4 + 0.07886 * (a - 21)
else:
beta = 0.0
return beta
def kaiser_atten(numtaps, width):
"""Compute the attenuation of a Kaiser FIR filter.
Given the number of taps `N` and the transition width `width`, compute the
attenuation `a` in dB, given by Kaiser's formula:
a = 2.285 * (N - 1) * pi * width + 7.95
Parameters
----------
    numtaps : int
The number of taps in the FIR filter.
width : float
The desired width of the transition region between passband and stopband
(or, in general, at any discontinuity) for the filter.
Returns
-------
a : float
The attenuation of the ripple, in dB.
See Also
--------
kaiserord, kaiser_beta
"""
a = 2.285 * (numtaps - 1) * np.pi * width + 7.95
return a
def kaiserord(ripple, width):
"""Design a Kaiser window to limit ripple and width of transition region.
Parameters
----------
ripple : float
Positive number specifying maximum ripple in passband (dB) and minimum
ripple in stopband.
width : float
Width of transition region (normalized so that 1 corresponds to pi
radians / sample).
Returns
-------
numtaps : int
        The length of the Kaiser window.
    beta : float
        The `beta` parameter for the Kaiser window.
Notes
-----
There are several ways to obtain the Kaiser window:
signal.kaiser(numtaps, beta, sym=0)
signal.get_window(beta, numtaps)
signal.get_window(('kaiser', beta), numtaps)
The empirical equations discovered by Kaiser are used.
See Also
--------
kaiser_beta, kaiser_atten
References
----------
Oppenheim, Schafer, "Discrete-Time Signal Processing", p.475-476.
"""
A = abs(ripple) # in case somebody is confused as to what's meant
if A < 8:
# Formula for N is not valid in this range.
raise ValueError("Requested maximum ripple attentuation %f is too "
"small for the Kaiser formula." % A)
beta = kaiser_beta(A)
# Kaiser's formula (as given in Oppenheim and Schafer) is for the filter
# order, so we have to add 1 to get the number of taps.
numtaps = (A - 7.95) / 2.285 / (np.pi * width) + 1
return int(ceil(numtaps)), beta
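# A minimal usage sketch of the Kaiser design path (the ripple, width and
# cutoff values below are arbitrary illustrative choices): kaiserord picks a
# taps/beta pair, and firwin (defined below) consumes beta via its `window`
# argument.
def _example_kaiser_design():
    numtaps, beta = kaiserord(ripple=65, width=0.05)
    return firwin(numtaps, cutoff=0.3, window=('kaiser', beta))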
def firwin(numtaps, cutoff, width=None, window='hamming', pass_zero=True,
scale=True, nyq=1.0):
"""
FIR filter design using the window method.
This function computes the coefficients of a finite impulse response filter.
The filter will have linear phase; it will be Type I if `numtaps` is odd and
Type II if `numtaps` is even.
Type II filters always have zero response at the Nyquist rate, so a
ValueError exception is raised if firwin is called with `numtaps` even and
having a passband whose right end is at the Nyquist rate.
Parameters
----------
numtaps : int
Length of the filter (number of coefficients, i.e. the filter
order + 1). `numtaps` must be even if a passband includes the
Nyquist frequency.
cutoff : float or 1D array_like
Cutoff frequency of filter (expressed in the same units as `nyq`)
OR an array of cutoff frequencies (that is, band edges). In the
latter case, the frequencies in `cutoff` should be positive and
monotonically increasing between 0 and `nyq`. The values 0 and
`nyq` must not be included in `cutoff`.
width : float or None
If `width` is not None, then assume it is the approximate width
of the transition region (expressed in the same units as `nyq`)
for use in Kaiser FIR filter design. In this case, the `window`
argument is ignored.
window : string or tuple of string and parameter values
Desired window to use. See `scipy.signal.get_window` for a list
of windows and required parameters.
pass_zero : bool
If True, the gain at the frequency 0 (i.e. the "DC gain") is 1.
Otherwise the DC gain is 0.
scale : bool
Set to True to scale the coefficients so that the frequency
response is exactly unity at a certain frequency.
That frequency is either:
0 (DC) if the first passband starts at 0 (i.e. pass_zero
is True);
`nyq` (the Nyquist rate) if the first passband ends at
`nyq` (i.e the filter is a single band highpass filter);
center of first passband otherwise.
nyq : float
Nyquist frequency. Each frequency in `cutoff` must be between 0
and `nyq`.
Returns
-------
h : 1D ndarray
Coefficients of length `numtaps` FIR filter.
Raises
------
ValueError
If any value in `cutoff` is less than or equal to 0 or greater
than or equal to `nyq`, if the values in `cutoff` are not strictly
monotonically increasing, or if `numtaps` is even but a passband
includes the Nyquist frequency.
Examples
--------
Low-pass from 0 to f::
>>> firwin(numtaps, f)
Use a specific window function::
>>> firwin(numtaps, f, window='nuttall')
High-pass ('stop' from 0 to f)::
>>> firwin(numtaps, f, pass_zero=False)
Band-pass::
>>> firwin(numtaps, [f1, f2], pass_zero=False)
Band-stop::
>>> firwin(numtaps, [f1, f2])
Multi-band (passbands are [0, f1], [f2, f3] and [f4, 1])::
    >>> firwin(numtaps, [f1, f2, f3, f4])
Multi-band (passbands are [f1, f2] and [f3,f4])::
>>> firwin(numtaps, [f1, f2, f3, f4], pass_zero=False)
See also
--------
scipy.signal.firwin2
"""
# The major enhancements to this function added in November 2010 were
# developed by Tom Krauss (see ticket #902).
cutoff = np.atleast_1d(cutoff) / float(nyq)
# Check for invalid input.
if cutoff.ndim > 1:
raise ValueError("The cutoff argument must be at most one-dimensional.")
if cutoff.size == 0:
raise ValueError("At least one cutoff frequency must be given.")
if cutoff.min() <= 0 or cutoff.max() >= 1:
raise ValueError("Invalid cutoff frequency: frequencies must be greater than 0 and less than nyq.")
if np.any(np.diff(cutoff) <= 0):
raise ValueError("Invalid cutoff frequencies: the frequencies must be strictly increasing.")
if width is not None:
# A width was given. Find the beta parameter of the Kaiser window
# and set `window`. This overrides the value of `window` passed in.
atten = kaiser_atten(numtaps, float(width)/nyq)
beta = kaiser_beta(atten)
window = ('kaiser', beta)
pass_nyquist = bool(cutoff.size & 1) ^ pass_zero
if pass_nyquist and numtaps % 2 == 0:
raise ValueError("A filter with an even number of coefficients must "
"have zero response at the Nyquist rate.")
# Insert 0 and/or 1 at the ends of cutoff so that the length of cutoff is even,
# and each pair in cutoff corresponds to passband.
cutoff = np.hstack(([0.0]*pass_zero, cutoff, [1.0]*pass_nyquist))
# `bands` is a 2D array; each row gives the left and right edges of a passband.
bands = cutoff.reshape(-1,2)
# Build up the coefficients.
alpha = 0.5 * (numtaps-1)
m = np.arange(0, numtaps) - alpha
h = 0
for left, right in bands:
h += right * sinc(right * m)
h -= left * sinc(left * m)
# Get and apply the window function.
from signaltools import get_window
win = get_window(window, numtaps, fftbins=False)
h *= win
# Now handle scaling if desired.
if scale:
# Get the first passband.
left, right = bands[0]
if left == 0:
scale_frequency = 0.0
elif right == 1:
scale_frequency = 1.0
else:
scale_frequency = 0.5 * (left + right)
c = np.cos(np.pi * m * scale_frequency)
s = np.sum(h * c)
h /= s
return h
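# A minimal sketch checking a firwin design (illustrative values only): with
# the default pass_zero=True and scale=True, the frequency response evaluated
# at DC via scipy.signal.freqz should come out as 1.
def _example_firwin_lowpass():
    from scipy.signal import freqz
    taps = firwin(numtaps=65, cutoff=0.3)
    w, h = freqz(taps)
    print("DC gain: %g" % abs(h[0]))
    return taps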
# Original version of firwin2 from scipy ticket #457, submitted by "tash".
#
# Rewritten by Warren Weckesser, 2010.
def firwin2(numtaps, freq, gain, nfreqs=None, window='hamming', nyq=1.0):
"""FIR filter design using the window method.
From the given frequencies `freq` and corresponding gains `gain`,
this function constructs an FIR filter with linear phase and
(approximately) the given frequency response.
Parameters
----------
numtaps : int
The number of taps in the FIR filter. `numtaps` must be less than
`nfreqs`. If the gain at the Nyquist rate, `gain[-1]`, is not 0,
then `numtaps` must be odd.
freq : array-like, 1D
The frequency sampling points. Typically 0.0 to 1.0 with 1.0 being
Nyquist. The Nyquist frequency can be redefined with the argument
`nyq`.
The values in `freq` must be nondecreasing. A value can be repeated
once to implement a discontinuity. The first value in `freq` must
be 0, and the last value must be `nyq`.
gain : array-like
The filter gains at the frequency sampling points.
nfreqs : int, optional
The size of the interpolation mesh used to construct the filter.
For most efficient behavior, this should be a power of 2 plus 1
(e.g, 129, 257, etc). The default is one more than the smallest
power of 2 that is not less than `numtaps`. `nfreqs` must be greater
than `numtaps`.
window : string or (string, float) or float, or None, optional
Window function to use. Default is "hamming". See
`scipy.signal.get_window` for the complete list of possible values.
If None, no window function is applied.
nyq : float
Nyquist frequency. Each frequency in `freq` must be between 0 and
`nyq` (inclusive).
Returns
-------
taps : numpy 1D array of length `numtaps`
The filter coefficients of the FIR filter.
Example
-------
A lowpass FIR filter with a response that is 1 on [0.0, 0.5], and
that decreases linearly on [0.5, 1.0] from 1 to 0:
>>> taps = firwin2(150, [0.0, 0.5, 1.0], [1.0, 1.0, 0.0])
>>> print(taps[72:78])
[-0.02286961 -0.06362756 0.57310236 0.57310236 -0.06362756 -0.02286961]
See also
--------
scipy.signal.firwin
Notes
-----
From the given set of frequencies and gains, the desired response is
constructed in the frequency domain. The inverse FFT is applied to the
desired response to create the associated convolution kernel, and the
first `numtaps` coefficients of this kernel, scaled by `window`, are
returned.
The FIR filter will have linear phase. The filter is Type I if `numtaps`
is odd and Type II if `numtaps` is even. Because Type II filters always
have a zero at the Nyquist frequency, `numtaps` must be odd if `gain[-1]`
is not zero.
.. versionadded:: 0.9.0
References
----------
.. [1] Oppenheim, A. V. and Schafer, R. W., "Discrete-Time Signal
Processing", Prentice-Hall, Englewood Cliffs, New Jersey (1989).
(See, for example, Section 7.4.)
.. [2] Smith, Steven W., "The Scientist and Engineer's Guide to Digital
Signal Processing", Ch. 17. http://www.dspguide.com/ch17/1.htm
"""
if len(freq) != len(gain):
raise ValueError('freq and gain must be of same length.')
if nfreqs is not None and numtaps >= nfreqs:
        raise ValueError('numtaps must be less than nfreqs, but firwin2 was '
                         'called with numtaps=%d and nfreqs=%s' % (numtaps, nfreqs))
if freq[0] != 0 or freq[-1] != nyq:
raise ValueError('freq must start with 0 and end with `nyq`.')
d = np.diff(freq)
if (d < 0).any():
raise ValueError('The values in freq must be nondecreasing.')
d2 = d[:-1] + d[1:]
if (d2 == 0).any():
raise ValueError('A value in freq must not occur more than twice.')
if numtaps % 2 == 0 and gain[-1] != 0.0:
raise ValueError("A filter with an even number of coefficients must "
"have zero gain at the Nyquist rate.")
if nfreqs is None:
nfreqs = 1 + 2 ** int(ceil(log(numtaps,2)))
# Tweak any repeated values in freq so that interp works.
eps = np.finfo(float).eps
for k in range(len(freq)):
if k < len(freq)-1 and freq[k] == freq[k+1]:
freq[k] = freq[k] - eps
freq[k+1] = freq[k+1] + eps
# Linearly interpolate the desired response on a uniform mesh `x`.
x = np.linspace(0.0, nyq, nfreqs)
fx = np.interp(x, freq, gain)
# Adjust the phases of the coefficients so that the first `ntaps` of the
# inverse FFT are the desired filter coefficients.
shift = np.exp(-(numtaps-1)/2. * 1.j * np.pi * x / nyq)
fx2 = fx * shift
# Use irfft to compute the inverse FFT.
out_full = irfft(fx2)
if window is not None:
# Create the window to apply to the filter coefficients.
from signaltools import get_window
wind = get_window(window, numtaps, fftbins=False)
else:
wind = 1
# Keep only the first `numtaps` coefficients in `out`, and multiply by
# the window.
out = out_full[:numtaps] * wind
return out
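# A minimal sketch mirroring the docstring example above: a response that is
# flat up to half-Nyquist and ramps linearly down to zero.  With an odd
# (Type I) tap count the resulting coefficients should be symmetric.
def _example_firwin2_ramp():
    taps = firwin2(151, [0.0, 0.5, 1.0], [1.0, 1.0, 0.0])
    assert np.allclose(taps, taps[::-1])
    return taps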
def remez(numtaps, bands, desired, weight=None, Hz=1, type='bandpass',
maxiter=25, grid_density=16):
"""
Calculate the minimax optimal filter using the Remez exchange algorithm.
Calculate the filter-coefficients for the finite impulse response
(FIR) filter whose transfer function minimizes the maximum error
between the desired gain and the realized gain in the specified
frequency bands using the Remez exchange algorithm.
Parameters
----------
numtaps : int
The desired number of taps in the filter. The number of taps is
the number of terms in the filter, or the filter order plus one.
bands : array_like
A monotonic sequence containing the band edges in Hz.
All elements must be non-negative and less than half the sampling
frequency as given by `Hz`.
desired : array_like
A sequence half the size of bands containing the desired gain
in each of the specified bands.
weight : array_like, optional
A relative weighting to give to each band region. The length of
`weight` has to be half the length of `bands`.
Hz : scalar, optional
The sampling frequency in Hz. Default is 1.
type : {'bandpass', 'differentiator', 'hilbert'}, optional
The type of filter:
'bandpass' : flat response in bands. This is the default.
'differentiator' : frequency proportional response in bands.
'hilbert' : filter with odd symmetry, that is, type III
(for even order) or type IV (for odd order)
linear phase filters.
maxiter : int, optional
Maximum number of iterations of the algorithm. Default is 25.
grid_density : int, optional
Grid density. The dense grid used in `remez` is of size
``(numtaps + 1) * grid_density``. Default is 16.
Returns
-------
out : ndarray
A rank-1 array containing the coefficients of the optimal
(in a minimax sense) filter.
See Also
--------
freqz : Compute the frequency response of a digital filter.
References
----------
.. [1] J. H. McClellan and T. W. Parks, "A unified approach to the
design of optimum FIR linear phase digital filters",
IEEE Trans. Circuit Theory, vol. CT-20, pp. 697-701, 1973.
.. [2] J. H. McClellan, T. W. Parks and L. R. Rabiner, "A Computer
Program for Designing Optimum FIR Linear Phase Digital
Filters", IEEE Trans. Audio Electroacoust., vol. AU-21,
pp. 506-525, 1973.
Examples
--------
We want to construct a filter with a passband at 0.2-0.4 Hz, and
stop bands at 0-0.1 Hz and 0.45-0.5 Hz. Note that this means that the
behavior in the frequency ranges between those bands is unspecified and
may overshoot.
>>> bpass = sp.signal.remez(72, [0, 0.1, 0.2, 0.4, 0.45, 0.5], [0, 1, 0])
>>> freq, response = sp.signal.freqz(bpass)
>>> ampl = np.abs(response)
>>> import matplotlib.pyplot as plt
>>> fig = plt.figure()
>>> ax1 = fig.add_subplot(111)
>>> ax1.semilogy(freq/(2*np.pi), ampl, 'b-') # freq in Hz
[<matplotlib.lines.Line2D object at 0xf486790>]
>>> plt.show()
"""
# Convert type
try:
tnum = {'bandpass':1, 'differentiator':2, 'hilbert':3}[type]
except KeyError:
raise ValueError("Type must be 'bandpass', 'differentiator', or 'hilbert'")
# Convert weight
if weight is None:
weight = [1] * len(desired)
bands = np.asarray(bands).copy()
return sigtools._remez(numtaps, bands, desired, weight, tnum, Hz,
maxiter, grid_density)
|
gpl-3.0
|
JackKelly/neuralnilm_prototype
|
scripts/e298.py
|
2
|
10743
|
from __future__ import print_function, division
import matplotlib
import logging
from sys import stdout
matplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab!
from neuralnilm import (Net, RealApplianceSource,
BLSTMLayer, DimshuffleLayer,
BidirectionalRecurrentLayer)
from neuralnilm.source import standardise, discretize, fdiff, power_and_fdiff
from neuralnilm.experiment import run_experiment, init_experiment
from neuralnilm.net import TrainingError
from neuralnilm.layers import MixtureDensityLayer
from neuralnilm.objectives import scaled_cost, mdn_nll, scaled_cost_ignore_inactive, ignore_inactive
from neuralnilm.plot import MDNPlotter
from lasagne.nonlinearities import sigmoid, rectify, tanh
from lasagne.objectives import mse
from lasagne.init import Uniform, Normal
from lasagne.layers import (LSTMLayer, DenseLayer, Conv1DLayer,
ReshapeLayer, FeaturePoolLayer, RecurrentLayer)
from lasagne.updates import nesterov_momentum, momentum
from functools import partial
import os
import __main__
from copy import deepcopy
from math import sqrt
import numpy as np
import theano.tensor as T
NAME = os.path.splitext(os.path.split(__main__.__file__)[1])[0]
PATH = "/homes/dk3810/workspace/python/neuralnilm/figures"
SAVE_PLOT_INTERVAL = 500
GRADIENT_STEPS = 100
SEQ_LENGTH = 512
source_dict = dict(
filename='/data/dk3810/ukdale.h5',
appliances=[
['fridge freezer', 'fridge', 'freezer'],
'hair straighteners',
'television',
'dish washer',
['washer dryer', 'washing machine']
],
max_appliance_powers=[300, 500, 200, 2500, 2400],
on_power_thresholds=[5] * 5,
max_input_power=5900,
min_on_durations=[60, 60, 60, 1800, 1800],
min_off_durations=[12, 12, 12, 1800, 600],
window=("2013-06-01", "2014-07-01"),
seq_length=SEQ_LENGTH,
output_one_appliance=False,
boolean_targets=False,
train_buildings=[1],
validation_buildings=[1],
skip_probability=0.5,
n_seq_per_batch=16,
subsample_target=4,
include_diff=False,
clip_appliance_power=True,
target_is_prediction=False,
standardise_input=True,
standardise_targets=True,
input_padding=0,
lag=0,
reshape_target_to_2D=True,
input_stats={'mean': np.array([ 0.05526326], dtype=np.float32),
'std': np.array([ 0.12636775], dtype=np.float32)},
target_stats={
'mean': np.array([ 0.04066789, 0.01881946,
0.24639061, 0.17608672, 0.10273963],
dtype=np.float32),
'std': np.array([ 0.11449792, 0.07338708,
0.26608968, 0.33463112, 0.21250485],
dtype=np.float32)}
)
N = 50
net_dict = dict(
save_plot_interval=SAVE_PLOT_INTERVAL,
loss_function=partial(ignore_inactive, loss_func=mdn_nll, seq_length=SEQ_LENGTH),
# loss_function=lambda x, t: mdn_nll(x, t).mean(),
# loss_function=lambda x, t: mse(x, t).mean(),
updates_func=momentum,
learning_rate=1e-03,
learning_rate_changes_by_iteration={
100: 5e-04,
500: 1e-04,
2000: 5e-05
# 3000: 1e-05
# 7000: 5e-06,
# 10000: 1e-06,
# 15000: 5e-07,
# 50000: 1e-07
},
plotter=MDNPlotter,
layers_config=[
{
'type': BidirectionalRecurrentLayer,
'num_units': N,
'gradient_steps': GRADIENT_STEPS,
'W_in_to_hid': Normal(std=1.),
'nonlinearity': tanh
},
{
'type': FeaturePoolLayer,
'ds': 2, # number of feature maps to be pooled together
'axis': 1, # pool over the time axis
'pool_function': T.max
},
{
'type': BidirectionalRecurrentLayer,
'num_units': N,
'gradient_steps': GRADIENT_STEPS,
'W_in_to_hid': Normal(std=1/sqrt(N)),
'nonlinearity': tanh
},
{
'type': FeaturePoolLayer,
'ds': 2, # number of feature maps to be pooled together
'axis': 1, # pool over the time axis
'pool_function': T.max
},
{
'type': BidirectionalRecurrentLayer,
'num_units': N,
'gradient_steps': GRADIENT_STEPS,
'W_in_to_hid': Normal(std=1/sqrt(N)),
'nonlinearity': tanh
}
]
)
def exp_a(name):
# 5 appliances
# avg valid cost = 1.1260980368
global source
source_dict_copy = deepcopy(source_dict)
source = RealApplianceSource(**source_dict_copy)
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(
experiment_name=name,
source=source
))
net_dict_copy['layers_config'].extend([
{
'type': MixtureDensityLayer,
'num_units': source.n_outputs,
'num_components': 2
}
])
net = Net(**net_dict_copy)
return net
def exp_b(name):
# 3 appliances
global source
source_dict_copy = deepcopy(source_dict)
source_dict_copy['appliances'] = [
['fridge freezer', 'fridge', 'freezer'],
'hair straighteners',
'television'
]
source = RealApplianceSource(**source_dict_copy)
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(
experiment_name=name,
source=source
))
N = 50
net_dict_copy['layers_config'].extend([
{
'type': MixtureDensityLayer,
'num_units': source.n_outputs,
'num_components': 2
}
])
net = Net(**net_dict_copy)
return net
def exp_c(name):
# one pool layer
# avg valid cost = 1.2261329889
global source
source_dict_copy = deepcopy(source_dict)
source = RealApplianceSource(**source_dict_copy)
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(
experiment_name=name,
source=source
))
N = 50
net_dict_copy['layers_config'] = [
{
'type': BidirectionalRecurrentLayer,
'num_units': N,
'gradient_steps': GRADIENT_STEPS,
'W_in_to_hid': Normal(std=1.),
'nonlinearity': tanh
},
{
'type': BidirectionalRecurrentLayer,
'num_units': N,
'gradient_steps': GRADIENT_STEPS,
'W_in_to_hid': Normal(std=1/sqrt(N)),
'nonlinearity': tanh
},
{
'type': FeaturePoolLayer,
'ds': 4, # number of feature maps to be pooled together
'axis': 1, # pool over the time axis
'pool_function': T.max
},
{
'type': BidirectionalRecurrentLayer,
'num_units': N,
'gradient_steps': GRADIENT_STEPS,
'W_in_to_hid': Normal(std=1/sqrt(N)),
'nonlinearity': tanh
},
{
'type': MixtureDensityLayer,
'num_units': source.n_outputs,
'num_components': 2
}
]
net = Net(**net_dict_copy)
return net
def exp_d(name):
# BLSTM
global source
source_dict_copy = deepcopy(source_dict)
source = RealApplianceSource(**source_dict_copy)
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(
experiment_name=name,
source=source
))
N = 50
net_dict_copy['layers_config'] = [
{
'type': BLSTMLayer,
'num_units': 50,
'gradient_steps': GRADIENT_STEPS,
'peepholes': False,
'W_in_to_cell': Normal(std=1.)
},
{
'type': BLSTMLayer,
'num_units': 50,
'gradient_steps': GRADIENT_STEPS,
'peepholes': False,
'W_in_to_cell': Normal(std=1/sqrt(N))
},
{
'type': FeaturePoolLayer,
'ds': 4, # number of feature maps to be pooled together
'axis': 1, # pool over the time axis
'pool_function': T.max
},
{
'type': BLSTMLayer,
'num_units': 50,
'gradient_steps': GRADIENT_STEPS,
'peepholes': False,
'W_in_to_cell': Normal(std=1/sqrt(N))
},
{
'type': MixtureDensityLayer,
'num_units': source.n_outputs,
'num_components': 2
}
]
net = Net(**net_dict_copy)
return net
def exp_e(name):
# BLSTM 2x2x pool
global source
source_dict_copy = deepcopy(source_dict)
source = RealApplianceSource(**source_dict_copy)
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(
experiment_name=name,
source=source
))
N = 50
net_dict_copy['layers_config'] = [
{
'type': BLSTMLayer,
'num_units': 50,
'gradient_steps': GRADIENT_STEPS,
'peepholes': False,
'W_in_to_cell': Normal(std=1.)
},
{
'type': FeaturePoolLayer,
'ds': 2, # number of feature maps to be pooled together
'axis': 1, # pool over the time axis
'pool_function': T.max
},
{
'type': BLSTMLayer,
'num_units': 50,
'gradient_steps': GRADIENT_STEPS,
'peepholes': False,
'W_in_to_cell': Normal(std=1/sqrt(N))
},
{
'type': FeaturePoolLayer,
'ds': 2, # number of feature maps to be pooled together
'axis': 1, # pool over the time axis
'pool_function': T.max
},
{
'type': BLSTMLayer,
'num_units': 50,
'gradient_steps': GRADIENT_STEPS,
'peepholes': False,
'W_in_to_cell': Normal(std=1/sqrt(N))
},
{
'type': MixtureDensityLayer,
'num_units': source.n_outputs,
'num_components': 2
}
]
net = Net(**net_dict_copy)
return net
def main():
# EXPERIMENTS = list('abcdefghijklmnopqrstuvwxyz')
EXPERIMENTS = list('abcde')
for experiment in EXPERIMENTS:
full_exp_name = NAME + experiment
func_call = init_experiment(PATH, experiment, full_exp_name)
logger = logging.getLogger(full_exp_name)
try:
net = eval(func_call)
run_experiment(net, epochs=4000)
except KeyboardInterrupt:
logger.info("KeyboardInterrupt")
break
except Exception as exception:
logger.exception("Exception")
raise
finally:
logging.shutdown()
if __name__ == "__main__":
main()
|
mit
|
themrmax/scikit-learn
|
examples/semi_supervised/plot_label_propagation_digits.py
|
55
|
2723
|
"""
===================================================
Label Propagation digits: Demonstrating performance
===================================================
This example demonstrates the power of semisupervised learning by
training a Label Spreading model to classify handwritten digits
with sets of very few labels.
The handwritten digit dataset has 1797 total points. Here a subset of
330 points is used, of which only 30 are labeled. Results
in the form of a confusion matrix and a series of metrics over each
class will be very good.
At the end, the top 10 most uncertain predictions will be shown.
"""
print(__doc__)
# Authors: Clay Woolam <[email protected]>
# License: BSD
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
from sklearn import datasets
from sklearn.semi_supervised import label_propagation
from sklearn.metrics import confusion_matrix, classification_report
digits = datasets.load_digits()
rng = np.random.RandomState(0)
indices = np.arange(len(digits.data))
rng.shuffle(indices)
X = digits.data[indices[:330]]
y = digits.target[indices[:330]]
images = digits.images[indices[:330]]
n_total_samples = len(y)
n_labeled_points = 30
indices = np.arange(n_total_samples)
unlabeled_set = indices[n_labeled_points:]
# hide the labels of the unlabeled points by marking them with -1
y_train = np.copy(y)
y_train[unlabeled_set] = -1
###############################################################################
# Learn with LabelSpreading
lp_model = label_propagation.LabelSpreading(gamma=0.25, max_iter=5)
lp_model.fit(X, y_train)
predicted_labels = lp_model.transduction_[unlabeled_set]
true_labels = y[unlabeled_set]
cm = confusion_matrix(true_labels, predicted_labels, labels=lp_model.classes_)
print("Label Spreading model: %d labeled & %d unlabeled points (%d total)" %
(n_labeled_points, n_total_samples - n_labeled_points, n_total_samples))
print(classification_report(true_labels, predicted_labels))
print("Confusion matrix")
print(cm)
# calculate uncertainty values for each transduced distribution
pred_entropies = stats.distributions.entropy(lp_model.label_distributions_.T)
# pick the top 10 most uncertain labels
uncertainty_index = np.argsort(pred_entropies)[-10:]
###############################################################################
# plot
f = plt.figure(figsize=(7, 5))
for index, image_index in enumerate(uncertainty_index):
image = images[image_index]
sub = f.add_subplot(2, 5, index + 1)
sub.imshow(image, cmap=plt.cm.gray_r)
plt.xticks([])
plt.yticks([])
sub.set_title('predict: %i\ntrue: %i' % (
lp_model.transduction_[image_index], y[image_index]))
f.suptitle('Learning with small amount of labeled data')
plt.show()
|
bsd-3-clause
|
chromium/chromium
|
tools/perf/cli_tools/flakiness_cli/analysis.py
|
5
|
2950
|
# Copyright 2018 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import fnmatch
from core.external_modules import numpy
from core.external_modules import pandas
SECONDS_IN_A_DAY = 60 * 60 * 24
def ResultsKey():
"""Create a DataFrame with information about result values."""
return pandas.DataFrame.from_records([
('P', 'PASS', '-', 0.0),
('N', 'NO DATA', ' ', 0.0),
('X', 'SKIP', '~', 0.0),
('Q', 'FAIL', 'F', 1.0),
('Y', 'NOTRUN', '?', 0.1),
('L', 'FLAKY', '!', 0.5),
], columns=('code', 'result', 'char', 'badness'), index='code')
def FilterBy(df, **kwargs):
"""Filter out a data frame by applying patterns to columns.
Args:
df: A data frame to filter.
**kwargs: Remaining keyword arguments are interpreted as column=pattern
specifications. The pattern may contain shell-style wildcards, only rows
whose value in the specified column matches the pattern will be kept in
the result. If the pattern is None, no filtering is done.
Returns:
The filtered data frame (a view on the original data frame).
"""
for column, pattern in kwargs.items():
if pattern is not None:
df = df[df[column].str.match(fnmatch.translate(pattern), case=False)]
return df
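# A minimal sketch of FilterBy on a tiny hand-built frame, assuming pandas is
# available; the builder names and patterns are arbitrary illustrative values.
def _ExampleFilterBy():
  df = pandas.DataFrame({
      'builder': ['linux-rel', 'win-rel', 'mac-dbg'],
      'test_suite': ['blink_web_tests'] * 3,
  })
  # Keep rows whose builder matches the shell-style pattern; passing None for
  # a column skips filtering on it.
  return FilterBy(df, builder='*-rel', test_suite=None)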
def CompactResults(results):
"""Aggregate results from multime runs into a single result code."""
def Compact(result):
if len(result) == 1:
return result # Test ran once; use same value.
elif all(r == 'Q' for r in result):
return 'Q' # All runs failed; test failed.
else:
return 'L' # Sometimes failed, sometimes not; test flaky.
return results.map({r: Compact(r) for r in results.unique()})
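# A minimal sketch of CompactResults, assuming pandas is available: a single
# run keeps its code, repeated failures compact to 'Q', and mixed runs
# compact to 'L' (flaky).
def _ExampleCompactResults():
  raw = pandas.Series(['P', 'QQ', 'QP', 'Q'])
  return CompactResults(raw)  # -> ['P', 'Q', 'L', 'Q']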
def AggregateBuilds(df, half_life):
"""Aggregate results from multiple builds of the same test configuration.
Also computes the "flakiness" of a test configuration.
Args:
df: A data frame with test results per build for a single test
configuration (i.e. fixed master, builder, test_type).
    half_life: A number of days. Build failures from this many days ago count
      half as much as a build failing today.
"""
results_key = ResultsKey()
df = df.copy()
df['result'] = CompactResults(df['result'])
df['status'] = df['result'].map(results_key['char'])
df['flakiness'] = df['result'].map(results_key['badness'])
time_ago = df['timestamp'].max() - df['timestamp']
days_ago = time_ago.dt.total_seconds() / SECONDS_IN_A_DAY
df['weight'] = numpy.power(0.5, days_ago / half_life)
df['flakiness'] *= df['weight']
latest_build = df['build_number'].iloc[0]
grouped = df.groupby(['builder', 'test_suite', 'test_case'])
df = grouped['flakiness'].sum().to_frame()
df['flakiness'] *= 100 / grouped['weight'].sum()
df['build_number'] = latest_build
df['status'] = grouped['status'].agg(lambda s: s.str.cat())
return df
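# A minimal numeric sketch of the half-life weighting used above, assuming
# numpy is available: a failure `half_life` days old counts half as much as
# one from today, and one two half-lives old a quarter as much.
def _ExampleHalfLifeWeights(half_life=7.0):
  days_ago = numpy.array([0.0, 7.0, 14.0])
  return numpy.power(0.5, days_ago / half_life)  # -> [1.0, 0.5, 0.25]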
|
bsd-3-clause
|
Nyker510/scikit-learn
|
sklearn/utils/random.py
|
234
|
10510
|
# Author: Hamzeh Alsalhi <[email protected]>
#
# License: BSD 3 clause
from __future__ import division
import numpy as np
import scipy.sparse as sp
import operator
import array
from sklearn.utils import check_random_state
from sklearn.utils.fixes import astype
from ._random import sample_without_replacement
__all__ = ['sample_without_replacement', 'choice']
# This is a backport of np.random.choice from numpy 1.7
# The function can be removed when we bump the requirements to >=1.7
def choice(a, size=None, replace=True, p=None, random_state=None):
"""
choice(a, size=None, replace=True, p=None)
Generates a random sample from a given 1-D array
.. versionadded:: 1.7.0
Parameters
-----------
a : 1-D array-like or int
If an ndarray, a random sample is generated from its elements.
If an int, the random sample is generated as if a was np.arange(n)
size : int or tuple of ints, optional
Output shape. Default is None, in which case a single value is
returned.
replace : boolean, optional
Whether the sample is with or without replacement.
p : 1-D array-like, optional
The probabilities associated with each entry in a.
        If not given the sample assumes a uniform distribution over all
entries in a.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
--------
samples : 1-D ndarray, shape (size,)
The generated random samples
Raises
-------
ValueError
If a is an int and less than zero, if a or p are not 1-dimensional,
if a is an array-like of size 0, if p is not a vector of
probabilities, if a and p have different lengths, or if
replace=False and the sample size is greater than the population
size
See Also
---------
randint, shuffle, permutation
Examples
---------
Generate a uniform random sample from np.arange(5) of size 3:
>>> np.random.choice(5, 3) # doctest: +SKIP
array([0, 3, 4])
>>> #This is equivalent to np.random.randint(0,5,3)
Generate a non-uniform random sample from np.arange(5) of size 3:
>>> np.random.choice(5, 3, p=[0.1, 0, 0.3, 0.6, 0]) # doctest: +SKIP
array([3, 3, 0])
Generate a uniform random sample from np.arange(5) of size 3 without
replacement:
>>> np.random.choice(5, 3, replace=False) # doctest: +SKIP
array([3,1,0])
>>> #This is equivalent to np.random.shuffle(np.arange(5))[:3]
Generate a non-uniform random sample from np.arange(5) of size
3 without replacement:
>>> np.random.choice(5, 3, replace=False, p=[0.1, 0, 0.3, 0.6, 0])
... # doctest: +SKIP
array([2, 3, 0])
Any of the above can be repeated with an arbitrary array-like
instead of just integers. For instance:
>>> aa_milne_arr = ['pooh', 'rabbit', 'piglet', 'Christopher']
>>> np.random.choice(aa_milne_arr, 5, p=[0.5, 0.1, 0.1, 0.3])
... # doctest: +SKIP
array(['pooh', 'pooh', 'pooh', 'Christopher', 'piglet'],
dtype='|S11')
"""
random_state = check_random_state(random_state)
# Format and Verify input
a = np.array(a, copy=False)
if a.ndim == 0:
try:
# __index__ must return an integer by python rules.
pop_size = operator.index(a.item())
except TypeError:
raise ValueError("a must be 1-dimensional or an integer")
if pop_size <= 0:
raise ValueError("a must be greater than 0")
elif a.ndim != 1:
raise ValueError("a must be 1-dimensional")
else:
pop_size = a.shape[0]
    if pop_size == 0:
        raise ValueError("a must be non-empty")
    if p is not None:
p = np.array(p, dtype=np.double, ndmin=1, copy=False)
if p.ndim != 1:
raise ValueError("p must be 1-dimensional")
if p.size != pop_size:
raise ValueError("a and p must have same size")
if np.any(p < 0):
raise ValueError("probabilities are not non-negative")
if not np.allclose(p.sum(), 1):
raise ValueError("probabilities do not sum to 1")
shape = size
if shape is not None:
size = np.prod(shape, dtype=np.intp)
else:
size = 1
# Actual sampling
if replace:
        if p is not None:
cdf = p.cumsum()
cdf /= cdf[-1]
uniform_samples = random_state.random_sample(shape)
idx = cdf.searchsorted(uniform_samples, side='right')
# searchsorted returns a scalar
idx = np.array(idx, copy=False)
else:
idx = random_state.randint(0, pop_size, size=shape)
else:
if size > pop_size:
raise ValueError("Cannot take a larger sample than "
"population when 'replace=False'")
        if p is not None:
if np.sum(p > 0) < size:
raise ValueError("Fewer non-zero entries in p than size")
n_uniq = 0
p = p.copy()
found = np.zeros(shape, dtype=np.int)
flat_found = found.ravel()
while n_uniq < size:
x = random_state.rand(size - n_uniq)
if n_uniq > 0:
p[flat_found[0:n_uniq]] = 0
cdf = np.cumsum(p)
cdf /= cdf[-1]
new = cdf.searchsorted(x, side='right')
_, unique_indices = np.unique(new, return_index=True)
unique_indices.sort()
new = new.take(unique_indices)
flat_found[n_uniq:n_uniq + new.size] = new
n_uniq += new.size
idx = found
else:
idx = random_state.permutation(pop_size)[:size]
if shape is not None:
idx.shape = shape
if shape is None and isinstance(idx, np.ndarray):
# In most cases a scalar will have been made an array
idx = idx.item(0)
# Use samples as indices for a if a is array-like
if a.ndim == 0:
return idx
if shape is not None and idx.ndim == 0:
# If size == () then the user requested a 0-d array as opposed to
# a scalar object when size is None. However a[idx] is always a
# scalar and not an array. So this makes sure the result is an
# array, taking into account that np.array(item) may not work
# for object arrays.
res = np.empty((), dtype=a.dtype)
res[()] = a[idx]
return res
return a[idx]
def random_choice_csc(n_samples, classes, class_probability=None,
random_state=None):
"""Generate a sparse random matrix given column class distributions
Parameters
----------
n_samples : int,
Number of samples to draw in each column.
classes : list of size n_outputs of arrays of size (n_classes,)
List of classes for each column.
class_probability : list of size n_outputs of arrays of size (n_classes,)
Optional (default=None). Class distribution of each column. If None the
uniform distribution is assumed.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
random_matrix : sparse csc matrix of size (n_samples, n_outputs)
"""
data = array.array('i')
indices = array.array('i')
indptr = array.array('i', [0])
for j in range(len(classes)):
classes[j] = np.asarray(classes[j])
if classes[j].dtype.kind != 'i':
raise ValueError("class dtype %s is not supported" %
classes[j].dtype)
classes[j] = astype(classes[j], np.int64, copy=False)
# use uniform distribution if no class_probability is given
if class_probability is None:
class_prob_j = np.empty(shape=classes[j].shape[0])
class_prob_j.fill(1 / classes[j].shape[0])
else:
class_prob_j = np.asarray(class_probability[j])
        if not np.allclose(np.sum(class_prob_j), 1.0):
raise ValueError("Probability array at index {0} does not sum to "
"one".format(j))
if class_prob_j.shape[0] != classes[j].shape[0]:
raise ValueError("classes[{0}] (length {1}) and "
"class_probability[{0}] (length {2}) have "
"different length.".format(j,
classes[j].shape[0],
class_prob_j.shape[0]))
# If 0 is not present in the classes insert it with a probability 0.0
if 0 not in classes[j]:
classes[j] = np.insert(classes[j], 0, 0)
class_prob_j = np.insert(class_prob_j, 0, 0.0)
# If there are nonzero classes choose randomly using class_probability
rng = check_random_state(random_state)
if classes[j].shape[0] > 1:
p_nonzero = 1 - class_prob_j[classes[j] == 0]
nnz = int(n_samples * p_nonzero)
ind_sample = sample_without_replacement(n_population=n_samples,
n_samples=nnz,
random_state=random_state)
indices.extend(ind_sample)
            # Normalize probabilities for the nonzero elements
classes_j_nonzero = classes[j] != 0
class_probability_nz = class_prob_j[classes_j_nonzero]
class_probability_nz_norm = (class_probability_nz /
np.sum(class_probability_nz))
classes_ind = np.searchsorted(class_probability_nz_norm.cumsum(),
rng.rand(nnz))
data.extend(classes[j][classes_j_nonzero][classes_ind])
indptr.append(len(indices))
return sp.csc_matrix((data, indices, indptr),
(n_samples, len(classes)),
dtype=int)
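# A minimal usage sketch for random_choice_csc; the class lists and
# probabilities below are arbitrary illustrative values that include the
# implicit zero class and sum exactly to one.
def _example_random_choice_csc():
    classes = [np.array([0, 1]), np.array([0, 2, 3])]
    probabilities = [np.array([0.5, 0.5]), np.array([0.5, 0.25, 0.25])]
    sampled = random_choice_csc(n_samples=10, classes=classes,
                                class_probability=probabilities,
                                random_state=0)
    return sampled.toarray()  # dense view of the (10, 2) sparse matrix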
|
bsd-3-clause
|
bmazin/ARCONS-pipeline
|
astrometry/CentroidCalc.py
|
1
|
20182
|
#!/bin/python
'''
Author: Paul Szypryt Date: October 29, 2012
Loads up an h5 observation file and uses the PyGuide package to calculate the centroid of an object,
for a given frame. Requires an initial x and y pixel position guess for the centroid. Uses this guess
if the PyGuide algorithm fails to find the centroid.
History:
March 25, 2013 - Now calculates the hour angle using the right ascension of the target object as a 1st
    order approximation. To get a more exact hour angle, we would need the right ascension of the center-of-
    rotation position, but this is unknown and non-constant.
Jan 6, 2015 -ABW- pulled functionality for getting the user guess for the centroid, and for using PyGuide, into separate functions
'''
import numpy as np
import tables
import sys
import ephem
import PyGuide as pg
import math
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from util.ObsFile import ObsFile
import os
from PyQt4 import QtGui
from PyQt4 import QtCore
from PyQt4.QtGui import *
import hotpix.hotPixels as hp
from tables import *
from util.FileName import FileName
from photometry.PSFphotometry import PSFphotometry
from util import utils
from util.popup import PopUp
import time
# Converting between degrees and radians. Inputs and numpy functions use different units.
d2r = np.pi/180.0
r2d = 180.0/np.pi
'''
Class to allow clicking of a pixel in plt.matshow and storing the xy position of the click in an array.
Used to pick an initial guess for the centroid.
'''
class MouseMonitor():
def __init__(self):
pass
def on_click(self,event):
if event.inaxes is self.ax:
self.xyguess = [event.xdata,event.ydata]
print 'Clicked: ',self.xyguess
def on_scroll_cbar(self,event):
if event.inaxes is self.fig.cbar.ax:
increment=0.05
currentClim = self.fig.cbar.mappable.get_clim()
currentRange = currentClim[1]-currentClim[0]
if event.button == 'up':
if QtGui.QApplication.keyboardModifiers()==QtCore.Qt.ControlModifier:
newClim = (currentClim[0]+increment*currentRange,currentClim[1])
elif QtGui.QApplication.keyboardModifiers()==QtCore.Qt.NoModifier:
newClim = (currentClim[0],currentClim[1]+increment*currentRange)
if event.button == 'down':
if QtGui.QApplication.keyboardModifiers()==QtCore.Qt.ControlModifier:
newClim = (currentClim[0]-increment*currentRange,currentClim[1])
elif QtGui.QApplication.keyboardModifiers()==QtCore.Qt.NoModifier:
newClim = (currentClim[0],currentClim[1]-increment*currentRange)
self.fig.cbar.mappable.set_clim(newClim)
self.fig.canvas.draw()
def connect(self):
self.cid = self.fig.canvas.mpl_connect('button_press_event', self.on_click)
self.fig.cbar = self.fig.colorbar(self.handleMatshow)
cid = self.fig.canvas.mpl_connect('scroll_event', self.on_scroll_cbar)
# Function for converting arcseconds to radians.
def arcsec_to_radians(total_arcsec):
total_degrees = total_arcsec/3600.0
total_radians = total_degrees*d2r
return total_radians
# Function for converting radians to arcseconds.
def radians_to_arcsec(total_radians):
total_degrees = total_radians*r2d
total_arcsec = total_degrees*3600.0
return total_arcsec
class headerDescription(tables.IsDescription):
RA = tables.StringCol(80)
Dec = tables.StringCol(80)
nCol = tables.UInt32Col(dflt=-1)
nRow = tables.UInt32Col(dflt=-1)
# Function that saves the centroid times, positions, hour angles, and flags to an h5 file.
def saveTable(centroidListFileName,paramsList,timeList,xPositionList,yPositionList,hourAngleList,flagList):
'''
Inputs:
centroidListFileName - name of centroid file. If not a full path automatically put it in $MKID_PROC_PATH/centroidListFiles/
paramsList - contains info for header
timeList - list of times at which centroids were calculated
xPositionList - list of x positions at specified times
yPositionList - list of y positions
hourAngleList - list of hour angles at specified times
flagList - flag corresponding to centroid. 0 --> good, 1 --> failed
'''
# Check to see if a Centroid List File exists with name centroidListFileName.
# If it does not exist, create a Centroid List File with that name.
if os.path.isabs(centroidListFileName) == True:
fullCentroidListFileName = centroidListFileName
else:
scratchDir = os.getenv('MKID_PROC_PATH')
centroidDir = os.path.join(scratchDir,'centroidListFiles')
fullCentroidListFileName = os.path.join(centroidDir,centroidListFileName)
# Attempt to open h5 file with name fullCentroidListFileName. If cannot, throw exception.
try:
centroidListFile = tables.openFile(fullCentroidListFileName,mode='w')
except:
print 'Error: Couldn\'t create centroid list file, ',fullCentroidListFileName
return
print 'writing to', fullCentroidListFileName
# Set up and write h5 table with relevant parameters, centroid times and positions, hour angles, and flags.
headerGroupName = 'header'
headerTableName = 'header'
nRowColName = 'nRow'
nColColName = 'nCol'
RAColName = 'RA'
DecColName = 'Dec'
headerGroup = centroidListFile.createGroup("/", headerGroupName, 'Header')
headerTable = centroidListFile.createTable(headerGroup, headerTableName, headerDescription,
'Header Info')
header = headerTable.row
header[nColColName] = paramsList[0]
header[nRowColName] = paramsList[1]
header[RAColName] = paramsList[2]
header[DecColName] = paramsList[3]
header.append()
centroidgroup = centroidListFile.createGroup(centroidListFile.root,'centroidlist','Table of times, x positions, y positions, hour angles, and flags')
#paramstable = tables.Array(centroidgroup,'params', object=paramsList, title = 'Object and array params')
timestable = tables.Array(centroidgroup,'times',object=timeList,title='Times at which centroids were calculated')
xpostable = tables.Array(centroidgroup,'xPositions',object=xPositionList,title='X centroid positions')
ypostable = tables.Array(centroidgroup,'yPositions',object=yPositionList,title='Y centroid positions')
hatable = tables.Array(centroidgroup,'hourAngles',object=hourAngleList,title='Hour angles at specified times')
flagtable = tables.Array(centroidgroup,'flags',object=flagList,title='Flags whether or not guess had to be used, 1 for guess used')
centroidListFile.flush()
centroidListFile.close()
def centroidImage(image,xyguess,radiusOfSearch = 6,doDS9=True,usePsfFit=False):
'''
ABW
This function finds the centroid of a star in the image
'''
#remove any undefined values
image[np.invert(np.isfinite(image))]=0.
#Assume anywhere with 0 counts is a dead pixel
deadMask = 1.0*(image==0)
#ignore saturated mask
satMask = np.zeros((len(deadMask),len(deadMask[0])))
# Specify CCDInfo (bias,readNoise,ccdGain,satLevel)
ccd = pg.CCDInfo(0,0.00001,1,2500)
xyguessPyguide = np.subtract(xyguess,(0.5,0.5)) #account for how pyguide puts pixel coordinates
    pyguide_output = pg.centroid(image,deadMask,satMask,xyguessPyguide,radiusOfSearch,ccd,0,False,verbosity=0, doDS9=doDS9) #Added by JvE May 31 2013
# Use PyGuide centroid positions, if algorithm failed, use xy guess center positions instead
try:
xycenterGuide = [float(pyguide_output.xyCtr[0]),float(pyguide_output.xyCtr[1])]
        xycenterGuide = list(np.add(xycenterGuide,(0.5,0.5))) #account for how pyguide puts pixel coordinates
flag = 0
except TypeError:
xycenterGuide = xyguess
flag = 1
xycenter=xycenterGuide
if usePsfFit:
psfPhot = PSFphotometry(image,centroid=[xycenterGuide],verbose=True)
psfDict = psfPhot.PSFfit(aper_radius=radiusOfSearch)
xycenterPsf = [psfDict['parameters'][2],psfDict['parameters'][3]]
if psfDict['flag'] == 0:
xycenter = xycenterPsf
flag = 0
else:
print 'PSF fit failed with flag: ', psfDict['flag']
#xycenter = xycenterGuide
flag = 1
outDict = {'xycenter':xycenter,'flag':flag}
if usePsfFit:
outDict['xycenterGuide'] = xycenterGuide
outDict['xycenterPsf'] = xycenterPsf
return outDict
def getUserCentroidGuess(image,norm=None):
'''
ABW
This function asks the user to click on the star in the image.
'''
flag=1
xyguess = [0,0]
map = MouseMonitor()
map.fig = plt.figure()
map.ax = map.fig.add_subplot(111)
map.ax.set_title('Centroid Guess')
map.handleMatshow = map.ax.matshow(image,cmap = plt.cm.gray, origin = 'lower', norm=norm)
map.connect()
plt.show()
#Get user to click on approx. centroid location
try:
xyguess = map.xyguess
flag=0
print 'Guess = ' + str(xyguess)
except AttributeError:
pass
return xyguess, flag
def quickCentroid(images, radiusOfSearch=10, maxMove = 4,usePsfFit=False):
'''
Author: Alex Walter
Date: Jan 6, 2015
This function finds centroids automatically on a list of images (such as an image stack).
It asks the user for a guess on the first image.
Then uses centroidImage() to find the centroid.
It uses the centroid of the previous image as the guess for the next image, etc.
If the centroiding fails or if the centroid moves too far, it asks the user for another guess
Inputs:
images - list of images
radiusOfSearch - radius in pixels around guess to look for a centroid
maxMove - max distance in pixels from previous centroid before it asks the user to make a new guess (for telescope moves)
usePsfFit - option to use PSF fitting to get centroid (usually a bit better guess)
Returns:
xPositionList
yPositionList
flagList
'''
xPositionList=np.zeros(len(images)) - 1
yPositionList=np.copy(xPositionList)
flagList = np.zeros(len(images))
flag = 1
k=-1
while flag>0:
k+=1
print k,': Looking for star...'
xyguess, flag = getUserCentroidGuess(images[k])
xPositionList[k]=xyguess[0]
yPositionList[k]=xyguess[1]
flagList[k] = flag
for i in range(k,len(images)):
if flag>0:
xyguess, flag = getUserCentroidGuess(images[i])
if flag>0:
flagList[i] = flag
print i,': No star selected'
continue
centroidDict = centroidImage(images[i],xyguess,radiusOfSearch = radiusOfSearch,doDS9=False,usePsfFit=usePsfFit)
xycenter,flag = centroidDict['xycenter'],centroidDict['flag']
if flag==0 and np.linalg.norm(np.asarray(xycenter)-np.asarray(xyguess)) < (maxMove):
#centroiding successful and didn't move too far!
xPositionList[i]=xycenter[0]
yPositionList[i]=xycenter[1]
flagList[i] = flag
xyguess=xycenter
print i,': Success! ',xycenter
continue
xyguess, flag = getUserCentroidGuess(images[i])
if flag>0:
flagList[i] = flag
print i,': Failed. No star selected'
continue
centroidDict = centroidImage(images[i],xyguess,radiusOfSearch = radiusOfSearch,doDS9=False,usePsfFit=usePsfFit)
xycenter,flag = centroidDict['xycenter'],centroidDict['flag']
xPositionList[i]=xycenter[0]
yPositionList[i]=xycenter[1]
flagList[i] = flag
xyguess=xycenter
print i,': Success! Star found. ',xycenter
return xPositionList,yPositionList,flagList
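# A minimal sketch of quickCentroid on a synthetic image stack, assuming an
# interactive matplotlib backend (the first frame pops up a window and waits
# for a click to seed the guess).  The Gaussian "star" drifts slowly in x;
# all of the frame sizes and positions below are arbitrary illustrative values.
def exampleQuickCentroid():
    yy, xx = np.mgrid[0:46, 0:44]
    images = [10. + 500.*np.exp(-((xx - 20. - 0.2*i)**2 + (yy - 25.)**2)/8.)
              for i in range(10)]
    xPos, yPos, flags = quickCentroid(images, radiusOfSearch=10, maxMove=4, usePsfFit=False)
    print 'x positions: ', xPos
    return xPos, yPos, flags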
def centroidCalc(obsFile, centroid_RA, centroid_DEC, outputFileName=None, guessTime=300, integrationTime=30,
secondMaxCountsForDisplay=500, HA_offset=16.0, xyapprox=None, radiusOfSearch = 6, usePsfFit=False):
'''
Shifted bulk of Paul's 'main' level code into this function. JvE 5/22/2013
INPUTS (added by JvE):
obsFile - a pre-loaded obsfile instance.
xyapprox = [x,y] - integer x,y approximate location of the centroid to use as the initial
guess. If not provided, will display an image and wait for user to click
on the estimated centroid position.
All other inputs as previously hard-coded, currently undocumented. (See __main__ block).
'''
# Create an instance of class obsFile.
ob = obsFile
# Center of rotation positions of the array. Decided that these values weren't actually necessary.
# centerX = '30.5' #Dummy values - actually doesn't matter what's entered here.
# centerY = '30.5'
# Get array size information from obs file
gridHeight = ob.nCol
gridWidth = ob.nRow
# Create an array of array and target specific parameters to include in the output file header.
paramsList = [gridHeight,gridWidth,centroid_RA,centroid_DEC]
# Create output filename to save centroid data
if outputFileName is None:
#If nothing specified, create a default name based on the filename of the input obsFile instance.
centroidListFileName=FileName(obsFile).centroidList()
else:
centroidListFileName=outputFileName
print 'Saving to: ',centroidListFileName
centroidListFolder = os.path.dirname(centroidListFileName)
#app = QApplication(sys.argv) #Commented out for now to avoid possible issues with x-forwarding if running remotely.
#----------------------------------------------------
# Get exptime and LST from header.
exptime = ob.getFromHeader('exptime')
# Can get a more accurate LST by using unix time in header. Probably off by a few seconds at most.
original_lst = ob.getFromHeader('lst')
print 'Original LST from telescope:', original_lst
# Initial RA and LST
centroid_RA_radians = ephem.hours(centroid_RA).real
centroid_RA_arcsec = radians_to_arcsec(centroid_RA_radians)
centroid_DEC_radians = ephem.degrees(centroid_DEC).real
centroid_DEC_arcsec = radians_to_arcsec(centroid_DEC_radians)
original_lst_radians = ephem.hours(original_lst).real
original_lst_seconds = radians_to_arcsec(original_lst_radians)/15.0
# Move the lst to the midpoint of the frame rather than the start
original_lst_seconds += float(integrationTime)/2.
# Create saturated pixel mask to apply to PyGuide algorithm.
print 'Creating saturation mask...'
nFrames = int(np.ceil(float(exptime)/float(integrationTime)))
# Generate dead pixel mask, invert obsFile deadMask format to put it into PyGuide format
print 'Creating dead mask...'
deadMask = ob.getDeadPixels()
deadMask = -1*deadMask + 1
# Initialize arrays that will be saved in h5 file. 1 array element per centroid frame.
timeList=[]
xPositionList=[]
yPositionList=[]
hourAngleList=[]
flagList=[]
debugPlots = []
centroidDictList = []
flag=0
print 'Retrieving images...'
for iFrame in range(exptime):
# Integrate over the guess time. Click a pixel in the plot corresponding to the xy center guess. This will be used to centroid for the duration of guessTime.
if iFrame%guessTime == 0:
# Use obsFile to get guessTime image.
imageInformation = ob.getPixelCountImage(firstSec=iFrame, integrationTime= guessTime, weighted=True,fluxWeighted=False, getRawCount=False,scaleByEffInt=False)
image=imageInformation['image']
if xyapprox is None:
#Get user to click on approx. centroid location
# Set a normalization to make the matshow plot more intuitive.
norm = mpl.colors.Normalize(vmin=0,vmax=secondMaxCountsForDisplay*guessTime)
xyguess,flag=getUserCentroidGuess(image,norm)
else:
#Use guess supplied by caller.
xyguess = xyapprox
print 'Guess = ' + str(xyguess)
# Centroid an image that has been integrated over integrationTime.
if iFrame%integrationTime == 0:
# Use obsFile to get integrationTime image.
imageInformation = ob.getPixelCountImage(firstSec=iFrame, integrationTime= integrationTime, weighted=True,fluxWeighted=False, getRawCount=False,scaleByEffInt=False)
image=imageInformation['image']
centroidDict = centroidImage(image,xyguess,radiusOfSearch,doDS9=True,usePsfFit=usePsfFit)
xycenter,flag = centroidDict['xycenter'],centroidDict['flag']
centroidDictList.append(centroidDict)
if flag==0:
print 'Calculated [x,y] center = ' + str((xycenter)) + ' for frame ' + str(iFrame) +'.'
else:
if usePsfFit:
print 'Cannot centroid frame ' + str(iFrame) + ' by psf fit, using pyguide center instead'
else:
print 'Cannot centroid frame ' + str(iFrame) + ' by pyguide, using guess instead'
# Begin RA/DEC mapping
# Calculate lst for a given frame
current_lst_seconds = original_lst_seconds + iFrame
current_lst_radians = arcsec_to_radians(current_lst_seconds*15.0)
# Calculate hour angle for a given frame. Include a constant offset for instrumental rotation.
HA_variable = current_lst_radians - centroid_RA_radians
HA_static = HA_offset*d2r
HA_current = HA_variable + HA_static
# Make lists to save to h5 file
timeList.append(iFrame)
xPositionList.append(xycenter[0])
yPositionList.append(xycenter[1])
hourAngleList.append(HA_current)
flagList.append(flag)
# Save to h5 table
saveTable(centroidListFileName=centroidListFileName,paramsList=paramsList,timeList=timeList,xPositionList=xPositionList,yPositionList=yPositionList,hourAngleList=hourAngleList,flagList=flagList)
outDict = {'xPositionList':xPositionList,'yPositionList':yPositionList,'flagList':flagList,
'xycenterGuide':[centroidDict['xycenterGuide'] for centroidDict in centroidDictList]}
if usePsfFit:
outDict['xycenterPsf'] = [centroidDict['xycenterPsf'] for centroidDict in centroidDictList]
return outDict
# Test Function / Example
if __name__=='__main__':
# Obs file info
run = 'PAL2012'
sunsetDate='20121208'
utcDate='20121209'
centroidTimestamp = '20121209-120530'
calTimestamp = '20121209-131132'
# Specify input parameters.
centroid_RA = '09:26:38.7'
centroid_DEC = '36:24:02.4'
HA_offset = 16.0
guessTime = 300
integrationTime=30
secondMaxCountsForDisplay = 500
obsFn = FileName(run=run,date=sunsetDate,tstamp=centroidTimestamp).obs()
wfn = FileName(run=run,date=sunsetDate,tstamp=calTimestamp).calSoln()
ffn = FileName(run=run,date=sunsetDate,tstamp=calTimestamp).flatSoln()
ffn = '/Scratch/flatCalSolnFiles/20121207/flatsol_20121207.h5'
# Create ObsFile instance
ob = ObsFile(obsFn)
# Load wavelength and flat cal solutions
ob.loadWvlCalFile(wfn)
ob.loadFlatCalFile(ffn)
ob.setWvlCutoffs(3000,8000)
# Load/generate hot pixel mask file
index1 = obsFn.find('_')
index2 = obsFn.find('-')
hotPixFn = '/Scratch/timeMasks/timeMask' + obsFn[index1:]
if not os.path.exists(hotPixFn):
hp.findHotPixels(obsFn,hotPixFn)
print "Flux file pixel mask saved to %s"%(hotPixFn)
ob.loadHotPixCalFile(hotPixFn,switchOnMask=False)
print "Hot pixel mask loaded %s"%(hotPixFn)
centroidCalc(ob, centroid_RA, centroid_DEC, guessTime=300, integrationTime=30,
secondMaxCountsForDisplay=500)
|
gpl-2.0
|
ltiao/networkx
|
examples/drawing/unix_email.py
|
26
|
2678
|
#!/usr/bin/env python
"""
Create a directed graph, allowing multiple edges and self loops, from
a unix mailbox. The nodes are email addresses with links
that point from the sender to the receivers. The edge data
is a Python email.Message object which contains all of
the email message data.
This example shows the power of a MultiDiGraph to hold edge data
of arbitrary Python objects (in this case an email message per edge).
By default, load the sample unix email mailbox called "unix_email.mbox".
You can load your own mailbox by naming it on the command line, e.g.
python unix_email.py /var/spool/mail/username
"""
__author__ = """Aric Hagberg ([email protected])"""
# Copyright (C) 2005 by
# Aric Hagberg <[email protected]>
# Dan Schult <[email protected]>
# Pieter Swart <[email protected]>
# All rights reserved.
# BSD license.
import email
from email.utils import getaddresses,parseaddr
import mailbox
import sys
# unix mailbox recipe
# see http://www.python.org/doc/current/lib/module-mailbox.html
def msgfactory(fp):
try:
return email.message_from_file(fp)
except email.Errors.MessageParseError:
# Don't return None since that will stop the mailbox iterator
return ''
if __name__ == '__main__':
import networkx as nx
try:
import matplotlib.pyplot as plt
except:
pass
if len(sys.argv)==1:
filePath = "unix_email.mbox"
else:
filePath = sys.argv[1]
mbox = mailbox.mbox(filePath, msgfactory) # parse unix mailbox
G=nx.MultiDiGraph() # create empty graph
# parse each messages and build graph
for msg in mbox: # msg is python email.Message.Message object
(source_name,source_addr) = parseaddr(msg['From']) # sender
# get all recipients
# see http://www.python.org/doc/current/lib/module-email.Utils.html
tos = msg.get_all('to', [])
ccs = msg.get_all('cc', [])
resent_tos = msg.get_all('resent-to', [])
resent_ccs = msg.get_all('resent-cc', [])
all_recipients = getaddresses(tos + ccs + resent_tos + resent_ccs)
# now add the edges for this mail message
for (target_name,target_addr) in all_recipients:
G.add_edge(source_addr,target_addr,message=msg)
# print edges with message subject
for (u,v,d) in G.edges(data=True):
print("From: %s To: %s Subject: %s"%(u,v,d['message']["Subject"]))
try: # draw
pos=nx.spring_layout(G,iterations=10)
nx.draw(G,pos,node_size=0,alpha=0.4,edge_color='r',font_size=16)
plt.savefig("unix_email.png")
plt.show()
except: # matplotlib not available
pass
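    # A minimal follow-on sketch: rank senders by how many messages they sent;
    # each message contributes one out-edge in the MultiDiGraph built above.
    out_degrees = dict(G.out_degree())
    top_senders = sorted(out_degrees.items(), key=lambda item: item[1], reverse=True)
    print("Top senders: %s" % (top_senders[:5],))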
|
bsd-3-clause
|
shahankhatch/scikit-learn
|
sklearn/tests/test_naive_bayes.py
|
70
|
17509
|
import pickle
from io import BytesIO
import numpy as np
import scipy.sparse
from sklearn.datasets import load_digits, load_iris
from sklearn.cross_validation import cross_val_score, train_test_split
from sklearn.externals.six.moves import zip
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_greater
from sklearn.naive_bayes import GaussianNB, BernoulliNB, MultinomialNB
# Data is just 6 separable points in the plane
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]])
y = np.array([1, 1, 1, 2, 2, 2])
# A bit more random tests
rng = np.random.RandomState(0)
X1 = rng.normal(size=(10, 3))
y1 = (rng.normal(size=(10)) > 0).astype(np.int)
# Data is 6 random integer points in a 100 dimensional space classified to
# three classes.
X2 = rng.randint(5, size=(6, 100))
y2 = np.array([1, 1, 2, 2, 3, 3])
def test_gnb():
# Gaussian Naive Bayes classification.
# This checks that GaussianNB implements fit and predict and returns
# correct values for a simple toy dataset.
clf = GaussianNB()
y_pred = clf.fit(X, y).predict(X)
assert_array_equal(y_pred, y)
y_pred_proba = clf.predict_proba(X)
y_pred_log_proba = clf.predict_log_proba(X)
assert_array_almost_equal(np.log(y_pred_proba), y_pred_log_proba, 8)
# Test whether label mismatch between target y and classes raises
# an Error
# FIXME Remove this test once the more general partial_fit tests are merged
assert_raises(ValueError, GaussianNB().partial_fit, X, y, classes=[0, 1])
def test_gnb_prior():
# Test whether class priors are properly set.
clf = GaussianNB().fit(X, y)
assert_array_almost_equal(np.array([3, 3]) / 6.0,
clf.class_prior_, 8)
clf.fit(X1, y1)
# Check that the class priors sum to 1
assert_array_almost_equal(clf.class_prior_.sum(), 1)
def test_gnb_sample_weight():
"""Test whether sample weights are properly used in GNB. """
# Sample weights all being 1 should not change results
sw = np.ones(6)
clf = GaussianNB().fit(X, y)
clf_sw = GaussianNB().fit(X, y, sw)
assert_array_almost_equal(clf.theta_, clf_sw.theta_)
assert_array_almost_equal(clf.sigma_, clf_sw.sigma_)
# Fitting twice with half sample-weights should result
# in same result as fitting once with full weights
sw = rng.rand(y.shape[0])
clf1 = GaussianNB().fit(X, y, sample_weight=sw)
clf2 = GaussianNB().partial_fit(X, y, classes=[1, 2], sample_weight=sw / 2)
clf2.partial_fit(X, y, sample_weight=sw / 2)
assert_array_almost_equal(clf1.theta_, clf2.theta_)
assert_array_almost_equal(clf1.sigma_, clf2.sigma_)
# Check that duplicate entries and correspondingly increased sample
# weights yield the same result
ind = rng.randint(0, X.shape[0], 20)
sample_weight = np.bincount(ind, minlength=X.shape[0])
clf_dupl = GaussianNB().fit(X[ind], y[ind])
clf_sw = GaussianNB().fit(X, y, sample_weight)
assert_array_almost_equal(clf_dupl.theta_, clf_sw.theta_)
assert_array_almost_equal(clf_dupl.sigma_, clf_sw.sigma_)
def test_discrete_prior():
# Test whether class priors are properly set.
for cls in [BernoulliNB, MultinomialNB]:
clf = cls().fit(X2, y2)
assert_array_almost_equal(np.log(np.array([2, 2, 2]) / 6.0),
clf.class_log_prior_, 8)
def test_mnnb():
# Test Multinomial Naive Bayes classification.
# This checks that MultinomialNB implements fit and predict and returns
# correct values for a simple toy dataset.
for X in [X2, scipy.sparse.csr_matrix(X2)]:
# Check the ability to predict the learning set.
clf = MultinomialNB()
assert_raises(ValueError, clf.fit, -X, y2)
y_pred = clf.fit(X, y2).predict(X)
assert_array_equal(y_pred, y2)
# Verify that np.log(clf.predict_proba(X)) gives the same results as
# clf.predict_log_proba(X)
y_pred_proba = clf.predict_proba(X)
y_pred_log_proba = clf.predict_log_proba(X)
assert_array_almost_equal(np.log(y_pred_proba), y_pred_log_proba, 8)
# Check that incremental fitting yields the same results
clf2 = MultinomialNB()
clf2.partial_fit(X[:2], y2[:2], classes=np.unique(y2))
clf2.partial_fit(X[2:5], y2[2:5])
clf2.partial_fit(X[5:], y2[5:])
y_pred2 = clf2.predict(X)
assert_array_equal(y_pred2, y2)
y_pred_proba2 = clf2.predict_proba(X)
y_pred_log_proba2 = clf2.predict_log_proba(X)
assert_array_almost_equal(np.log(y_pred_proba2), y_pred_log_proba2, 8)
assert_array_almost_equal(y_pred_proba2, y_pred_proba)
assert_array_almost_equal(y_pred_log_proba2, y_pred_log_proba)
# Partial fit on the whole data at once should be the same as fit too
clf3 = MultinomialNB()
clf3.partial_fit(X, y2, classes=np.unique(y2))
y_pred3 = clf3.predict(X)
assert_array_equal(y_pred3, y2)
y_pred_proba3 = clf3.predict_proba(X)
y_pred_log_proba3 = clf3.predict_log_proba(X)
assert_array_almost_equal(np.log(y_pred_proba3), y_pred_log_proba3, 8)
assert_array_almost_equal(y_pred_proba3, y_pred_proba)
assert_array_almost_equal(y_pred_log_proba3, y_pred_log_proba)
def check_partial_fit(cls):
clf1 = cls()
clf1.fit([[0, 1], [1, 0]], [0, 1])
clf2 = cls()
clf2.partial_fit([[0, 1], [1, 0]], [0, 1], classes=[0, 1])
assert_array_equal(clf1.class_count_, clf2.class_count_)
assert_array_equal(clf1.feature_count_, clf2.feature_count_)
clf3 = cls()
clf3.partial_fit([[0, 1]], [0], classes=[0, 1])
clf3.partial_fit([[1, 0]], [1])
assert_array_equal(clf1.class_count_, clf3.class_count_)
assert_array_equal(clf1.feature_count_, clf3.feature_count_)
def test_discretenb_partial_fit():
for cls in [MultinomialNB, BernoulliNB]:
yield check_partial_fit, cls
def test_gnb_partial_fit():
clf = GaussianNB().fit(X, y)
clf_pf = GaussianNB().partial_fit(X, y, np.unique(y))
assert_array_almost_equal(clf.theta_, clf_pf.theta_)
assert_array_almost_equal(clf.sigma_, clf_pf.sigma_)
assert_array_almost_equal(clf.class_prior_, clf_pf.class_prior_)
clf_pf2 = GaussianNB().partial_fit(X[0::2, :], y[0::2], np.unique(y))
clf_pf2.partial_fit(X[1::2], y[1::2])
assert_array_almost_equal(clf.theta_, clf_pf2.theta_)
assert_array_almost_equal(clf.sigma_, clf_pf2.sigma_)
assert_array_almost_equal(clf.class_prior_, clf_pf2.class_prior_)
def test_discretenb_pickle():
# Test picklability of discrete naive Bayes classifiers
for cls in [BernoulliNB, MultinomialNB, GaussianNB]:
clf = cls().fit(X2, y2)
y_pred = clf.predict(X2)
store = BytesIO()
pickle.dump(clf, store)
clf = pickle.load(BytesIO(store.getvalue()))
assert_array_equal(y_pred, clf.predict(X2))
if cls is not GaussianNB:
# TODO re-enable me when partial_fit is implemented for GaussianNB
# Test pickling of estimator trained with partial_fit
clf2 = cls().partial_fit(X2[:3], y2[:3], classes=np.unique(y2))
clf2.partial_fit(X2[3:], y2[3:])
store = BytesIO()
pickle.dump(clf2, store)
clf2 = pickle.load(BytesIO(store.getvalue()))
assert_array_equal(y_pred, clf2.predict(X2))
def test_input_check_fit():
# Test input checks for the fit method
for cls in [BernoulliNB, MultinomialNB, GaussianNB]:
# check shape consistency for number of samples at fit time
assert_raises(ValueError, cls().fit, X2, y2[:-1])
# check shape consistency for number of input features at predict time
clf = cls().fit(X2, y2)
assert_raises(ValueError, clf.predict, X2[:, :-1])
def test_input_check_partial_fit():
for cls in [BernoulliNB, MultinomialNB]:
# check shape consistency
assert_raises(ValueError, cls().partial_fit, X2, y2[:-1],
classes=np.unique(y2))
# classes is required for first call to partial fit
assert_raises(ValueError, cls().partial_fit, X2, y2)
# check consistency of consecutive classes values
clf = cls()
clf.partial_fit(X2, y2, classes=np.unique(y2))
assert_raises(ValueError, clf.partial_fit, X2, y2,
classes=np.arange(42))
# check consistency of input shape for partial_fit
assert_raises(ValueError, clf.partial_fit, X2[:, :-1], y2)
# check consistency of input shape for predict
assert_raises(ValueError, clf.predict, X2[:, :-1])
def test_discretenb_predict_proba():
# Test discrete NB classes' probability scores
# The 100s below distinguish Bernoulli from multinomial.
# FIXME: write a test to show this.
X_bernoulli = [[1, 100, 0], [0, 1, 0], [0, 100, 1]]
X_multinomial = [[0, 1], [1, 3], [4, 0]]
# test binary case (1-d output)
y = [0, 0, 2] # 2 is regression test for binary case, 02e673
for cls, X in zip([BernoulliNB, MultinomialNB],
[X_bernoulli, X_multinomial]):
clf = cls().fit(X, y)
assert_equal(clf.predict(X[-1:]), 2)
assert_equal(clf.predict_proba([X[0]]).shape, (1, 2))
assert_array_almost_equal(clf.predict_proba(X[:2]).sum(axis=1),
np.array([1., 1.]), 6)
# test multiclass case (2-d output, must sum to one)
y = [0, 1, 2]
for cls, X in zip([BernoulliNB, MultinomialNB],
[X_bernoulli, X_multinomial]):
clf = cls().fit(X, y)
assert_equal(clf.predict_proba(X[0:1]).shape, (1, 3))
assert_equal(clf.predict_proba(X[:2]).shape, (2, 3))
assert_almost_equal(np.sum(clf.predict_proba([X[1]])), 1)
assert_almost_equal(np.sum(clf.predict_proba([X[-1]])), 1)
assert_almost_equal(np.sum(np.exp(clf.class_log_prior_)), 1)
assert_almost_equal(np.sum(np.exp(clf.intercept_)), 1)
def test_discretenb_uniform_prior():
# Test whether discrete NB classes fit a uniform prior
# when fit_prior=False and class_prior=None
for cls in [BernoulliNB, MultinomialNB]:
clf = cls()
clf.set_params(fit_prior=False)
clf.fit([[0], [0], [1]], [0, 0, 1])
prior = np.exp(clf.class_log_prior_)
assert_array_equal(prior, np.array([.5, .5]))
def test_discretenb_provide_prior():
# Test whether discrete NB classes use provided prior
for cls in [BernoulliNB, MultinomialNB]:
clf = cls(class_prior=[0.5, 0.5])
clf.fit([[0], [0], [1]], [0, 0, 1])
prior = np.exp(clf.class_log_prior_)
assert_array_equal(prior, np.array([.5, .5]))
# Inconsistent number of classes with prior
assert_raises(ValueError, clf.fit, [[0], [1], [2]], [0, 1, 2])
assert_raises(ValueError, clf.partial_fit, [[0], [1]], [0, 1],
classes=[0, 1, 1])
def test_discretenb_provide_prior_with_partial_fit():
# Test whether discrete NB classes use provided prior
# when using partial_fit
iris = load_iris()
iris_data1, iris_data2, iris_target1, iris_target2 = train_test_split(
iris.data, iris.target, test_size=0.4, random_state=415)
for cls in [BernoulliNB, MultinomialNB]:
for prior in [None, [0.3, 0.3, 0.4]]:
clf_full = cls(class_prior=prior)
clf_full.fit(iris.data, iris.target)
clf_partial = cls(class_prior=prior)
clf_partial.partial_fit(iris_data1, iris_target1,
classes=[0, 1, 2])
clf_partial.partial_fit(iris_data2, iris_target2)
assert_array_almost_equal(clf_full.class_log_prior_,
clf_partial.class_log_prior_)
def test_sample_weight_multiclass():
for cls in [BernoulliNB, MultinomialNB]:
# check shape consistency for number of samples at fit time
yield check_sample_weight_multiclass, cls
def check_sample_weight_multiclass(cls):
X = [
[0, 0, 1],
[0, 1, 1],
[0, 1, 1],
[1, 0, 0],
]
y = [0, 0, 1, 2]
sample_weight = np.array([1, 1, 2, 2], dtype=np.float)
sample_weight /= sample_weight.sum()
clf = cls().fit(X, y, sample_weight=sample_weight)
assert_array_equal(clf.predict(X), [0, 1, 1, 2])
# Check sample weight using the partial_fit method
clf = cls()
clf.partial_fit(X[:2], y[:2], classes=[0, 1, 2],
sample_weight=sample_weight[:2])
clf.partial_fit(X[2:3], y[2:3], sample_weight=sample_weight[2:3])
clf.partial_fit(X[3:], y[3:], sample_weight=sample_weight[3:])
assert_array_equal(clf.predict(X), [0, 1, 1, 2])
def test_sample_weight_mnb():
clf = MultinomialNB()
clf.fit([[1, 2], [1, 2], [1, 0]],
[0, 0, 1],
sample_weight=[1, 1, 4])
assert_array_equal(clf.predict([[1, 0]]), [1])
positive_prior = np.exp(clf.intercept_[0])
assert_array_almost_equal([1 - positive_prior, positive_prior],
[1 / 3., 2 / 3.])
def test_coef_intercept_shape():
# coef_ and intercept_ should have shapes as in other linear models.
# Non-regression test for issue #2127.
X = [[1, 0, 0], [1, 1, 1]]
y = [1, 2] # binary classification
for clf in [MultinomialNB(), BernoulliNB()]:
clf.fit(X, y)
assert_equal(clf.coef_.shape, (1, 3))
assert_equal(clf.intercept_.shape, (1,))
def test_check_accuracy_on_digits():
# Non regression test to make sure that any further refactoring / optim
# of the NB models do not harm the performance on a slightly non-linearly
# separable dataset
digits = load_digits()
X, y = digits.data, digits.target
binary_3v8 = np.logical_or(digits.target == 3, digits.target == 8)
X_3v8, y_3v8 = X[binary_3v8], y[binary_3v8]
# Multinomial NB
scores = cross_val_score(MultinomialNB(alpha=10), X, y, cv=10)
assert_greater(scores.mean(), 0.86)
scores = cross_val_score(MultinomialNB(alpha=10), X_3v8, y_3v8, cv=10)
assert_greater(scores.mean(), 0.94)
# Bernoulli NB
scores = cross_val_score(BernoulliNB(alpha=10), X > 4, y, cv=10)
assert_greater(scores.mean(), 0.83)
scores = cross_val_score(BernoulliNB(alpha=10), X_3v8 > 4, y_3v8, cv=10)
assert_greater(scores.mean(), 0.92)
# Gaussian NB
scores = cross_val_score(GaussianNB(), X, y, cv=10)
assert_greater(scores.mean(), 0.77)
scores = cross_val_score(GaussianNB(), X_3v8, y_3v8, cv=10)
assert_greater(scores.mean(), 0.86)
def test_feature_log_prob_bnb():
# Test for issue #4268.
# Tests that the feature log prob value computed by BernoulliNB when
# alpha=1.0 is equal to the expression given in Manning, Raghavan,
# and Schuetze's "Introduction to Information Retrieval" book:
# http://nlp.stanford.edu/IR-book/html/htmledition/the-bernoulli-model-1.html
X = np.array([[0, 0, 0], [1, 1, 0], [0, 1, 0], [1, 0, 1], [0, 1, 0]])
Y = np.array([0, 0, 1, 2, 2])
# Fit Bernoulli NB w/ alpha = 1.0
clf = BernoulliNB(alpha=1.0)
clf.fit(X, Y)
# Manually form the (log) numerator and denominator that
# constitute P(feature presence | class)
num = np.log(clf.feature_count_ + 1.0)
denom = np.tile(np.log(clf.class_count_ + 2.0), (X.shape[1], 1)).T
# Check manual estimate matches
assert_array_equal(clf.feature_log_prob_, (num - denom))
def test_bnb():
# Tests that BernoulliNB when alpha=1.0 gives the same values as
# those given for the toy example in Manning, Raghavan, and
# Schuetze's "Introduction to Information Retrieval" book:
# http://nlp.stanford.edu/IR-book/html/htmledition/the-bernoulli-model-1.html
# Training data points are:
# Chinese Beijing Chinese (class: China)
# Chinese Chinese Shanghai (class: China)
# Chinese Macao (class: China)
# Tokyo Japan Chinese (class: Japan)
# Features are Beijing, Chinese, Japan, Macao, Shanghai, and Tokyo
X = np.array([[1, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 1, 0],
[0, 1, 0, 1, 0, 0],
[0, 1, 1, 0, 0, 1]])
# Classes are China (0), Japan (1)
Y = np.array([0, 0, 0, 1])
    # Fit BernoulliNB w/ alpha = 1.0
clf = BernoulliNB(alpha=1.0)
clf.fit(X, Y)
# Check the class prior is correct
class_prior = np.array([0.75, 0.25])
assert_array_almost_equal(np.exp(clf.class_log_prior_), class_prior)
# Check the feature probabilities are correct
feature_prob = np.array([[0.4, 0.8, 0.2, 0.4, 0.4, 0.2],
[1/3.0, 2/3.0, 2/3.0, 1/3.0, 1/3.0, 2/3.0]])
assert_array_almost_equal(np.exp(clf.feature_log_prob_), feature_prob)
# Testing data point is:
# Chinese Chinese Chinese Tokyo Japan
X_test = np.array([[0, 1, 1, 0, 0, 1]])
# Check the predictive probabilities are correct
unnorm_predict_proba = np.array([[0.005183999999999999,
0.02194787379972565]])
predict_proba = unnorm_predict_proba / np.sum(unnorm_predict_proba)
assert_array_almost_equal(clf.predict_proba(X_test), predict_proba)
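# Added worked example (commentary only, not part of the original tests): the
# unnormalised scores used above follow directly from the Bernoulli model,
# P(c | d) proportional to P(c) * product over terms t of
# [P(t | c) if t occurs in d else 1 - P(t | c)].
# For the test document (Chinese, Japan and Tokyo present, the rest absent):
#   China: 0.75 * (1-0.4) * 0.8 * 0.2 * (1-0.4) * (1-0.4) * 0.2 = 0.005184
#   Japan: 0.25 * (2/3)**6                                      = 0.0219478...
# which are exactly the two entries of unnorm_predict_proba before
# normalisation.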
|
bsd-3-clause
|
rpmunoz/DECam
|
completeness/compute_completeness.py
|
1
|
8352
|
#! /usr/bin/env python
import warnings
warnings.filterwarnings("ignore")
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
import matplotlib.pyplot as plt
import sys,os
import subprocess
import numpy as np
import random
import time
import cv2 as cv
import pyfits
from pyfits import getheader
import multiprocessing, Queue
import ctypes
class Worker(multiprocessing.Process):
def __init__(self, work_queue, result_queue):
# base class initialization
multiprocessing.Process.__init__(self)
# job management stuff
self.work_queue = work_queue
self.result_queue = result_queue
self.kill_received = False
def run(self):
while not self.kill_received:
# get a task
try:
i_range, psf_file = self.work_queue.get_nowait()
except Queue.Empty:
break
# the actual processing
print "Adding artificial stars - index range=", i_range
radius=16
x_c,y_c=( (psf_size[1]-1)/2, (psf_size[2]-1)/2 )
x,y=np.meshgrid(np.arange(psf_size[1])-x_c,np.arange(psf_size[2])-y_c)
distance = np.sqrt(x**2 + y**2)
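            # Added commentary: the nested loops below appear to evaluate a
            # PSFEx-style polynomial PSF model,
            #   PSF(x, y) = sum over (i_order, j_order) with
            #               i_order + j_order <= psf_order of
            #   psf_data[j] * ((y - POLZERO2)/POLSCAL2)**i_order
            #               * ((x - POLZERO1)/POLSCAL1)**j_order,
            # i.e. each basis image is weighted by the scaled position of the
            # artificial star before the stamp is flux-normalised and added.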
for i in range(i_range[0],i_range[1]):
psf_xy=np.zeros(psf_size[1:3], dtype=float)
j=0
for i_order in range(psf_order+1):
j_order=0
while (i_order+j_order < psf_order+1):
psf_xy += psf_data[j,:,:] * ((mock_y[i]-psf_offset[1])/psf_scale[1])**i_order * ((mock_x[i]-psf_offset[0])/psf_scale[0])**j_order
j_order+=1
j+=1
psf_factor=10.**( (30.-mock_mag[i])/2.5)/np.sum(psf_xy)
psf_xy *= psf_factor
npsf_xy=cv.resize(psf_xy,(npsf_size[0],npsf_size[1]),interpolation=cv.INTER_LANCZOS4)
npsf_factor=10.**( (30.-mock_mag[i])/2.5)/np.sum(npsf_xy)
npsf_xy *= npsf_factor
im_rangex=[max(mock_x[i]-npsf_size[1]/2,0), min(mock_x[i]-npsf_size[1]/2+npsf_size[1], im_size[1])]
im_rangey=[max(mock_y[i]-npsf_size[0]/2,0), min(mock_y[i]-npsf_size[0]/2+npsf_size[0], im_size[0])]
npsf_rangex=[max(-1*(mock_x[i]-npsf_size[1]/2),0), min(-1*(mock_x[i]-npsf_size[1]/2-im_size[1]),npsf_size[1])]
npsf_rangey=[max(-1*(mock_y[i]-npsf_size[0]/2),0), min(-1*(mock_y[i]-npsf_size[0]/2-im_size[0]),npsf_size[0])]
im_data[im_rangey[0]:im_rangey[1], im_rangex[0]:im_rangex[1]] += npsf_xy[npsf_rangey[0]:npsf_rangey[1], npsf_rangex[0]:npsf_rangex[1]]
print 'Done'
self.result_queue.put(id)
# store the result
class Worker_sex(multiprocessing.Process):
def __init__(self, work_queue, result_queue):
# base class initialization
multiprocessing.Process.__init__(self)
# job management stuff
self.work_queue = work_queue
self.result_queue = result_queue
self.kill_received = False
def run(self):
while not self.kill_received:
# get a task
try:
i_thread, i_range = self.work_queue.get_nowait()
except Queue.Empty:
break
fwhm=1.0
pixel_scale=0.263
weight_type='MAP_WEIGHT'
checkimage_type='NONE'
checkimage_file='NONE'
satur_level=4.3e5
analysis_thresh=2.0
detect_minarea=3
detect_thresh=1.4
phot_apertures=",".join(["%.2f" % x for x in 2*np.array((0.5,1.,1.5,2.,2.5,3.,3.5,4.,4.5,5.))*fwhm/pixel_scale])
filter_name='sex_config/gauss_3.0_7x7.conv'
xml_name='survey_sex.xml'
# the actual processing
log_file="survey_completeness_sex_thread%d.log" % i_thread
for i in range(i_range[0],i_range[1]):
command = "sex %s -c sex_config/ctio_decam.sex -PARAMETERS_NAME sex_config/ctio_decam_psfmodel.param -CATALOG_TYPE FITS_LDAC -CATALOG_NAME %s -SEEING_FWHM %.2f -WEIGHT_TYPE %s -WEIGHT_THRESH 0. -WEIGHT_IMAGE %s -CHECKIMAGE_TYPE %s -CHECKIMAGE_NAME %s -SATUR_LEVEL %d -BACKPHOTO_TYPE LOCAL -BACKPHOTO_THICK 30 -BACK_SIZE 250 -BACK_FILTERSIZE 3 -MASK_TYPE CORRECT -ANALYSIS_THRESH %.2f -DETECT_MINAREA %d -DETECT_THRESH %.2f -DEBLEND_MINCONT 0.0000001 -INTERP_TYPE ALL -INTERP_MAXXLAG 1 -INTERP_MAXYLAG 1 -FLAG_TYPE OR -FLAG_IMAGE %s -PHOT_AUTOPARAMS 2.3,4.0 -PHOT_FLUXFRAC 0.5 -PHOT_APERTURES %s -PIXEL_SCALE %.4f -FILTER Y -FILTER_NAME %s -WRITE_XML Y -XML_NAME %s -PSF_NAME %s -PSF_NMAX 1" % (mock_im_file[i], sex_cat_file[i], fwhm, weight_type, weight_file[i], checkimage_type, checkimage_file, satur_level, analysis_thresh, detect_minarea, detect_thresh, flag_file[i], phot_apertures, pixel_scale, filter_name, xml_name, psf_file[i] )
print command
with open(log_file, "a") as log:
result=subprocess.call(command, stderr=log, stdout=log, shell=True)
log.close()
print "SExtractor thread: %d - iteration: %d is done!" % (i_thread, i)
self.result_queue.put(id)
if __name__ == "__main__":
n_cpu=2
n_core=6
n_processes=n_cpu*n_core*1
input_mock_file=sys.argv[1]
input_data_dtype=np.dtype({'names':['im_file','weight_file','flag_file','psf_file','mock_mag_file','mock_im_file','sex_cat_file'],'formats':['S200','S200','S200','S200','S200','S200','S200']})
input_data=np.loadtxt(input_mock_file, skiprows=1, dtype=input_data_dtype)
im_file=input_data['im_file']
weight_file=input_data['weight_file']
flag_file=input_data['flag_file']
psf_file=input_data['psf_file']
mock_mag_file=input_data['mock_mag_file']
mock_im_file=input_data['mock_im_file']
sex_cat_file=input_data['sex_cat_file']
input_n=im_file.size
n_processes=np.minimum(n_processes,input_n)
print n_processes
for i in range(input_n):
if os.path.exists(mock_im_file[i]):
print "Removing file ", mock_im_file[i]
os.remove(mock_im_file[i])
if os.path.exists(sex_cat_file[i]):
print "Removing file ", sex_cat_file[i]
os.remove(sex_cat_file[i])
# First, add artificial stars
for i in range(0,input_n):
hdulist = pyfits.open(psf_file[i])
psf_h = hdulist[1].header
psf_data = (hdulist[1].data)[0][0]
hdulist.close()
psf_order=psf_h['POLDEG1']
psf_offset=[psf_h['POLZERO1'],psf_h['POLZERO2']]
psf_scale=[psf_h['POLSCAL1'],psf_h['POLSCAL2']]
psf_pixstep=psf_h['PSF_SAMP']
psf_size=psf_data.shape
npsf_size=(np.array(psf_size[1:3])*psf_pixstep).astype(int)
mock_data=np.loadtxt(mock_mag_file[i], skiprows=1)
mock_n=mock_data[:,0].size
mock_sort=np.argsort(mock_data[:,1])
mock_x=mock_data[mock_sort,0]
mock_y=mock_data[mock_sort,1]
mock_mag=mock_data[mock_sort,2]
print "Reading file ", im_file[i]
hdu=pyfits.open(im_file[i])
data=hdu[0].data
im_size=data.shape
im_data_base = multiprocessing.Array(ctypes.c_float, im_size[0]*im_size[1])
im_data = np.ctypeslib.as_array(im_data_base.get_obj())
im_data = im_data.reshape(im_size[0], im_size[1])
im_data[:] = data
data=0
assert im_data.base.base is im_data_base.get_obj()
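        # Added commentary: im_data is a NumPy view on the shared
        # multiprocessing.Array buffer, so the Worker processes forked below
        # write their artificial stars directly into the same pixel array
        # without copying the full image per process.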
# run
# load up work queue
tic=time.time()
j_step=np.int(np.ceil( mock_n*1./n_processes ))
j_range=range(0,mock_n,j_step)
j_range.append(mock_n)
work_queue = multiprocessing.Queue()
for j in range(np.size(j_range)-1):
if work_queue.full():
print "Oh no! Queue is full after only %d iterations" % j
work_queue.put( (j_range[j:j+2], psf_file[i]) )
# create a queue to pass to workers to store the results
result_queue = multiprocessing.Queue()
procs=[]
# spawn workers
for j in range(n_processes):
worker = Worker(work_queue, result_queue)
procs.append(worker)
worker.start()
# collect the results off the queue
for j in range(n_processes):
result_queue.get()
for p in procs:
p.join()
print 'Final Done'
print "Writing file ", mock_im_file[i]
hdu[0].data=im_data
hdu.writeto(mock_im_file[i])
print "%f s for parallel computation." % (time.time() - tic)
# Second, run Sextractor
n_processes=n_cpu*n_core
n_processes=np.minimum(n_processes,input_n)
tic=time.time()
j_step=np.int(np.ceil( input_n*1./n_processes ))
j_range=range(0,input_n,j_step)
j_range.append(input_n)
work_queue = multiprocessing.Queue()
for j in range(np.size(j_range)-1):
if work_queue.full():
print "Oh no! Queue is full after only %d iterations" % j
work_queue.put( (j+1, j_range[j:j+2]) )
# create a queue to pass to workers to store the results
result_queue = multiprocessing.Queue()
procs=[]
# spawn workers
for j in range(n_processes):
worker = Worker_sex(work_queue, result_queue)
procs.append(worker)
worker.start()
time.sleep(30)
# collect the results off the queue
for j in range(n_processes):
result_queue.get()
for p in procs:
p.join()
|
apache-2.0
|
justincassidy/scikit-learn
|
sklearn/externals/joblib/parallel.py
|
86
|
35087
|
"""
Helpers for embarrassingly parallel code.
"""
# Author: Gael Varoquaux < gael dot varoquaux at normalesup dot org >
# Copyright: 2010, Gael Varoquaux
# License: BSD 3 clause
from __future__ import division
import os
import sys
import gc
import warnings
from math import sqrt
import functools
import time
import threading
import itertools
from numbers import Integral
try:
import cPickle as pickle
except ImportError:
import pickle
from ._multiprocessing_helpers import mp
if mp is not None:
from .pool import MemmapingPool
from multiprocessing.pool import ThreadPool
from .format_stack import format_exc, format_outer_frames
from .logger import Logger, short_format_time
from .my_exceptions import TransportableException, _mk_exception
from .disk import memstr_to_kbytes
from ._compat import _basestring
VALID_BACKENDS = ['multiprocessing', 'threading']
# Environment variables to protect against bad situations when nesting
JOBLIB_SPAWNED_PROCESS = "__JOBLIB_SPAWNED_PARALLEL__"
# In seconds, should be big enough to hide multiprocessing dispatching
# overhead.
# This settings was found by running benchmarks/bench_auto_batching.py
# with various parameters on various platforms.
MIN_IDEAL_BATCH_DURATION = .2
# Should not be too high to avoid stragglers: long jobs running alone
# on a single worker while other workers have no work to process any more.
MAX_IDEAL_BATCH_DURATION = 2
class BatchedCalls(object):
"""Wrap a sequence of (func, args, kwargs) tuples as a single callable"""
def __init__(self, iterator_slice):
self.items = list(iterator_slice)
self._size = len(self.items)
def __call__(self):
return [func(*args, **kwargs) for func, args, kwargs in self.items]
def __len__(self):
return self._size
###############################################################################
# CPU count that works also when multiprocessing has been disabled via
# the JOBLIB_MULTIPROCESSING environment variable
def cpu_count():
""" Return the number of CPUs.
"""
if mp is None:
return 1
return mp.cpu_count()
###############################################################################
# For verbosity
def _verbosity_filter(index, verbose):
""" Returns False for indices increasingly apart, the distance
depending on the value of verbose.
We use a lag increasing as the square of index
"""
if not verbose:
return True
elif verbose > 10:
return False
if index == 0:
return False
verbose = .5 * (11 - verbose) ** 2
scale = sqrt(index / verbose)
next_scale = sqrt((index + 1) / verbose)
return (int(next_scale) == int(scale))
###############################################################################
class WorkerInterrupt(Exception):
""" An exception that is not KeyboardInterrupt to allow subprocesses
to be interrupted.
"""
pass
###############################################################################
class SafeFunction(object):
""" Wraps a function to make it exception with full traceback in
their representation.
Useful for parallel computing with multiprocessing, for which
exceptions cannot be captured.
"""
def __init__(self, func):
self.func = func
def __call__(self, *args, **kwargs):
try:
return self.func(*args, **kwargs)
except KeyboardInterrupt:
# We capture the KeyboardInterrupt and reraise it as
# something different, as multiprocessing does not
# interrupt processing for a KeyboardInterrupt
raise WorkerInterrupt()
except:
e_type, e_value, e_tb = sys.exc_info()
text = format_exc(e_type, e_value, e_tb, context=10,
tb_offset=1)
if issubclass(e_type, TransportableException):
raise
else:
raise TransportableException(text, e_type)
###############################################################################
def delayed(function, check_pickle=True):
"""Decorator used to capture the arguments of a function.
Pass `check_pickle=False` when:
- performing a possibly repeated check is too costly and has been done
already once outside of the call to delayed.
    - when used in conjunction with `Parallel(backend='threading')`.
"""
# Try to pickle the input function, to catch the problems early when
# using with multiprocessing:
if check_pickle:
pickle.dumps(function)
def delayed_function(*args, **kwargs):
return function, args, kwargs
try:
delayed_function = functools.wraps(function)(delayed_function)
except AttributeError:
" functools.wraps fails on some callable objects "
return delayed_function
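# Added note: delayed(f)(*args, **kwargs) therefore just returns the tuple
# (f, args, kwargs); Parallel unpacks these triples later and calls
# f(*args, **kwargs) inside a worker (see BatchedCalls.__call__ above).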
###############################################################################
class ImmediateComputeBatch(object):
"""Sequential computation of a batch of tasks.
This replicates the async computation API but actually does not delay
the computations when joblib.Parallel runs in sequential mode.
"""
def __init__(self, batch):
# Don't delay the application, to avoid keeping the input
# arguments in memory
self.results = batch()
def get(self):
return self.results
###############################################################################
class BatchCompletionCallBack(object):
"""Callback used by joblib.Parallel's multiprocessing backend.
This callable is executed by the parent process whenever a worker process
has returned the results of a batch of tasks.
    It is used for progress reporting, to update the estimate of the batch
processing duration and to schedule the next batch of tasks to be
processed.
"""
def __init__(self, dispatch_timestamp, batch_size, parallel):
self.dispatch_timestamp = dispatch_timestamp
self.batch_size = batch_size
self.parallel = parallel
def __call__(self, out):
self.parallel.n_completed_tasks += self.batch_size
this_batch_duration = time.time() - self.dispatch_timestamp
if (self.parallel.batch_size == 'auto'
and self.batch_size == self.parallel._effective_batch_size):
# Update the smoothed streaming estimate of the duration of a batch
# from dispatch to completion
old_duration = self.parallel._smoothed_batch_duration
if old_duration == 0:
# First record of duration for this batch size after the last
# reset.
new_duration = this_batch_duration
else:
# Update the exponentially weighted average of the duration of
# batch for the current effective size.
new_duration = 0.8 * old_duration + 0.2 * this_batch_duration
self.parallel._smoothed_batch_duration = new_duration
self.parallel.print_progress()
if self.parallel._original_iterator is not None:
self.parallel.dispatch_next()
###############################################################################
class Parallel(Logger):
''' Helper class for readable parallel mapping.
Parameters
-----------
n_jobs: int, default: 1
The maximum number of concurrently running jobs, such as the number
of Python worker processes when backend="multiprocessing"
or the size of the thread-pool when backend="threading".
If -1 all CPUs are used. If 1 is given, no parallel computing code
is used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all
CPUs but one are used.
backend: str or None, default: 'multiprocessing'
Specify the parallelization backend implementation.
Supported backends are:
- "multiprocessing" used by default, can induce some
communication and memory overhead when exchanging input and
            output data with the worker Python processes.
- "threading" is a very low-overhead backend but it suffers
from the Python Global Interpreter Lock if the called function
relies a lot on Python objects. "threading" is mostly useful
when the execution bottleneck is a compiled extension that
explicitly releases the GIL (for instance a Cython loop wrapped
in a "with nogil" block or an expensive call to a library such
as NumPy).
verbose: int, optional
The verbosity level: if non zero, progress messages are
printed. Above 50, the output is sent to stdout.
The frequency of the messages increases with the verbosity level.
        If it is more than 10, all iterations are reported.
pre_dispatch: {'all', integer, or expression, as in '3*n_jobs'}
The number of batches (of tasks) to be pre-dispatched.
        Default is '2*n_jobs'. When batch_size="auto" this is a reasonable
        default and the multiprocessing workers should never starve.
batch_size: int or 'auto', default: 'auto'
The number of atomic tasks to dispatch at once to each
worker. When individual evaluations are very fast, multiprocessing
can be slower than sequential computation because of the overhead.
Batching fast computations together can mitigate this.
The ``'auto'`` strategy keeps track of the time it takes for a batch
to complete, and dynamically adjusts the batch size to keep the time
on the order of half a second, using a heuristic. The initial batch
size is 1.
``batch_size="auto"`` with ``backend="threading"`` will dispatch
batches of a single task at a time as the threading backend has
very little overhead and using larger batch size has not proved to
bring any gain in that case.
temp_folder: str, optional
Folder to be used by the pool for memmaping large arrays
for sharing memory with worker processes. If None, this will try in
order:
- a folder pointed by the JOBLIB_TEMP_FOLDER environment variable,
- /dev/shm if the folder exists and is writable: this is a RAMdisk
filesystem available by default on modern Linux distributions,
- the default system temporary folder that can be overridden
with TMP, TMPDIR or TEMP environment variables, typically /tmp
under Unix operating systems.
Only active when backend="multiprocessing".
    max_nbytes: int, str, or None, optional, 1M by default
Threshold on the size of arrays passed to the workers that
triggers automated memory mapping in temp_folder. Can be an int
in Bytes, or a human-readable string, e.g., '1M' for 1 megabyte.
Use None to disable memmaping of large arrays.
Only active when backend="multiprocessing".
Notes
-----
This object uses the multiprocessing module to compute in
parallel the application of a function to many different
arguments. The main functionality it brings in addition to
using the raw multiprocessing API are (see examples for details):
* More readable code, in particular since it avoids
constructing list of arguments.
* Easier debugging:
- informative tracebacks even when the error happens on
the client side
- using 'n_jobs=1' enables to turn off parallel computing
for debugging without changing the codepath
- early capture of pickling errors
* An optional progress meter.
* Interruption of multiprocesses jobs with 'Ctrl-C'
* Flexible pickling control for the communication to and from
the worker processes.
* Ability to use shared memory efficiently with worker
processes for large numpy-based datastructures.
Examples
--------
A simple example:
>>> from math import sqrt
>>> from sklearn.externals.joblib import Parallel, delayed
>>> Parallel(n_jobs=1)(delayed(sqrt)(i**2) for i in range(10))
[0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0]
Reshaping the output when the function has several return
values:
>>> from math import modf
>>> from sklearn.externals.joblib import Parallel, delayed
>>> r = Parallel(n_jobs=1)(delayed(modf)(i/2.) for i in range(10))
>>> res, i = zip(*r)
>>> res
(0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5)
>>> i
(0.0, 0.0, 1.0, 1.0, 2.0, 2.0, 3.0, 3.0, 4.0, 4.0)
The progress meter: the higher the value of `verbose`, the more
messages::
>>> from time import sleep
>>> from sklearn.externals.joblib import Parallel, delayed
>>> r = Parallel(n_jobs=2, verbose=5)(delayed(sleep)(.1) for _ in range(10)) #doctest: +SKIP
[Parallel(n_jobs=2)]: Done 1 out of 10 | elapsed: 0.1s remaining: 0.9s
[Parallel(n_jobs=2)]: Done 3 out of 10 | elapsed: 0.2s remaining: 0.5s
[Parallel(n_jobs=2)]: Done 6 out of 10 | elapsed: 0.3s remaining: 0.2s
[Parallel(n_jobs=2)]: Done 9 out of 10 | elapsed: 0.5s remaining: 0.1s
[Parallel(n_jobs=2)]: Done 10 out of 10 | elapsed: 0.5s finished
Traceback example, note how the line of the error is indicated
as well as the values of the parameter passed to the function that
triggered the exception, even though the traceback happens in the
child process::
>>> from heapq import nlargest
>>> from sklearn.externals.joblib import Parallel, delayed
>>> Parallel(n_jobs=2)(delayed(nlargest)(2, n) for n in (range(4), 'abcde', 3)) #doctest: +SKIP
#...
---------------------------------------------------------------------------
Sub-process traceback:
---------------------------------------------------------------------------
TypeError Mon Nov 12 11:37:46 2012
PID: 12934 Python 2.7.3: /usr/bin/python
...........................................................................
/usr/lib/python2.7/heapq.pyc in nlargest(n=2, iterable=3, key=None)
419 if n >= size:
420 return sorted(iterable, key=key, reverse=True)[:n]
421
422 # When key is none, use simpler decoration
423 if key is None:
--> 424 it = izip(iterable, count(0,-1)) # decorate
425 result = _nlargest(n, it)
426 return map(itemgetter(0), result) # undecorate
427
428 # General case, slowest method
TypeError: izip argument #1 must support iteration
___________________________________________________________________________
Using pre_dispatch in a producer/consumer situation, where the
data is generated on the fly. Note how the producer is first
    called 3 times before the parallel loop is initiated, and then
called to generate new data on the fly. In this case the total
number of iterations cannot be reported in the progress messages::
>>> from math import sqrt
>>> from sklearn.externals.joblib import Parallel, delayed
>>> def producer():
... for i in range(6):
... print('Produced %s' % i)
... yield i
>>> out = Parallel(n_jobs=2, verbose=100, pre_dispatch='1.5*n_jobs')(
... delayed(sqrt)(i) for i in producer()) #doctest: +SKIP
Produced 0
Produced 1
Produced 2
[Parallel(n_jobs=2)]: Done 1 jobs | elapsed: 0.0s
Produced 3
[Parallel(n_jobs=2)]: Done 2 jobs | elapsed: 0.0s
Produced 4
[Parallel(n_jobs=2)]: Done 3 jobs | elapsed: 0.0s
Produced 5
[Parallel(n_jobs=2)]: Done 4 jobs | elapsed: 0.0s
[Parallel(n_jobs=2)]: Done 5 out of 6 | elapsed: 0.0s remaining: 0.0s
[Parallel(n_jobs=2)]: Done 6 out of 6 | elapsed: 0.0s finished
'''
def __init__(self, n_jobs=1, backend='multiprocessing', verbose=0,
pre_dispatch='2 * n_jobs', batch_size='auto', temp_folder=None,
max_nbytes='1M', mmap_mode='r'):
self.verbose = verbose
self._mp_context = None
if backend is None:
# `backend=None` was supported in 0.8.2 with this effect
backend = "multiprocessing"
elif hasattr(backend, 'Pool') and hasattr(backend, 'Lock'):
# Make it possible to pass a custom multiprocessing context as
# backend to change the start method to forkserver or spawn or
# preload modules on the forkserver helper process.
self._mp_context = backend
backend = "multiprocessing"
if backend not in VALID_BACKENDS:
raise ValueError("Invalid backend: %s, expected one of %r"
% (backend, VALID_BACKENDS))
self.backend = backend
self.n_jobs = n_jobs
if (batch_size == 'auto'
or isinstance(batch_size, Integral) and batch_size > 0):
self.batch_size = batch_size
else:
raise ValueError(
"batch_size must be 'auto' or a positive integer, got: %r"
% batch_size)
self.pre_dispatch = pre_dispatch
self._temp_folder = temp_folder
if isinstance(max_nbytes, _basestring):
self._max_nbytes = 1024 * memstr_to_kbytes(max_nbytes)
else:
self._max_nbytes = max_nbytes
self._mmap_mode = mmap_mode
# Not starting the pool in the __init__ is a design decision, to be
# able to close it ASAP, and not burden the user with closing it
# unless they choose to use the context manager API with a with block.
self._pool = None
self._output = None
self._jobs = list()
self._managed_pool = False
        # This lock is used to coordinate the main thread of this process with
        # the async callback thread of our pool.
self._lock = threading.Lock()
def __enter__(self):
self._managed_pool = True
self._initialize_pool()
return self
def __exit__(self, exc_type, exc_value, traceback):
self._terminate_pool()
self._managed_pool = False
def _effective_n_jobs(self):
n_jobs = self.n_jobs
if n_jobs == 0:
raise ValueError('n_jobs == 0 in Parallel has no meaning')
elif mp is None or n_jobs is None:
# multiprocessing is not available or disabled, fallback
# to sequential mode
return 1
elif n_jobs < 0:
n_jobs = max(mp.cpu_count() + 1 + n_jobs, 1)
return n_jobs
def _initialize_pool(self):
"""Build a process or thread pool and return the number of workers"""
n_jobs = self._effective_n_jobs()
# The list of exceptions that we will capture
self.exceptions = [TransportableException]
if n_jobs == 1:
# Sequential mode: do not use a pool instance to avoid any
# useless dispatching overhead
self._pool = None
elif self.backend == 'threading':
self._pool = ThreadPool(n_jobs)
elif self.backend == 'multiprocessing':
if mp.current_process().daemon:
# Daemonic processes cannot have children
self._pool = None
warnings.warn(
'Multiprocessing-backed parallel loops cannot be nested,'
' setting n_jobs=1',
stacklevel=3)
return 1
elif threading.current_thread().name != 'MainThread':
# Prevent posix fork inside in non-main posix threads
self._pool = None
warnings.warn(
'Multiprocessing backed parallel loops cannot be nested'
' below threads, setting n_jobs=1',
stacklevel=3)
return 1
else:
already_forked = int(os.environ.get(JOBLIB_SPAWNED_PROCESS, 0))
if already_forked:
raise ImportError('[joblib] Attempting to do parallel computing '
'without protecting your import on a system that does '
'not support forking. To use parallel-computing in a '
'script, you must protect your main loop using "if '
"__name__ == '__main__'"
'". Please see the joblib documentation on Parallel '
'for more information'
)
# Set an environment variable to avoid infinite loops
os.environ[JOBLIB_SPAWNED_PROCESS] = '1'
# Make sure to free as much memory as possible before forking
gc.collect()
poolargs = dict(
max_nbytes=self._max_nbytes,
mmap_mode=self._mmap_mode,
temp_folder=self._temp_folder,
verbose=max(0, self.verbose - 50),
context_id=0, # the pool is used only for one call
)
if self._mp_context is not None:
# Use Python 3.4+ multiprocessing context isolation
poolargs['context'] = self._mp_context
self._pool = MemmapingPool(n_jobs, **poolargs)
# We are using multiprocessing, we also want to capture
# KeyboardInterrupts
self.exceptions.extend([KeyboardInterrupt, WorkerInterrupt])
else:
raise ValueError("Unsupported backend: %s" % self.backend)
return n_jobs
def _terminate_pool(self):
if self._pool is not None:
self._pool.close()
self._pool.terminate() # terminate does a join()
self._pool = None
if self.backend == 'multiprocessing':
os.environ.pop(JOBLIB_SPAWNED_PROCESS, 0)
def _dispatch(self, batch):
"""Queue the batch for computing, with or without multiprocessing
WARNING: this method is not thread-safe: it should be only called
indirectly via dispatch_one_batch.
"""
# If job.get() catches an exception, it closes the queue:
if self._aborting:
return
if self._pool is None:
job = ImmediateComputeBatch(batch)
self._jobs.append(job)
self.n_dispatched_batches += 1
self.n_dispatched_tasks += len(batch)
self.n_completed_tasks += len(batch)
if not _verbosity_filter(self.n_dispatched_batches, self.verbose):
self._print('Done %3i tasks | elapsed: %s',
(self.n_completed_tasks,
short_format_time(time.time() - self._start_time)
))
else:
dispatch_timestamp = time.time()
cb = BatchCompletionCallBack(dispatch_timestamp, len(batch), self)
job = self._pool.apply_async(SafeFunction(batch), callback=cb)
self._jobs.append(job)
self.n_dispatched_tasks += len(batch)
self.n_dispatched_batches += 1
def dispatch_next(self):
"""Dispatch more data for parallel processing
This method is meant to be called concurrently by the multiprocessing
callback. We rely on the thread-safety of dispatch_one_batch to protect
against concurrent consumption of the unprotected iterator.
"""
if not self.dispatch_one_batch(self._original_iterator):
self._iterating = False
self._original_iterator = None
def dispatch_one_batch(self, iterator):
"""Prefetch the tasks for the next batch and dispatch them.
The effective size of the batch is computed here.
If there are no more jobs to dispatch, return False, else return True.
The iterator consumption and dispatching is protected by the same
lock so calling this function should be thread safe.
"""
if self.batch_size == 'auto' and self.backend == 'threading':
# Batching is never beneficial with the threading backend
batch_size = 1
elif self.batch_size == 'auto':
old_batch_size = self._effective_batch_size
batch_duration = self._smoothed_batch_duration
if (batch_duration > 0 and
batch_duration < MIN_IDEAL_BATCH_DURATION):
# The current batch size is too small: the duration of the
                # processing of a batch of tasks is not large enough to hide
# the scheduling overhead.
ideal_batch_size = int(
old_batch_size * MIN_IDEAL_BATCH_DURATION / batch_duration)
                # Multiply by two to limit oscillations between min and max.
batch_size = max(2 * ideal_batch_size, 1)
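                # Added worked example: with old_batch_size = 4 and a measured
                # batch_duration of 0.05s, ideal_batch_size is
                # int(4 * 0.2 / 0.05) = 16, so the next batches contain 32
                # tasks and should take roughly 0.4s each, comfortably inside
                # [MIN_IDEAL_BATCH_DURATION, MAX_IDEAL_BATCH_DURATION].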
self._effective_batch_size = batch_size
if self.verbose >= 10:
self._print("Batch computation too fast (%.4fs.) "
"Setting batch_size=%d.", (
batch_duration, batch_size))
elif (batch_duration > MAX_IDEAL_BATCH_DURATION and
old_batch_size >= 2):
# The current batch size is too big. If we schedule overly long
# running batches some CPUs might wait with nothing left to do
                # while a couple of CPUs are left processing a few long running
# batches. Better reduce the batch size a bit to limit the
# likelihood of scheduling such stragglers.
self._effective_batch_size = batch_size = old_batch_size // 2
if self.verbose >= 10:
self._print("Batch computation too slow (%.2fs.) "
"Setting batch_size=%d.", (
batch_duration, batch_size))
else:
# No batch size adjustment
batch_size = old_batch_size
if batch_size != old_batch_size:
# Reset estimation of the smoothed mean batch duration: this
# estimate is updated in the multiprocessing apply_async
# CallBack as long as the batch_size is constant. Therefore
# we need to reset the estimate whenever we re-tune the batch
# size.
self._smoothed_batch_duration = 0
else:
# Fixed batch size strategy
batch_size = self.batch_size
with self._lock:
tasks = BatchedCalls(itertools.islice(iterator, batch_size))
if not tasks:
# No more tasks available in the iterator: tell caller to stop.
return False
else:
self._dispatch(tasks)
return True
def _print(self, msg, msg_args):
"""Display the message on stout or stderr depending on verbosity"""
# XXX: Not using the logger framework: need to
# learn to use logger better.
if not self.verbose:
return
if self.verbose < 50:
writer = sys.stderr.write
else:
writer = sys.stdout.write
msg = msg % msg_args
writer('[%s]: %s\n' % (self, msg))
def print_progress(self):
"""Display the process of the parallel execution only a fraction
of time, controlled by self.verbose.
"""
if not self.verbose:
return
elapsed_time = time.time() - self._start_time
        # This is heuristic code to print only about 'verbose' messages in total
# The challenge is that we may not know the queue length
if self._original_iterator:
if _verbosity_filter(self.n_dispatched_batches, self.verbose):
return
self._print('Done %3i tasks | elapsed: %s',
(self.n_completed_tasks,
short_format_time(elapsed_time),
))
else:
index = self.n_dispatched_batches
# We are finished dispatching
total_tasks = self.n_dispatched_tasks
# We always display the first loop
if not index == 0:
# Display depending on the number of remaining items
# A message as soon as we finish dispatching, cursor is 0
cursor = (total_tasks - index + 1
- self._pre_dispatch_amount)
frequency = (total_tasks // self.verbose) + 1
is_last_item = (index + 1 == total_tasks)
if (is_last_item or cursor % frequency):
return
remaining_time = (elapsed_time / (index + 1) *
(self.n_dispatched_tasks - index - 1.))
self._print('Done %3i out of %3i | elapsed: %s remaining: %s',
(index + 1,
total_tasks,
short_format_time(elapsed_time),
short_format_time(remaining_time),
))
def retrieve(self):
self._output = list()
while self._iterating or len(self._jobs) > 0:
if len(self._jobs) == 0:
# Wait for an async callback to dispatch new jobs
time.sleep(0.01)
continue
# We need to be careful: the job list can be filling up as
            # we empty it and Python lists are not thread-safe by default, hence
# the use of the lock
with self._lock:
job = self._jobs.pop(0)
try:
self._output.extend(job.get())
except tuple(self.exceptions) as exception:
# Stop dispatching any new job in the async callback thread
self._aborting = True
if isinstance(exception, TransportableException):
# Capture exception to add information on the local
# stack in addition to the distant stack
this_report = format_outer_frames(context=10,
stack_start=1)
report = """Multiprocessing exception:
%s
---------------------------------------------------------------------------
Sub-process traceback:
---------------------------------------------------------------------------
%s""" % (this_report, exception.message)
# Convert this to a JoblibException
exception_type = _mk_exception(exception.etype)[0]
exception = exception_type(report)
# Kill remaining running processes without waiting for
# the results as we will raise the exception we got back
# to the caller instead of returning any result.
with self._lock:
self._terminate_pool()
if self._managed_pool:
# In case we had to terminate a managed pool, let
# us start a new one to ensure that subsequent calls
# to __call__ on the same Parallel instance will get
# a working pool as they expect.
self._initialize_pool()
raise exception
def __call__(self, iterable):
if self._jobs:
raise ValueError('This Parallel instance is already running')
# A flag used to abort the dispatching of jobs in case an
# exception is found
self._aborting = False
if not self._managed_pool:
n_jobs = self._initialize_pool()
else:
n_jobs = self._effective_n_jobs()
if self.batch_size == 'auto':
self._effective_batch_size = 1
iterator = iter(iterable)
pre_dispatch = self.pre_dispatch
if pre_dispatch == 'all' or n_jobs == 1:
# prevent further dispatch via multiprocessing callback thread
self._original_iterator = None
self._pre_dispatch_amount = 0
else:
self._original_iterator = iterator
if hasattr(pre_dispatch, 'endswith'):
pre_dispatch = eval(pre_dispatch)
self._pre_dispatch_amount = pre_dispatch = int(pre_dispatch)
# The main thread will consume the first pre_dispatch items and
# the remaining items will later be lazily dispatched by async
# callbacks upon task completions.
iterator = itertools.islice(iterator, pre_dispatch)
self._start_time = time.time()
self.n_dispatched_batches = 0
self.n_dispatched_tasks = 0
self.n_completed_tasks = 0
self._smoothed_batch_duration = 0.0
try:
self._iterating = True
while self.dispatch_one_batch(iterator):
pass
if pre_dispatch == "all" or n_jobs == 1:
                # The iterable was consumed all at once by the dispatch loop above.
# No need to wait for async callbacks to trigger to
# consumption.
self._iterating = False
self.retrieve()
# Make sure that we get a last message telling us we are done
elapsed_time = time.time() - self._start_time
self._print('Done %3i out of %3i | elapsed: %s finished',
(len(self._output), len(self._output),
short_format_time(elapsed_time)))
finally:
if not self._managed_pool:
self._terminate_pool()
self._jobs = list()
output = self._output
self._output = None
return output
def __repr__(self):
return '%s(n_jobs=%s)' % (self.__class__.__name__, self.n_jobs)
|
bsd-3-clause
|
DonBeo/scikit-learn
|
sklearn/mixture/tests/test_dpgmm.py
|
12
|
2594
|
import unittest
import nose
import numpy as np
from sklearn.mixture import DPGMM, VBGMM
from sklearn.mixture.dpgmm import log_normalize
from sklearn.datasets import make_blobs
from sklearn.utils.testing import assert_array_less
from sklearn.mixture.tests.test_gmm import GMMTester
np.seterr(all='warn')
def test_class_weights():
# check that the class weights are updated
# simple 3 cluster dataset
X, y = make_blobs(random_state=1)
for Model in [DPGMM, VBGMM]:
dpgmm = Model(n_components=10, random_state=1, alpha=20, n_iter=50)
dpgmm.fit(X)
# get indices of components that are used:
indices = np.unique(dpgmm.predict(X))
active = np.zeros(10, dtype=np.bool)
active[indices] = True
# used components are important
assert_array_less(.1, dpgmm.weights_[active])
# others are not
assert_array_less(dpgmm.weights_[~active], .05)
def test_log_normalize():
v = np.array([0.1, 0.8, 0.01, 0.09])
a = np.log(2 * v)
assert np.allclose(v, log_normalize(a), rtol=0.01)
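    # Added note: log_normalize effectively exponentiates and rescales to unit
    # sum here; since v already sums to 1, exp(log(2 * v)) / sum(2 * v) == v.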
def do_model(self, **kwds):
return VBGMM(verbose=False, **kwds)
class DPGMMTester(GMMTester):
model = DPGMM
do_test_eval = False
def score(self, g, train_obs):
_, z = g.score_samples(train_obs)
return g.lower_bound(train_obs, z)
class TestDPGMMWithSphericalCovars(unittest.TestCase, DPGMMTester):
covariance_type = 'spherical'
setUp = GMMTester._setUp
class TestDPGMMWithDiagCovars(unittest.TestCase, DPGMMTester):
covariance_type = 'diag'
setUp = GMMTester._setUp
class TestDPGMMWithTiedCovars(unittest.TestCase, DPGMMTester):
covariance_type = 'tied'
setUp = GMMTester._setUp
class TestDPGMMWithFullCovars(unittest.TestCase, DPGMMTester):
covariance_type = 'full'
setUp = GMMTester._setUp
class VBGMMTester(GMMTester):
model = do_model
do_test_eval = False
def score(self, g, train_obs):
_, z = g.score_samples(train_obs)
return g.lower_bound(train_obs, z)
class TestVBGMMWithSphericalCovars(unittest.TestCase, VBGMMTester):
covariance_type = 'spherical'
setUp = GMMTester._setUp
class TestVBGMMWithDiagCovars(unittest.TestCase, VBGMMTester):
covariance_type = 'diag'
setUp = GMMTester._setUp
class TestVBGMMWithTiedCovars(unittest.TestCase, VBGMMTester):
covariance_type = 'tied'
setUp = GMMTester._setUp
class TestVBGMMWithFullCovars(unittest.TestCase, VBGMMTester):
covariance_type = 'full'
setUp = GMMTester._setUp
if __name__ == '__main__':
nose.runmodule()
|
bsd-3-clause
|
SANDAG/urbansim
|
urbansim/accounts.py
|
6
|
4003
|
"""
An Account class for tracking monetary transactions during UrbanSim runs.
"""
from collections import namedtuple
import pandas as pd
from zbox import toolz as tz
Transaction = namedtuple('Transaction', ('amount', 'subaccount', 'metadata'))
# column names that are always present in DataFrames of transactions
COLS = ['amount', 'subaccount']
def _column_names_from_metadata(dicts):
"""
Get the unique set of keys from a list of dictionaries.
Parameters
----------
dicts : iterable
Sequence of dictionaries.
Returns
-------
keys : list
Unique set of keys.
"""
return list(tz.unique(tz.concat(dicts)))
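# Added note: iterating each dict yields its keys, so e.g.
# _column_names_from_metadata([{'a': 1}, {'b': 2, 'a': 3}]) == ['a', 'b'],
# preserving first-seen order.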
class Account(object):
"""
Keeps a record of transactions, metadata, and a running balance.
Parameters
----------
name : str
Arbitrary name for this account used in some output.
balance : float, optional
Starting balance for the account.
Attributes
----------
balance : float
Running balance in account.
"""
def __init__(self, name, balance=0):
self.name = name
self.balance = balance
self.transactions = []
def add_transaction(self, amount, subaccount=None, metadata=None):
"""
Add a new transaction to the account.
Parameters
----------
amount : float
            Negative for withdrawals, positive for deposits.
subaccount : object, optional
Any indicator of a subaccount to which this transaction applies.
metadata : dict, optional
Any extra metadata to record with the transaction.
(E.g. Info about where the money is coming from or going.)
May not contain keys 'amount' or 'subaccount'.
"""
metadata = metadata or {}
self.transactions.append(Transaction(amount, subaccount, metadata))
self.balance += amount
def add_transactions(self, transactions):
"""
Add a collection of transactions to the account.
Parameters
----------
transactions : iterable
Should be tuples of amount, subaccount, and metadata as would
be passed to `add_transaction`.
"""
for t in transactions:
self.add_transaction(*t)
def total_transactions(self):
"""
Get the sum of all transactions on the account.
Returns
-------
total : float
"""
return sum(t.amount for t in self.transactions)
def total_transactions_by_subacct(self, subaccount):
"""
Get the sum of all transactions for a given subaccount.
Parameters
----------
subaccount : object
Identifier of subaccount.
Returns
-------
total : float
"""
return sum(
t.amount for t in self.transactions if t.subaccount == subaccount)
def all_subaccounts(self):
"""
Returns an iterator of all subaccounts that have a recorded transaction
with the account.
"""
return tz.unique(t.subaccount for t in self.transactions)
def iter_subaccounts(self):
"""
An iterator over subaccounts yielding subaccount name and
the total of transactions for that subaccount.
"""
for sa in self.all_subaccounts():
yield sa, self.total_transactions_by_subacct(sa)
def to_frame(self):
"""
Return transactions as a pandas DataFrame.
"""
col_names = _column_names_from_metadata(
t.metadata for t in self.transactions)
def trow(t):
return tz.concatv(
(t.amount, t.subaccount),
(t.metadata.get(c) for c in col_names))
rows = [trow(t) for t in self.transactions]
if len(rows) == 0:
return pd.DataFrame(columns=COLS + col_names)
return pd.DataFrame(rows, columns=COLS + col_names)
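if __name__ == '__main__':
    # Added usage sketch (illustration only, not part of the original module):
    # record a deposit and a withdrawal on one subaccount and inspect totals.
    acct = Account('demo', balance=100.0)
    acct.add_transaction(50.0, subaccount='roads', metadata={'source': 'grant'})
    acct.add_transaction(-20.0, subaccount='roads')
    print(acct.balance)                                  # 130.0
    print(acct.total_transactions_by_subacct('roads'))   # 30.0
    print(acct.to_frame())                               # amount, subaccount, source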
|
bsd-3-clause
|
ashhher3/scikit-learn
|
sklearn/svm/tests/test_svm.py
|
6
|
24855
|
"""
Testing for Support Vector Machine module (sklearn.svm)
TODO: remove hard coded numerical results when possible
"""
import numpy as np
from numpy.testing import (assert_array_equal, assert_array_almost_equal,
assert_almost_equal)
from scipy import sparse
from nose.tools import assert_raises, assert_true, assert_equal, assert_false
from sklearn import svm, linear_model, datasets, metrics, base
from sklearn.datasets.samples_generator import make_classification
from sklearn.metrics import f1_score
from sklearn.utils import check_random_state
from sklearn.utils import ConvergenceWarning
from sklearn.utils.testing import assert_greater, assert_in, assert_less
from sklearn.utils.testing import assert_raises_regexp, assert_warns
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
Y = [1, 1, 1, 2, 2, 2]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [1, 2, 2]
# also load the iris dataset
iris = datasets.load_iris()
rng = check_random_state(42)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
def test_libsvm_parameters():
"""
Test parameters on classes that make use of libsvm.
"""
clf = svm.SVC(kernel='linear').fit(X, Y)
assert_array_equal(clf.dual_coef_, [[0.25, -.25]])
assert_array_equal(clf.support_, [1, 3])
assert_array_equal(clf.support_vectors_, (X[1], X[3]))
assert_array_equal(clf.intercept_, [0.])
assert_array_equal(clf.predict(X), Y)
def test_libsvm_iris():
"""Check consistency on dataset iris."""
# shuffle the dataset so that labels are not ordered
for k in ('linear', 'rbf'):
clf = svm.SVC(kernel=k).fit(iris.data, iris.target)
assert_greater(np.mean(clf.predict(iris.data) == iris.target), 0.9)
assert_array_equal(clf.classes_, np.sort(clf.classes_))
# check also the low-level API
model = svm.libsvm.fit(iris.data, iris.target.astype(np.float64))
pred = svm.libsvm.predict(iris.data, *model)
assert_greater(np.mean(pred == iris.target), .95)
model = svm.libsvm.fit(iris.data, iris.target.astype(np.float64),
kernel='linear')
pred = svm.libsvm.predict(iris.data, *model, kernel='linear')
assert_greater(np.mean(pred == iris.target), .95)
pred = svm.libsvm.cross_validation(iris.data,
iris.target.astype(np.float64), 5,
kernel='linear',
random_seed=0)
assert_greater(np.mean(pred == iris.target), .95)
# If random_seed >= 0, the libsvm rng is seeded (by calling `srand`), hence
# we should get deterministic results (assuming that no other thread is
# calling `srand` through this wrapper concurrently).
pred2 = svm.libsvm.cross_validation(iris.data,
iris.target.astype(np.float64), 5,
kernel='linear',
random_seed=0)
assert_array_equal(pred, pred2)
def test_single_sample_1d():
"""
Test whether SVCs work on a single sample given as a 1-d array
"""
clf = svm.SVC().fit(X, Y)
clf.predict(X[0])
clf = svm.LinearSVC(random_state=0).fit(X, Y)
clf.predict(X[0])
def test_precomputed():
"""
SVC with a precomputed kernel.
We test it with a toy dataset and with iris.
"""
clf = svm.SVC(kernel='precomputed')
# Gram matrix for train data (square matrix)
# (we use just a linear kernel)
K = np.dot(X, np.array(X).T)
clf.fit(K, Y)
# Gram matrix for test data (rectangular matrix)
KT = np.dot(T, np.array(X).T)
pred = clf.predict(KT)
assert_raises(ValueError, clf.predict, KT.T)
assert_array_equal(clf.dual_coef_, [[0.25, -.25]])
assert_array_equal(clf.support_, [1, 3])
assert_array_equal(clf.intercept_, [0])
assert_array_almost_equal(clf.support_, [1, 3])
assert_array_equal(pred, true_result)
# Gram matrix for test data but compute KT[i,j]
# for support vectors j only.
KT = np.zeros_like(KT)
for i in range(len(T)):
for j in clf.support_:
KT[i, j] = np.dot(T[i], X[j])
pred = clf.predict(KT)
assert_array_equal(pred, true_result)
# same as before, but using a callable function instead of the kernel
# matrix. kernel is just a linear kernel
kfunc = lambda x, y: np.dot(x, y.T)
clf = svm.SVC(kernel=kfunc)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_equal(clf.dual_coef_, [[0.25, -.25]])
assert_array_equal(clf.intercept_, [0])
assert_array_almost_equal(clf.support_, [1, 3])
assert_array_equal(pred, true_result)
# test a precomputed kernel with the iris dataset
# and check parameters against a linear SVC
clf = svm.SVC(kernel='precomputed')
clf2 = svm.SVC(kernel='linear')
K = np.dot(iris.data, iris.data.T)
clf.fit(K, iris.target)
clf2.fit(iris.data, iris.target)
pred = clf.predict(K)
assert_array_almost_equal(clf.support_, clf2.support_)
assert_array_almost_equal(clf.dual_coef_, clf2.dual_coef_)
assert_array_almost_equal(clf.intercept_, clf2.intercept_)
assert_almost_equal(np.mean(pred == iris.target), .99, decimal=2)
# Gram matrix for test data but compute KT[i,j]
# for support vectors j only.
K = np.zeros_like(K)
for i in range(len(iris.data)):
for j in clf.support_:
K[i, j] = np.dot(iris.data[i], iris.data[j])
pred = clf.predict(K)
assert_almost_equal(np.mean(pred == iris.target), .99, decimal=2)
clf = svm.SVC(kernel=kfunc)
clf.fit(iris.data, iris.target)
assert_almost_equal(np.mean(pred == iris.target), .99, decimal=2)
def test_svr():
"""
Test Support Vector Regression
"""
diabetes = datasets.load_diabetes()
for clf in (svm.NuSVR(kernel='linear', nu=.4, C=1.0),
svm.NuSVR(kernel='linear', nu=.4, C=10.),
svm.SVR(kernel='linear', C=10.),
svm.LinearSVR(C=10.),
svm.LinearSVR(C=10.),
):
clf.fit(diabetes.data, diabetes.target)
assert_greater(clf.score(diabetes.data, diabetes.target), 0.02)
# non-regression test; previously, BaseLibSVM would check that
# len(np.unique(y)) < 2, which must only be done for SVC
svm.SVR().fit(diabetes.data, np.ones(len(diabetes.data)))
svm.LinearSVR().fit(diabetes.data, np.ones(len(diabetes.data)))
def test_linearsvr():
# check that SVR(kernel='linear') and LinearSVC() give
# comparable results
diabetes = datasets.load_diabetes()
lsvr = svm.LinearSVR(C=1e3).fit(diabetes.data, diabetes.target)
score1 = lsvr.score(diabetes.data, diabetes.target)
svr = svm.SVR(kernel='linear', C=1e3).fit(diabetes.data, diabetes.target)
score2 = svr.score(diabetes.data, diabetes.target)
assert np.linalg.norm(lsvr.coef_ - svr.coef_) / np.linalg.norm(svr.coef_) < .1
assert np.abs(score1 - score2) < 0.1
def test_svr_errors():
X = [[0.0], [1.0]]
y = [0.0, 0.5]
# Bad kernel
clf = svm.SVR(kernel=lambda x, y: np.array([[1.0]]))
clf.fit(X, y)
assert_raises(ValueError, clf.predict, X)
def test_oneclass():
"""
Test OneClassSVM
"""
clf = svm.OneClassSVM()
clf.fit(X)
pred = clf.predict(T)
assert_array_almost_equal(pred, [-1, -1, -1])
assert_array_almost_equal(clf.intercept_, [-1.008], decimal=3)
assert_array_almost_equal(clf.dual_coef_,
[[0.632, 0.233, 0.633, 0.234, 0.632, 0.633]],
decimal=3)
assert_raises(ValueError, lambda: clf.coef_)
def test_oneclass_decision_function():
"""
Test OneClassSVM decision function
"""
clf = svm.OneClassSVM()
rnd = check_random_state(2)
# Generate train data
X = 0.3 * rnd.randn(100, 2)
X_train = np.r_[X + 2, X - 2]
# Generate some regular novel observations
X = 0.3 * rnd.randn(20, 2)
X_test = np.r_[X + 2, X - 2]
# Generate some abnormal novel observations
X_outliers = rnd.uniform(low=-4, high=4, size=(20, 2))
# fit the model
clf = svm.OneClassSVM(nu=0.1, kernel="rbf", gamma=0.1)
clf.fit(X_train)
# predict things
y_pred_test = clf.predict(X_test)
assert_greater(np.mean(y_pred_test == 1), .9)
y_pred_outliers = clf.predict(X_outliers)
assert_greater(np.mean(y_pred_outliers == -1), .9)
dec_func_test = clf.decision_function(X_test)
assert_array_equal((dec_func_test > 0).ravel(), y_pred_test == 1)
dec_func_outliers = clf.decision_function(X_outliers)
assert_array_equal((dec_func_outliers > 0).ravel(), y_pred_outliers == 1)
def test_tweak_params():
"""
Make sure some tweaking of parameters works.
We change clf.dual_coef_ at run time and expect .predict() to change
accordingly. Notice that this is not trivial since it involves a lot
of C/Python copying in the libsvm bindings.
The success of this test ensures that the mapping between libsvm and
the python classifier is complete.
"""
clf = svm.SVC(kernel='linear', C=1.0)
clf.fit(X, Y)
assert_array_equal(clf.dual_coef_, [[.25, -.25]])
assert_array_equal(clf.predict([[-.1, -.1]]), [1])
clf.dual_coef_ = np.array([[.0, 1.]])
assert_array_equal(clf.predict([[-.1, -.1]]), [2])
def test_probability():
"""
Predict probabilities using SVC
This uses cross validation, so we use a slightly bigger testing set.
"""
for clf in (svm.SVC(probability=True, random_state=0, C=1.0),
svm.NuSVC(probability=True, random_state=0)):
clf.fit(iris.data, iris.target)
prob_predict = clf.predict_proba(iris.data)
assert_array_almost_equal(
np.sum(prob_predict, 1), np.ones(iris.data.shape[0]))
assert_true(np.mean(np.argmax(prob_predict, 1)
== clf.predict(iris.data)) > 0.9)
assert_almost_equal(clf.predict_proba(iris.data),
np.exp(clf.predict_log_proba(iris.data)), 8)
def test_decision_function():
"""
Test decision_function
Sanity check, test that decision_function implemented in python
returns the same as the one in libsvm
"""
# multi class:
clf = svm.SVC(kernel='linear', C=0.1).fit(iris.data, iris.target)
dec = np.dot(iris.data, clf.coef_.T) + clf.intercept_
assert_array_almost_equal(dec, clf.decision_function(iris.data))
# binary:
clf.fit(X, Y)
dec = np.dot(X, clf.coef_.T) + clf.intercept_
prediction = clf.predict(X)
assert_array_almost_equal(dec.ravel(), clf.decision_function(X))
assert_array_almost_equal(
prediction,
clf.classes_[(clf.decision_function(X) > 0).astype(np.int)])
expected = np.array([-1., -0.66, -1., 0.66, 1., 1.])
assert_array_almost_equal(clf.decision_function(X), expected, 2)
def test_weight():
"""
Test class weights
"""
clf = svm.SVC(class_weight={1: 0.1})
# we give a small weights to class 1
clf.fit(X, Y)
# so all predicted values belong to class 2
assert_array_almost_equal(clf.predict(X), [2] * 6)
X_, y_ = make_classification(n_samples=200, n_features=10,
weights=[0.833, 0.167], random_state=2)
for clf in (linear_model.LogisticRegression(),
svm.LinearSVC(random_state=0), svm.SVC()):
clf.set_params(class_weight={0: .1, 1: 10})
clf.fit(X_[:100], y_[:100])
y_pred = clf.predict(X_[100:])
assert_true(f1_score(y_[100:], y_pred) > .3)
def test_sample_weights():
"""
Test weights on individual samples
"""
# TODO: check on NuSVR, OneClass, etc.
clf = svm.SVC()
clf.fit(X, Y)
assert_array_equal(clf.predict(X[2]), [1.])
sample_weight = [.1] * 3 + [10] * 3
clf.fit(X, Y, sample_weight=sample_weight)
assert_array_equal(clf.predict(X[2]), [2.])
# test that rescaling all samples is the same as changing C
clf = svm.SVC()
clf.fit(X, Y)
dual_coef_no_weight = clf.dual_coef_
clf.set_params(C=100)
clf.fit(X, Y, sample_weight=np.repeat(0.01, len(X)))
assert_array_almost_equal(dual_coef_no_weight, clf.dual_coef_)
def test_auto_weight():
"""Test class weights for imbalanced data"""
from sklearn.linear_model import LogisticRegression
# We take as dataset the two-dimensional projection of iris so
# that it is not separable and remove half of predictors from
# class 1.
# We add one to the targets as a non-regression test: class_weight="auto"
# used to work only when the labels were a range [0..K).
from sklearn.utils import compute_class_weight
X, y = iris.data[:, :2], iris.target + 1
unbalanced = np.delete(np.arange(y.size), np.where(y > 2)[0][::2])
classes = np.unique(y[unbalanced])
class_weights = compute_class_weight('auto', classes, y[unbalanced])
assert_true(np.argmax(class_weights) == 2)
for clf in (svm.SVC(kernel='linear'), svm.LinearSVC(random_state=0),
LogisticRegression()):
# check that score is better when class='auto' is set.
y_pred = clf.fit(X[unbalanced], y[unbalanced]).predict(X)
clf.set_params(class_weight='auto')
y_pred_balanced = clf.fit(X[unbalanced], y[unbalanced],).predict(X)
assert_true(metrics.f1_score(y, y_pred, average='weighted')
<= metrics.f1_score(y, y_pred_balanced,
average='weighted'))
def test_bad_input():
"""
Test that it gives proper exception on deficient input
"""
# impossible value of C
assert_raises(ValueError, svm.SVC(C=-1).fit, X, Y)
# impossible value of nu
clf = svm.NuSVC(nu=0.0)
assert_raises(ValueError, clf.fit, X, Y)
Y2 = Y[:-1] # wrong dimensions for labels
assert_raises(ValueError, clf.fit, X, Y2)
# Test with arrays that are non-contiguous.
for clf in (svm.SVC(), svm.LinearSVC(random_state=0)):
Xf = np.asfortranarray(X)
assert_false(Xf.flags['C_CONTIGUOUS'])
yf = np.ascontiguousarray(np.tile(Y, (2, 1)).T)
yf = yf[:, -1]
assert_false(yf.flags['F_CONTIGUOUS'])
assert_false(yf.flags['C_CONTIGUOUS'])
clf.fit(Xf, yf)
assert_array_equal(clf.predict(T), true_result)
# error for precomputed kernels
clf = svm.SVC(kernel='precomputed')
assert_raises(ValueError, clf.fit, X, Y)
# sample_weight bad dimensions
clf = svm.SVC()
assert_raises(ValueError, clf.fit, X, Y, sample_weight=range(len(X) - 1))
# predict with sparse input when trained with dense
clf = svm.SVC().fit(X, Y)
assert_raises(ValueError, clf.predict, sparse.lil_matrix(X))
Xt = np.array(X).T
clf.fit(np.dot(X, Xt), Y)
assert_raises(ValueError, clf.predict, X)
clf = svm.SVC()
clf.fit(X, Y)
assert_raises(ValueError, clf.predict, Xt)
def test_sparse_precomputed():
clf = svm.SVC(kernel='precomputed')
sparse_gram = sparse.csr_matrix([[1, 0], [0, 1]])
try:
clf.fit(sparse_gram, [0, 1])
assert not "reached"
except TypeError as e:
assert_in("Sparse precomputed", str(e))
def test_linearsvc_parameters():
"""
Test possible parameter combinations in LinearSVC
"""
# generate list of possible parameter combinations
params = [(dual, loss, penalty) for dual in [True, False]
for loss in ['l1', 'l2', 'lr'] for penalty in ['l1', 'l2']]
X, y = make_classification(n_samples=5, n_features=5)
for dual, loss, penalty in params:
clf = svm.LinearSVC(penalty=penalty, loss=loss, dual=dual)
if (loss == 'l1' and penalty == 'l1') or (
loss == 'l1' and penalty == 'l2' and not dual) or (
penalty == 'l1' and dual):
assert_raises(ValueError, clf.fit, X, y)
else:
clf.fit(X, y)
def test_linearsvc():
"""
Test basic routines using LinearSVC
"""
clf = svm.LinearSVC(random_state=0).fit(X, Y)
# by default should have intercept
assert_true(clf.fit_intercept)
assert_array_equal(clf.predict(T), true_result)
assert_array_almost_equal(clf.intercept_, [0], decimal=3)
# the same with l1 penalty
clf = svm.LinearSVC(penalty='l1', dual=False, random_state=0).fit(X, Y)
assert_array_equal(clf.predict(T), true_result)
# l2 penalty with dual formulation
clf = svm.LinearSVC(penalty='l2', dual=True, random_state=0).fit(X, Y)
assert_array_equal(clf.predict(T), true_result)
# l2 penalty, l1 loss
clf = svm.LinearSVC(penalty='l2', loss='l1', dual=True, random_state=0)
clf.fit(X, Y)
assert_array_equal(clf.predict(T), true_result)
# test also decision function
dec = clf.decision_function(T)
res = (dec > 0).astype(np.int) + 1
assert_array_equal(res, true_result)
def test_linearsvc_crammer_singer():
"""Test LinearSVC with crammer_singer multi-class svm"""
ovr_clf = svm.LinearSVC(random_state=0).fit(iris.data, iris.target)
cs_clf = svm.LinearSVC(multi_class='crammer_singer', random_state=0)
cs_clf.fit(iris.data, iris.target)
# similar prediction for ovr and crammer-singer:
assert_true((ovr_clf.predict(iris.data) ==
cs_clf.predict(iris.data)).mean() > .9)
# classifiers shouldn't be the same
assert_true((ovr_clf.coef_ != cs_clf.coef_).all())
# test decision function
assert_array_equal(cs_clf.predict(iris.data),
np.argmax(cs_clf.decision_function(iris.data), axis=1))
dec_func = np.dot(iris.data, cs_clf.coef_.T) + cs_clf.intercept_
assert_array_almost_equal(dec_func, cs_clf.decision_function(iris.data))
def test_crammer_singer_binary():
"""Test Crammer-Singer formulation in the binary case"""
X, y = make_classification(n_classes=2, random_state=0)
for fit_intercept in (True, False):
acc = svm.LinearSVC(fit_intercept=fit_intercept,
multi_class="crammer_singer",
random_state=0).fit(X, y).score(X, y)
assert_greater(acc, 0.9)
def test_linearsvc_iris():
"""
Test that LinearSVC gives plausible predictions on the iris dataset
Also, test symbolic class names (classes_).
"""
target = iris.target_names[iris.target]
clf = svm.LinearSVC(random_state=0).fit(iris.data, target)
assert_equal(set(clf.classes_), set(iris.target_names))
assert_greater(np.mean(clf.predict(iris.data) == target), 0.8)
dec = clf.decision_function(iris.data)
pred = iris.target_names[np.argmax(dec, 1)]
assert_array_equal(pred, clf.predict(iris.data))
def test_dense_liblinear_intercept_handling(classifier=svm.LinearSVC):
"""
Test that dense liblinear honours intercept_scaling param
"""
X = [[2, 1],
[3, 1],
[1, 3],
[2, 3]]
y = [0, 0, 1, 1]
clf = classifier(fit_intercept=True, penalty='l1', loss='l2',
dual=False, C=4, tol=1e-7, random_state=0)
assert_true(clf.intercept_scaling == 1, clf.intercept_scaling)
assert_true(clf.fit_intercept)
# when intercept_scaling is low the intercept value is highly "penalized"
# by regularization
clf.intercept_scaling = 1
clf.fit(X, y)
assert_almost_equal(clf.intercept_, 0, decimal=5)
# when intercept_scaling is sufficiently high, the intercept value
# is not affected by regularization
clf.intercept_scaling = 100
clf.fit(X, y)
intercept1 = clf.intercept_
assert_less(intercept1, -1)
# when intercept_scaling is sufficiently high, the intercept value
# doesn't depend on intercept_scaling value
clf.intercept_scaling = 1000
clf.fit(X, y)
intercept2 = clf.intercept_
assert_array_almost_equal(intercept1, intercept2, decimal=2)
def test_liblinear_set_coef():
# multi-class case
clf = svm.LinearSVC().fit(iris.data, iris.target)
values = clf.decision_function(iris.data)
clf.coef_ = clf.coef_.copy()
clf.intercept_ = clf.intercept_.copy()
values2 = clf.decision_function(iris.data)
assert_array_almost_equal(values, values2)
# binary-class case
X = [[2, 1],
[3, 1],
[1, 3],
[2, 3]]
y = [0, 0, 1, 1]
clf = svm.LinearSVC().fit(X, y)
values = clf.decision_function(X)
clf.coef_ = clf.coef_.copy()
clf.intercept_ = clf.intercept_.copy()
values2 = clf.decision_function(X)
assert_array_equal(values, values2)
def test_immutable_coef_property():
"""Check that primal coef modification are not silently ignored"""
svms = [
svm.SVC(kernel='linear').fit(iris.data, iris.target),
svm.NuSVC(kernel='linear').fit(iris.data, iris.target),
svm.SVR(kernel='linear').fit(iris.data, iris.target),
svm.NuSVR(kernel='linear').fit(iris.data, iris.target),
svm.OneClassSVM(kernel='linear').fit(iris.data),
]
for clf in svms:
assert_raises(AttributeError, clf.__setattr__, 'coef_', np.arange(3))
assert_raises((RuntimeError, ValueError),
clf.coef_.__setitem__, (0, 0), 0)
def test_inheritance():
# check that SVC classes can do inheritance
class ChildSVC(svm.SVC):
def __init__(self, foo=0):
self.foo = foo
svm.SVC.__init__(self)
clf = ChildSVC()
clf.fit(iris.data, iris.target)
clf.predict(iris.data[-1])
clf.decision_function(iris.data[-1])
def test_linearsvc_verbose():
# stdout: redirect
import os
stdout = os.dup(1) # save original stdout
os.dup2(os.pipe()[1], 1) # replace it
# actual call
clf = svm.LinearSVC(verbose=1)
clf.fit(X, Y)
# stdout: restore
os.dup2(stdout, 1) # restore original stdout
def test_svc_clone_with_callable_kernel():
# create SVM with callable linear kernel, check that results are the same
# as with built-in linear kernel
svm_callable = svm.SVC(kernel=lambda x, y: np.dot(x, y.T),
probability=True, random_state=0)
# clone for checking clonability with lambda functions..
svm_cloned = base.clone(svm_callable)
svm_cloned.fit(iris.data, iris.target)
svm_builtin = svm.SVC(kernel='linear', probability=True, random_state=0)
svm_builtin.fit(iris.data, iris.target)
assert_array_almost_equal(svm_cloned.dual_coef_,
svm_builtin.dual_coef_)
assert_array_almost_equal(svm_cloned.intercept_,
svm_builtin.intercept_)
assert_array_equal(svm_cloned.predict(iris.data),
svm_builtin.predict(iris.data))
assert_array_almost_equal(svm_cloned.predict_proba(iris.data),
svm_builtin.predict_proba(iris.data),
decimal=4)
assert_array_almost_equal(svm_cloned.decision_function(iris.data),
svm_builtin.decision_function(iris.data))
def test_svc_bad_kernel():
svc = svm.SVC(kernel=lambda x, y: x)
assert_raises(ValueError, svc.fit, X, Y)
def test_timeout():
a = svm.SVC(kernel=lambda x, y: np.dot(x, y.T), probability=True,
random_state=0, max_iter=1)
assert_warns(ConvergenceWarning, a.fit, X, Y)
def test_unfitted():
X = "foo!" # input validation not required when SVM not fitted
clf = svm.SVC()
assert_raises_regexp(Exception, r".*\bSVC\b.*\bnot\b.*\bfitted\b",
clf.predict, X)
clf = svm.NuSVR()
assert_raises_regexp(Exception, r".*\bNuSVR\b.*\bnot\b.*\bfitted\b",
clf.predict, X)
def test_consistent_proba():
a = svm.SVC(probability=True, max_iter=1, random_state=0)
proba_1 = a.fit(X, Y).predict_proba(X)
a = svm.SVC(probability=True, max_iter=1, random_state=0)
proba_2 = a.fit(X, Y).predict_proba(X)
assert_array_almost_equal(proba_1, proba_2)
def test_linear_svc_convergence_warnings():
"""Test that warnings are raised if model does not converge"""
lsvc = svm.LinearSVC(max_iter=2, verbose=1)
assert_warns(ConvergenceWarning, lsvc.fit, X, Y)
assert_equal(lsvc.n_iter_, 2)
def test_svr_coef_sign():
"""Test that SVR(kernel="linear") has coef_ with the right sign."""
# Non-regression test for #2933.
X = np.random.RandomState(21).randn(10, 3)
y = np.random.RandomState(12).randn(10)
for svr in [svm.SVR(kernel='linear'), svm.NuSVR(kernel='linear'),
svm.LinearSVR()]:
svr.fit(X, y)
assert_array_almost_equal(svr.predict(X),
np.dot(X, svr.coef_.ravel()) + svr.intercept_)
if __name__ == '__main__':
import nose
nose.runmodule()
|
bsd-3-clause
|
iurilarosa/thesis
|
codici/work in progress/new with plhold/newhmplhol.py
|
1
|
3425
|
import tensorflow as tf
import numpy
import scipy.io
import time
sessione = tf.Session()
#---------------------------------------
# defining parameters |
#---------------------------------------
percorsoDati = "/home/protoss/Documenti/TESI/wn100bkp/data/dati9mesi52HWI.mat"
#times
PAR_tFft = 8192
PAR_tObs = 9  # months
PAR_tObs = PAR_tObs*30*24*60*60
PAR_epoch = (57722+57990)/2
#frequencies
PAR_enhance = 10
PAR_stepFreq = 1/PAR_tFft
PAR_refinedStepFreq = PAR_stepFreq/PAR_enhance
#spindowns
PAR_fdotMin = -1e-9
PAR_fdotMax = 1e-10
PAR_stepFdot = PAR_stepFreq/PAR_tObs
PAR_nstepFdot = numpy.round((PAR_fdotMax-PAR_fdotMin)/PAR_stepFdot).astype(numpy.int32)
#others
PAR_secbelt = 4000
#---------------------------------------
# loading and managing data |
#---------------------------------------
struttura = scipy.io.loadmat(percorsoDati)['job_pack_0']
#times
tempi = struttura['peaks'][0,0][0]
tempi = tempi-PAR_epoch
tempi = ((tempi)*60*60*24/PAR_refinedStepFreq)
#frequencies
frequenze = struttura['peaks'][0,0][1]
freqMin = numpy.amin(frequenze)
freqMax = numpy.amax(frequenze)
freqIniz = freqMin- PAR_stepFreq/2 - PAR_refinedStepFreq
freqFin = freqMax + PAR_stepFreq/2 + PAR_refinedStepFreq
nstepFrequenze = numpy.ceil((freqFin-freqIniz)/PAR_refinedStepFreq)+PAR_secbelt
frequenze = frequenze-freqIniz
frequenze = (frequenze/PAR_refinedStepFreq)-round(PAR_enhance/2+0.001)
#spindowns
spindowns = numpy.arange(0, PAR_nstepFdot)
spindowns = numpy.multiply(spindowns,PAR_stepFdot)
spindowns = numpy.add(spindowns, PAR_fdotMin)
#others
pesi = (struttura['peaks'][0,0][4]+1)
peakmap = numpy.stack((tempi,frequenze,pesi),1).astype(numpy.float32)
print(peakmap.shape)
spindowns = spindowns.astype(numpy.float32)
nRows = numpy.int32(PAR_nstepFdot)
nColumns = numpy.int32(nstepFrequenze)
#---------------------------------------
# defining TensorFlow graph |
#---------------------------------------
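# mapnonVar computes one row of the Hough map per spindown value: the i-th
# spindown is multiplied by the peak times, that drift is subtracted from the
# peak frequencies, and the peak weights are accumulated into frequency bins
# with tf.unsorted_segment_sum.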
def mapnonVar(stepIesimo):
sdTimed = tf.multiply(spindownsTF[stepIesimo], tempiTF, name = "Tdotpert")
appoggio = tf.round(frequenzeTF-sdTimed+PAR_secbeltTF/2, name = "appoggioperindici")
appoggio = tf.cast(appoggio, dtype=tf.int32)
valori = tf.unsorted_segment_sum(pesiTF, appoggio, nColumns)
return valori
PAR_secbeltTF = tf.constant(4000,dtype = tf.float32, name = 'secur')
tempiTF = tf.placeholder(tf.float32, name = 't')
frequenzeTF = tf.placeholder(tf.float32, name = 'f')
pesiTF = tf.placeholder(tf.float32, name = 'w')
spindownsTF = tf.placeholder(tf.float32, name = 'sd')
#pesiTF = tf.reshape(pesiTF,(1,tf.size(pesi)))
#pesiTF = pesiTF[0]
houghLeft = tf.map_fn(mapnonVar, tf.range(0, nRows), dtype=tf.float32, parallel_iterations=8)
houghRight = houghLeft[:,PAR_enhance:nColumns]-houghLeft[:,0:nColumns - PAR_enhance]
houghDiff = tf.concat([houghLeft[:,0:PAR_enhance],houghRight],1)
houghMap = tf.cumsum(houghDiff, axis = 1)
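# The map is assembled differentially: houghLeft bins each peak at a single
# frequency index, houghRight subtracts a copy shifted by PAR_enhance bins, and
# the cumulative sum along the frequency axis spreads every peak over a band
# PAR_enhance bins wide (a sliding-window sum of width PAR_enhance).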
#----------------------------------------
# feeding and running |
#----------------------------------------
dizionario = {
tempiTF : tempi,
frequenzeTF : frequenze,
pesiTF : pesi,
spindownsTF : spindowns
}
start = time.time()
image = sessione.run(houghMap, feed_dict = dizionario)
end = time.time()
print(end-start)
from matplotlib import pyplot
a = pyplot.imshow(image, aspect = 200)
pyplot.show()
|
gpl-3.0
|
dr3y/gibsonsimulator
|
stochastickinetics.py
|
1
|
10943
|
#from pylab import figure, plot, xlabel, grid, hold, legend, title, savefig
import matplotlib.pyplot as plt
from scipy.integrate import odeint
import random
from scipy.misc import comb
import math
from copy import deepcopy as cp
from matplotlib.mlab import griddata
import numpy as np
import time
def timestep(X=[],R=[],P={},endlist=[]):
'''Return the time until the next reaction happens and which reaction it was.'''
#X is a list of numbers of ends.
#R is a list of which ends react with which
#also it tells us what sequence of fragments the molecules have
if(X==[]):
X=[100,100,100,100]
#sum up all the values in a row
if(R==[]):
R = [[1,2,.1],[1,4,.1],[1,5,.05],
[2,1,.1],[2,4,.01],
]
#this lists two ends which can react, and their rate
#so when a reaction happens we need to remove the proper ends
if(P=={}):
P={'001_002r_003':(20,0,4),'000_003_002r':(10,5,3)}
#each polymer has (amount, leftend, rightend)
if(endlist ==[]):
endlist = [[['001_002r_003',0],['000_003_002r',1]],[['001_002r_003',1],['000_003_002r',0]]]
# [which polymer it belongs to, whether it's the right end]
A=[]
A_0 = 0
i=0
for reaction in R:
#no fancy combinatorial calcs, just
#multiply the amount of one end that reacts
#by the amount of the other end that reacts
h=X[reaction[0]]*X[reaction[1]]
ApoA = h*reaction[2]
A+=[ApoA]
A_0 += ApoA
i+=1
r1 = random.random()#+.0001
r2 = random.random()
#if(A_0 <=0.001):
# A_0 == 0.001
#mlog = math.log(1.0/r1)
#if(mlog <= 0.001):
# mlog = 0.001
Tau = 1.0/A_0*math.log(1.0/r1)
reactionchoice = r2*A_0
ApoMoo = 0.0
Moo = 0
for aval in A:
ApoMoo += aval
if(ApoMoo >= reactionchoice):
break
Moo += 1
#print 'moo'
#print Moo
reaction = R[Moo]
#now, remove both ends that reacted
#somehow we also need to record that this fragment was made and
#how many of them exist
#.0...1...2...3...4.
#0 1 2 3 4 5 6 7 8 9
#now we subtract from the ends that reacted
#print P
'''
print "here is the reaction {}".format(reaction)
print "X values {}".format([X[reaction[0]],X[reaction[1]]])
print "pre-endlist"
print endlist[reaction[0]]
print "pre-endlist"
print endlist[reaction[1]]
print "pre-counts"
c1 =[P[a[0]][0] for a in endlist[reaction[0]]]
c2 = [P[a[0]][0] for a in endlist[reaction[1]]]
print c1
print c2
if(sum(c1) != X[reaction[0]]):
print "ERRRRROOOOORRRRRR"
time.sleep(10)
if(sum(c2) != X[reaction[1]]):
print "ERRRROOOOOOORRRRRR"
time.sleep(10)
'''
pol1pick = random.randint(0,X[reaction[0]])
pol2pick = random.randint(0,X[reaction[1]])
pol1 = ''
pol2 = ''
#print X
#print P
#print "endlist reaction 1"
#print endlist[reaction[0]]
#print "endlist reaction 2"
#print endlist[reaction[1]]
for polymer in endlist[reaction[0]]:
# print "pol1pick is {}".format(pol1pick)
polamt = 0
try:
polamt = P[polymer[0]][0]
except KeyError:
continue
#if the thing we are looking for does not exist,
#remove that entry in endlist and also set polamt to zero
#print endlist[reaction[0]]
#badind = endlist[reaction[0]].index(polymer)
#del endlist[reaction[0]][badind]
#print "polamt is {}".format(polamt)
pol1pick-= polamt
if(pol1pick <= 0):
pol1 = cp(polymer)
break;
for polymer in endlist[reaction[1]]:
#print "pol2pick is {}".format(pol2pick)
polamt = 0
try:
polamt = P[polymer[0]][0]
except KeyError:
continue
#badind = endlist[reaction[0]].index(polymer)
#del endlist[reaction[0]][badind]
#print "polamt is {}".format(polamt)
pol2pick-= polamt
if(pol2pick <= 0):
pol2 = cp(polymer)
break;
#print "pol1pick {}".format(pol1pick)
#print "pol2pick {}".format(pol2pick)
# print "pol1 is {}".format(pol1)
#print "pol2 is {}".format(pol2)
#decide which one you need to split
flipp1 = not pol1[1]
flipp2 = pol2[1]
p1str = pol1[0]#.split('_')
p2str = pol2[0]#.split('_')
#construct the new polymer code
leftend = P[pol1[0]][1]
rightend = P[pol2[0]][2]
if(flipp1 and flipp2):
newpol = '{}_{}'.format(p2str,p1str)
leftend = P[pol2[0]][1]
rightend = P[pol1[0]][2]
else:
if(flipp1):
leftend = P[pol1[0]][2]
p1str = rcPolymer(p1str)
if(flipp2):
rightend = P[pol2[0]][1]
p2str = rcPolymer(p2str)
newpol = '{}_{}'.format(p1str,p2str)
#reactants are gone
P[pol1[0]] = (P[pol1[0]][0]-1,P[pol1[0]][1],P[pol1[0]][2])
if(P[pol1[0]][0]==0): #if there are zero left, delete it
del P[pol1[0]]
if(pol2[0] != pol1[0]):
P[pol2[0]] = (P[pol2[0]][0]-1,P[pol2[0]][1],P[pol2[0]][2])
if(P[pol2[0]][0]==0):
del P[pol2[0]]
#ends are gone
X[reaction[0]]-=1
X[reaction[1]]-=1
#print "reaction!"
#new polymer is recorded
reverse = False
npolrc = rcPolymer(newpol)
try:
P[newpol] =(P[newpol][0]+ 1,P[newpol][1],P[newpol][2])
#print "found forwards"
except KeyError:
try:
npolrc = rcPolymer(newpol)
P[npolrc]=(P[npolrc][0] + 1,P[npolrc][1],P[npolrc][2])
reverse=True
#print "found RC"
except KeyError:
P[newpol] = (1,leftend,rightend)
# print "found nothing"
if(reverse):
newpol = rcPolymer(newpol)
lend = leftend
rend = rightend
leftend = rend
rightend = lend
#new ends are recorded
# print "appended!!"
lp = (newpol,0)
rp = (newpol,1)
if(not lp in endlist[leftend]):
endlist[leftend].append(lp)
if(not rp in endlist[rightend]):
endlist[rightend].append(rp)
#print X
'''
print "after reaction"
print "X-values {}".format([X[reaction[0]],X[reaction[1]]])
print "new polymer {} {}".format(newpol, P[newpol])
print "endlists"
print endlist[leftend]
print endlist[rightend]
print "post-counts"
c1 =[P[a[0]][0] for a in endlist[reaction[0]]]
c2 = [P[a[0]][0] for a in endlist[reaction[1]]]
print c1
print c2
if(sum(c1) != X[reaction[0]]):
print "ERRRRROOOOORRRRRR"
time.sleep(10)
if(sum(c2) != X[reaction[1]]):
print "ERRRROOOOOOORRRRRR"
time.sleep(10)
'''
return Tau,X,endlist,P
#"""
def rcPolymer(polinpt):
# print "input {}".format(polinpt)
polinpt = polinpt.split("_")
polstr = polinpt[::-1]
for e in range(len(polstr)):
#print polstr
if(polstr[e][-1]=='r'):
polstr[e] = polstr[e][:-1]
else:
polstr[e] = polstr[e]+'r'
polstr = "_".join(polstr)
#print polstr
return polstr
def avgRuns(runscore):
'''Compute the average trajectory across many simulation runs.'''
binsize = 0.01
time = 1.0
tlist = []
alist = []
blist = []
clist = []
for run in runscore:
i=0
for reaction in range(len(run[0])):
time = run[0][reaction]
conca = run[1][reaction]
concb = run[2][reaction]
concc = run[3][reaction]
if(len(tlist)<i+1):
tlist+=[[time]]
else:
tlist[i]+=[time]
if(len(alist)<i+1):
alist+=[[conca]]
else:
alist[i]+=[conca]
if(len(blist)<i+1):
blist+=[[concb]]
else:
blist[i]+=[concb]
if(len(clist)<i+1):
clist+=[[concc]]
else:
clist[i]+=[concc]
i+=1
outlist = [[float(sum(a))/len(a) for a in tlist],
[float(sum(a))/len(a) for a in alist],
[float(sum(a))/len(a) for a in blist],
[float(sum(a))/len(a) for a in clist]]
return outlist
def partC():
t=[stoptime * float(i) / (numpoints-1.0) for i in range(numpoints)]
wsol = odeint(autocat,w0,t,args=(rateconstants,))#,atol=abserr,rtol=relerr)
#plt.figure()
#print wsol
X=[]
Y=[]
Z=[]
for el in wsol:
X += [el[0]]
Y+=[el[1]]
Z+=[el[2]]
print len(t)
print len(X)
plt.plot(t,X,'--')
plt.plot(t,Y,'--')
plt.plot(t,Z,'--')
plt.legend(['A','B','Z'])
#plt.show()
def partD(runscore = []):
time = 1.0 #1 second
X = [1,1,100]
#X[2] = int(random.random()*20+90)
accX = [cp(X)]
accT = [0]
worktime = 0.0
while worktime < time:
t,newX = timestep(X)
accX+=[cp(newX)]
X = newX
worktime+=t
accT+=[worktime]
#print accX
#print levelslist
x1 = [a[0] for a in accX]
x2 = [a[1] for a in accX]
x3 = [a[2] for a in accX]
run = [accT,x1,x2,x3]
runscore+=[run]
plt.plot(accT,x1)
plt.plot(accT,x2)
plt.plot(accT,x3)
plt.legend(['A','B','Z'])
return runscore
def partE():
runscore = []
for a in range(100):
runscore = partD(runscore)
print len(runscore)
avruns = avgRuns(runscore)
plt.axis([0.0,1.0,0,100])
plt.show()
plt.plot(avruns[0],avruns[1],'--')
plt.plot(avruns[0],avruns[2],'--')
plt.plot(avruns[0],avruns[3],'--')
plt.legend(['A','B','Z'])
return runscore
def partF(runscore):
finalVals=[[],[]]
for run in runscore:
finalVals[0]+=[run[1][-1]]
finalVals[1]+=[run[2][-1]]
#z = griddata(
z = plt.hist2d(finalVals[0],finalVals[1],bins=11)[0]
plt.show()
x = [a*10 for a in range(11)]#finalVals[0]
y = [a*10 for a in range(11)]#finalVals[1]
print x
print y
print z
plt.contourf(x,y,z)
plt.colorbar()
#hist2d(finalVals[0],finalVals[1],bins=25)
if(__name__=="__main__"):
w0 = [1,1,100] #initial conditions of variables
rateconstants = [0.09,0.09] #rate constants
abserr = 1.0e-8
relerr = 1.0e-6
stoptime = 1.0
numpoints = 250
#runscore = partE()
partC()
plt.xlabel("time")
plt.ylabel("Number of molecules")
plt.axis([0.0,1.0,0,100])
plt.show()
'''
partF(runscore)
plt.xlabel("A")
plt.ylabel("B")
plt.axis([0,100,0,100])
plt.show()'''
|
mit
|
pnedunuri/scikit-learn
|
sklearn/utils/__init__.py
|
79
|
14202
|
"""
The :mod:`sklearn.utils` module includes various utilities.
"""
from collections import Sequence
import numpy as np
from scipy.sparse import issparse
import warnings
from .murmurhash import murmurhash3_32
from .validation import (as_float_array,
assert_all_finite,
check_random_state, column_or_1d, check_array,
check_consistent_length, check_X_y, indexable,
check_symmetric, DataConversionWarning)
from .class_weight import compute_class_weight, compute_sample_weight
from ..externals.joblib import cpu_count
__all__ = ["murmurhash3_32", "as_float_array",
"assert_all_finite", "check_array",
"check_random_state",
"compute_class_weight", "compute_sample_weight",
"column_or_1d", "safe_indexing",
"check_consistent_length", "check_X_y", 'indexable',
"check_symmetric"]
class deprecated(object):
"""Decorator to mark a function or class as deprecated.
Issue a warning when the function is called/the class is instantiated and
add a warning to the docstring.
The optional extra argument will be appended to the deprecation message
and the docstring. Note: to use this with the default value for extra, put
in an empty set of parentheses:
>>> from sklearn.utils import deprecated
>>> deprecated() # doctest: +ELLIPSIS
<sklearn.utils.deprecated object at ...>
>>> @deprecated()
... def some_function(): pass
"""
# Adapted from http://wiki.python.org/moin/PythonDecoratorLibrary,
# but with many changes.
def __init__(self, extra=''):
"""
Parameters
----------
extra: string
to be added to the deprecation messages
"""
self.extra = extra
def __call__(self, obj):
if isinstance(obj, type):
return self._decorate_class(obj)
else:
return self._decorate_fun(obj)
def _decorate_class(self, cls):
msg = "Class %s is deprecated" % cls.__name__
if self.extra:
msg += "; %s" % self.extra
# FIXME: we should probably reset __new__ for full generality
init = cls.__init__
def wrapped(*args, **kwargs):
warnings.warn(msg, category=DeprecationWarning)
return init(*args, **kwargs)
cls.__init__ = wrapped
wrapped.__name__ = '__init__'
wrapped.__doc__ = self._update_doc(init.__doc__)
wrapped.deprecated_original = init
return cls
def _decorate_fun(self, fun):
"""Decorate function fun"""
msg = "Function %s is deprecated" % fun.__name__
if self.extra:
msg += "; %s" % self.extra
def wrapped(*args, **kwargs):
warnings.warn(msg, category=DeprecationWarning)
return fun(*args, **kwargs)
wrapped.__name__ = fun.__name__
wrapped.__dict__ = fun.__dict__
wrapped.__doc__ = self._update_doc(fun.__doc__)
return wrapped
def _update_doc(self, olddoc):
newdoc = "DEPRECATED"
if self.extra:
newdoc = "%s: %s" % (newdoc, self.extra)
if olddoc:
newdoc = "%s\n\n%s" % (newdoc, olddoc)
return newdoc
def safe_mask(X, mask):
"""Return a mask which is safe to use on X.
Parameters
----------
X : {array-like, sparse matrix}
Data on which to apply mask.
mask : array
Mask to be used on X.
Returns
-------
mask
"""
mask = np.asarray(mask)
if np.issubdtype(mask.dtype, np.int):
return mask
if hasattr(X, "toarray"):
ind = np.arange(mask.shape[0])
mask = ind[mask]
return mask
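# Hedged usage sketch (not part of the original module): with a sparse matrix,
# a boolean mask is converted to integer row indices so that X[mask] stays
# valid, e.g.
#     safe_mask(scipy.sparse.csr_matrix(np.eye(3)), np.array([True, False, True]))
# returns array([0, 2]), while for a dense array the boolean mask is returned
# unchanged.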
def safe_indexing(X, indices):
"""Return items or rows from X using indices.
Allows simple indexing of lists or arrays.
Parameters
----------
X : array-like, sparse-matrix, list.
Data from which to sample rows or items.
indices : array-like, list
Indices according to which X will be subsampled.
"""
if hasattr(X, "iloc"):
# Pandas Dataframes and Series
try:
return X.iloc[indices]
except ValueError:
# Cython typed memoryviews internally used in pandas do not support
# readonly buffers.
warnings.warn("Copying input dataframe for slicing.",
DataConversionWarning)
return X.copy().iloc[indices]
elif hasattr(X, "shape"):
if hasattr(X, 'take') and (hasattr(indices, 'dtype') and
indices.dtype.kind == 'i'):
# This is often substantially faster than X[indices]
return X.take(indices, axis=0)
else:
return X[indices]
else:
return [X[idx] for idx in indices]
def resample(*arrays, **options):
"""Resample arrays or sparse matrices in a consistent way
The default strategy implements one step of the bootstrapping
procedure.
Parameters
----------
*arrays : sequence of indexable data-structures
Indexable data-structures can be arrays, lists, dataframes or scipy
sparse matrices with consistent first dimension.
replace : boolean, True by default
Implements resampling with replacement. If False, this will implement
(sliced) random permutations.
n_samples : int, None by default
Number of samples to generate. If left to None this is
automatically set to the first dimension of the arrays.
random_state : int or RandomState instance
Control the shuffling for reproducible behavior.
Returns
-------
resampled_arrays : sequence of indexable data-structures
Sequence of resampled views of the collections. The original arrays are
not impacted.
Examples
--------
It is possible to mix sparse and dense arrays in the same run::
>>> X = np.array([[1., 0.], [2., 1.], [0., 0.]])
>>> y = np.array([0, 1, 2])
>>> from scipy.sparse import coo_matrix
>>> X_sparse = coo_matrix(X)
>>> from sklearn.utils import resample
>>> X, X_sparse, y = resample(X, X_sparse, y, random_state=0)
>>> X
array([[ 1., 0.],
[ 2., 1.],
[ 1., 0.]])
>>> X_sparse # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
<3x2 sparse matrix of type '<... 'numpy.float64'>'
with 4 stored elements in Compressed Sparse Row format>
>>> X_sparse.toarray()
array([[ 1., 0.],
[ 2., 1.],
[ 1., 0.]])
>>> y
array([0, 1, 0])
>>> resample(y, n_samples=2, random_state=0)
array([0, 1])
See also
--------
:func:`sklearn.utils.shuffle`
"""
random_state = check_random_state(options.pop('random_state', None))
replace = options.pop('replace', True)
max_n_samples = options.pop('n_samples', None)
if options:
raise ValueError("Unexpected kw arguments: %r" % options.keys())
if len(arrays) == 0:
return None
first = arrays[0]
n_samples = first.shape[0] if hasattr(first, 'shape') else len(first)
if max_n_samples is None:
max_n_samples = n_samples
if max_n_samples > n_samples:
raise ValueError("Cannot sample %d out of arrays with dim %d" % (
max_n_samples, n_samples))
check_consistent_length(*arrays)
if replace:
indices = random_state.randint(0, n_samples, size=(max_n_samples,))
else:
indices = np.arange(n_samples)
random_state.shuffle(indices)
indices = indices[:max_n_samples]
# convert sparse matrices to CSR for row-based indexing
arrays = [a.tocsr() if issparse(a) else a for a in arrays]
resampled_arrays = [safe_indexing(a, indices) for a in arrays]
if len(resampled_arrays) == 1:
# syntactic sugar for the unit argument case
return resampled_arrays[0]
else:
return resampled_arrays
def shuffle(*arrays, **options):
"""Shuffle arrays or sparse matrices in a consistent way
This is a convenience alias to ``resample(*arrays, replace=False)`` to do
random permutations of the collections.
Parameters
----------
*arrays : sequence of indexable data-structures
Indexable data-structures can be arrays, lists, dataframes or scipy
sparse matrices with consistent first dimension.
random_state : int or RandomState instance
Control the shuffling for reproducible behavior.
n_samples : int, None by default
Number of samples to generate. If left to None this is
automatically set to the first dimension of the arrays.
Returns
-------
shuffled_arrays : sequence of indexable data-structures
Sequence of shuffled views of the collections. The original arrays are
not impacted.
Examples
--------
It is possible to mix sparse and dense arrays in the same run::
>>> X = np.array([[1., 0.], [2., 1.], [0., 0.]])
>>> y = np.array([0, 1, 2])
>>> from scipy.sparse import coo_matrix
>>> X_sparse = coo_matrix(X)
>>> from sklearn.utils import shuffle
>>> X, X_sparse, y = shuffle(X, X_sparse, y, random_state=0)
>>> X
array([[ 0., 0.],
[ 2., 1.],
[ 1., 0.]])
>>> X_sparse # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
<3x2 sparse matrix of type '<... 'numpy.float64'>'
with 3 stored elements in Compressed Sparse Row format>
>>> X_sparse.toarray()
array([[ 0., 0.],
[ 2., 1.],
[ 1., 0.]])
>>> y
array([2, 1, 0])
>>> shuffle(y, n_samples=2, random_state=0)
array([0, 1])
See also
--------
:func:`sklearn.utils.resample`
"""
options['replace'] = False
return resample(*arrays, **options)
def safe_sqr(X, copy=True):
"""Element wise squaring of array-likes and sparse matrices.
Parameters
----------
X : array like, matrix, sparse matrix
copy : boolean, optional, default True
Whether to create a copy of X and operate on it or to perform
inplace computation (default behaviour).
Returns
-------
X ** 2 : element wise square
"""
X = check_array(X, accept_sparse=['csr', 'csc', 'coo'], ensure_2d=False)
if issparse(X):
if copy:
X = X.copy()
X.data **= 2
else:
if copy:
X = X ** 2
else:
X **= 2
return X
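# Usage sketch (illustrative only): safe_sqr(np.array([1, -2, 3])) returns
# array([1, 4, 9]); sparse input keeps its format and only the stored .data
# values are squared, while copy=False squares the input in place instead.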
def gen_batches(n, batch_size):
"""Generator to create slices containing batch_size elements, from 0 to n.
The last slice may contain less than batch_size elements, when batch_size
does not divide n.
Examples
--------
>>> from sklearn.utils import gen_batches
>>> list(gen_batches(7, 3))
[slice(0, 3, None), slice(3, 6, None), slice(6, 7, None)]
>>> list(gen_batches(6, 3))
[slice(0, 3, None), slice(3, 6, None)]
>>> list(gen_batches(2, 3))
[slice(0, 2, None)]
"""
start = 0
for _ in range(int(n // batch_size)):
end = start + batch_size
yield slice(start, end)
start = end
if start < n:
yield slice(start, n)
def gen_even_slices(n, n_packs, n_samples=None):
"""Generator to create n_packs slices going up to n.
Pass n_samples when the slices are to be used for sparse matrix indexing;
slicing off-the-end raises an exception, while it works for NumPy arrays.
Examples
--------
>>> from sklearn.utils import gen_even_slices
>>> list(gen_even_slices(10, 1))
[slice(0, 10, None)]
>>> list(gen_even_slices(10, 10)) #doctest: +ELLIPSIS
[slice(0, 1, None), slice(1, 2, None), ..., slice(9, 10, None)]
>>> list(gen_even_slices(10, 5)) #doctest: +ELLIPSIS
[slice(0, 2, None), slice(2, 4, None), ..., slice(8, 10, None)]
>>> list(gen_even_slices(10, 3))
[slice(0, 4, None), slice(4, 7, None), slice(7, 10, None)]
"""
start = 0
if n_packs < 1:
raise ValueError("gen_even_slices got n_packs=%s, must be >=1" % n_packs)
for pack_num in range(n_packs):
this_n = n // n_packs
if pack_num < n % n_packs:
this_n += 1
if this_n > 0:
end = start + this_n
if n_samples is not None:
end = min(n_samples, end)
yield slice(start, end, None)
start = end
def _get_n_jobs(n_jobs):
"""Get number of jobs for the computation.
This function reimplements the logic of joblib to determine the actual
number of jobs depending on the cpu count. If -1 all CPUs are used.
If 1 is given, no parallel computing code is used at all, which is useful
for debugging. For n_jobs below -1, (n_cpus + 1 + n_jobs) are used.
Thus for n_jobs = -2, all CPUs but one are used.
Parameters
----------
n_jobs : int
Number of jobs stated in joblib convention.
Returns
-------
n_jobs : int
The actual number of jobs as positive integer.
Examples
--------
>>> from sklearn.utils import _get_n_jobs
>>> _get_n_jobs(4)
4
>>> jobs = _get_n_jobs(-2)
>>> assert jobs == max(cpu_count() - 1, 1)
>>> _get_n_jobs(0)
Traceback (most recent call last):
...
ValueError: Parameter n_jobs == 0 has no meaning.
"""
if n_jobs < 0:
return max(cpu_count() + 1 + n_jobs, 1)
elif n_jobs == 0:
raise ValueError('Parameter n_jobs == 0 has no meaning.')
else:
return n_jobs
def tosequence(x):
"""Cast iterable x to a Sequence, avoiding a copy if possible."""
if isinstance(x, np.ndarray):
return np.asarray(x)
elif isinstance(x, Sequence):
return x
else:
return list(x)
class ConvergenceWarning(UserWarning):
"""Custom warning to capture convergence problems"""
class DataDimensionalityWarning(UserWarning):
"""Custom warning to notify potential issues with data dimensionality"""
|
bsd-3-clause
|
jseabold/scikit-learn
|
benchmarks/bench_plot_neighbors.py
|
287
|
6433
|
"""
Plot the scaling of the nearest neighbors algorithms with k, D, and N
"""
from time import time
import numpy as np
import pylab as pl
from matplotlib import ticker
from sklearn import neighbors, datasets
def get_data(N, D, dataset='dense'):
if dataset == 'dense':
np.random.seed(0)
return np.random.random((N, D))
elif dataset == 'digits':
X = datasets.load_digits().data
i = np.argsort(X[0])[::-1]
X = X[:, i]
return X[:N, :D]
else:
raise ValueError("invalid dataset: %s" % dataset)
def barplot_neighbors(Nrange=2 ** np.arange(1, 11),
Drange=2 ** np.arange(7),
krange=2 ** np.arange(10),
N=1000,
D=64,
k=5,
leaf_size=30,
dataset='digits'):
algorithms = ('kd_tree', 'brute', 'ball_tree')
fiducial_values = {'N': N,
'D': D,
'k': k}
#------------------------------------------------------------
# varying N
N_results_build = dict([(alg, np.zeros(len(Nrange)))
for alg in algorithms])
N_results_query = dict([(alg, np.zeros(len(Nrange)))
for alg in algorithms])
for i, NN in enumerate(Nrange):
print("N = %i (%i out of %i)" % (NN, i + 1, len(Nrange)))
X = get_data(NN, D, dataset)
for algorithm in algorithms:
nbrs = neighbors.NearestNeighbors(n_neighbors=min(NN, k),
algorithm=algorithm,
leaf_size=leaf_size)
t0 = time()
nbrs.fit(X)
t1 = time()
nbrs.kneighbors(X)
t2 = time()
N_results_build[algorithm][i] = (t1 - t0)
N_results_query[algorithm][i] = (t2 - t1)
#------------------------------------------------------------
# varying D
D_results_build = dict([(alg, np.zeros(len(Drange)))
for alg in algorithms])
D_results_query = dict([(alg, np.zeros(len(Drange)))
for alg in algorithms])
for i, DD in enumerate(Drange):
print("D = %i (%i out of %i)" % (DD, i + 1, len(Drange)))
X = get_data(N, DD, dataset)
for algorithm in algorithms:
nbrs = neighbors.NearestNeighbors(n_neighbors=k,
algorithm=algorithm,
leaf_size=leaf_size)
t0 = time()
nbrs.fit(X)
t1 = time()
nbrs.kneighbors(X)
t2 = time()
D_results_build[algorithm][i] = (t1 - t0)
D_results_query[algorithm][i] = (t2 - t1)
#------------------------------------------------------------
# varying k
k_results_build = dict([(alg, np.zeros(len(krange)))
for alg in algorithms])
k_results_query = dict([(alg, np.zeros(len(krange)))
for alg in algorithms])
X = get_data(N, D, dataset)
for i, kk in enumerate(krange):
print("k = %i (%i out of %i)" % (kk, i + 1, len(krange)))
for algorithm in algorithms:
nbrs = neighbors.NearestNeighbors(n_neighbors=kk,
algorithm=algorithm,
leaf_size=leaf_size)
t0 = time()
nbrs.fit(X)
t1 = time()
nbrs.kneighbors(X)
t2 = time()
k_results_build[algorithm][i] = (t1 - t0)
k_results_query[algorithm][i] = (t2 - t1)
pl.figure(figsize=(8, 11))
for (sbplt, vals, quantity,
build_time, query_time) in [(311, Nrange, 'N',
N_results_build,
N_results_query),
(312, Drange, 'D',
D_results_build,
D_results_query),
(313, krange, 'k',
k_results_build,
k_results_query)]:
ax = pl.subplot(sbplt, yscale='log')
pl.grid(True)
tick_vals = []
tick_labels = []
bottom = 10 ** np.min([min(np.floor(np.log10(build_time[alg])))
for alg in algorithms])
for i, alg in enumerate(algorithms):
xvals = 0.1 + i * (1 + len(vals)) + np.arange(len(vals))
width = 0.8
c_bar = pl.bar(xvals, build_time[alg] - bottom,
width, bottom, color='r')
q_bar = pl.bar(xvals, query_time[alg],
width, build_time[alg], color='b')
tick_vals += list(xvals + 0.5 * width)
tick_labels += ['%i' % val for val in vals]
pl.text((i + 0.02) / len(algorithms), 0.98, alg,
transform=ax.transAxes,
ha='left',
va='top',
bbox=dict(facecolor='w', edgecolor='w', alpha=0.5))
pl.ylabel('Time (s)')
ax.xaxis.set_major_locator(ticker.FixedLocator(tick_vals))
ax.xaxis.set_major_formatter(ticker.FixedFormatter(tick_labels))
for label in ax.get_xticklabels():
label.set_rotation(-90)
label.set_fontsize(10)
title_string = 'Varying %s' % quantity
descr_string = ''
for s in 'NDk':
if s == quantity:
pass
else:
descr_string += '%s = %i, ' % (s, fiducial_values[s])
descr_string = descr_string[:-2]
pl.text(1.01, 0.5, title_string,
transform=ax.transAxes, rotation=-90,
ha='left', va='center', fontsize=20)
pl.text(0.99, 0.5, descr_string,
transform=ax.transAxes, rotation=-90,
ha='right', va='center')
pl.gcf().suptitle("%s data set" % dataset.capitalize(), fontsize=16)
pl.figlegend((c_bar, q_bar), ('construction', 'N-point query'),
'upper right')
if __name__ == '__main__':
barplot_neighbors(dataset='digits')
barplot_neighbors(dataset='dense')
pl.show()
|
bsd-3-clause
|
466152112/scikit-learn
|
examples/mixture/plot_gmm_classifier.py
|
250
|
3918
|
"""
==================
GMM classification
==================
Demonstration of Gaussian mixture models for classification.
See :ref:`gmm` for more information on the estimator.
Plots predicted labels on both training and held out test data using a
variety of GMM classifiers on the iris dataset.
Compares GMMs with spherical, diagonal, full, and tied covariance
matrices in increasing order of performance. Although one would
expect full covariance to perform best in general, it is prone to
overfitting on small datasets and does not generalize well to held out
test data.
On the plots, train data is shown as dots, while test data is shown as
crosses. The iris dataset is four-dimensional. Only the first two
dimensions are shown here, and thus some points are separated in other
dimensions.
"""
print(__doc__)
# Author: Ron Weiss <[email protected]>, Gael Varoquaux
# License: BSD 3 clause
# $Id$
import matplotlib.pyplot as plt
import matplotlib as mpl
import numpy as np
from sklearn import datasets
from sklearn.cross_validation import StratifiedKFold
from sklearn.externals.six.moves import xrange
from sklearn.mixture import GMM
def make_ellipses(gmm, ax):
for n, color in enumerate('rgb'):
v, w = np.linalg.eigh(gmm._get_covars()[n][:2, :2])
u = w[0] / np.linalg.norm(w[0])
angle = np.arctan2(u[1], u[0])
angle = 180 * angle / np.pi # convert to degrees
v *= 9
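# the factor of 9 appears to be a visual inflation so the ellipses are large
# enough to see at the scale of the scatter plot, rather than a precise
# sigma contour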
ell = mpl.patches.Ellipse(gmm.means_[n, :2], v[0], v[1],
180 + angle, color=color)
ell.set_clip_box(ax.bbox)
ell.set_alpha(0.5)
ax.add_artist(ell)
iris = datasets.load_iris()
# Break up the dataset into non-overlapping training (75%) and testing
# (25%) sets.
skf = StratifiedKFold(iris.target, n_folds=4)
# Only take the first fold.
train_index, test_index = next(iter(skf))
X_train = iris.data[train_index]
y_train = iris.target[train_index]
X_test = iris.data[test_index]
y_test = iris.target[test_index]
n_classes = len(np.unique(y_train))
# Try GMMs using different types of covariances.
classifiers = dict((covar_type, GMM(n_components=n_classes,
covariance_type=covar_type, init_params='wc', n_iter=20))
for covar_type in ['spherical', 'diag', 'tied', 'full'])
n_classifiers = len(classifiers)
plt.figure(figsize=(3 * n_classifiers / 2, 6))
plt.subplots_adjust(bottom=.01, top=0.95, hspace=.15, wspace=.05,
left=.01, right=.99)
for index, (name, classifier) in enumerate(classifiers.items()):
# Since we have class labels for the training data, we can
# initialize the GMM parameters in a supervised manner.
classifier.means_ = np.array([X_train[y_train == i].mean(axis=0)
for i in xrange(n_classes)])
# Train the other parameters using the EM algorithm.
classifier.fit(X_train)
h = plt.subplot(2, n_classifiers / 2, index + 1)
make_ellipses(classifier, h)
for n, color in enumerate('rgb'):
data = iris.data[iris.target == n]
plt.scatter(data[:, 0], data[:, 1], 0.8, color=color,
label=iris.target_names[n])
# Plot the test data with crosses
for n, color in enumerate('rgb'):
data = X_test[y_test == n]
plt.plot(data[:, 0], data[:, 1], 'x', color=color)
y_train_pred = classifier.predict(X_train)
train_accuracy = np.mean(y_train_pred.ravel() == y_train.ravel()) * 100
plt.text(0.05, 0.9, 'Train accuracy: %.1f' % train_accuracy,
transform=h.transAxes)
y_test_pred = classifier.predict(X_test)
test_accuracy = np.mean(y_test_pred.ravel() == y_test.ravel()) * 100
plt.text(0.05, 0.8, 'Test accuracy: %.1f' % test_accuracy,
transform=h.transAxes)
plt.xticks(())
plt.yticks(())
plt.title(name)
plt.legend(loc='lower right', prop=dict(size=12))
plt.show()
|
bsd-3-clause
|
acbecker/BART
|
tests/test_bart_step.py
|
1
|
7134
|
__author__ = 'brandonkelly'
import unittest
import numpy as np
from scipy import stats, integrate
from tree import *
import matplotlib.pyplot as plt
from test_tree_parameters import build_test_data, SimpleBartStep
class StepTestCase(unittest.TestCase):
def setUp(self):
nsamples = 1000
nfeatures = 4
self.alpha = 0.95
self.beta = 2.0
self.X = np.random.standard_cauchy((nsamples, nfeatures))
self.sigsqr0 = 0.7 ** 2
self.true_sigsqr = self.sigsqr0
ngrow_list = [4, 7]
self.mtrees = 2
forest, mu_list = build_test_data(self.X, self.sigsqr0, ngrow_list, self.mtrees)
# forest = [forest]
# mu_list = [mu_list]
self.y = forest[0].y
self.y0 = forest[0].y.copy()
# Rescale y to lie between -0.5 and 0.5
self.ymin = self.y.min()
self.ymax = self.y.max()
self.y = (self.y - self.ymin) / (self.ymax - self.ymin) - 0.5
self.true_sigsqr /= (self.ymax - self.ymin) ** 2
for tree in forest:
tree.y = self.y # make sure tree objects have the transformed data
self.sigsqr = BartVariance(self.X, self.y)
self.sigsqr.value = self.true_sigsqr
self.mu_list = []
self.forest = []
self.mu_map = np.zeros(len(self.y))
self.nleaves = np.zeros(self.mtrees)
self.nbranches = np.zeros(self.mtrees)
id = 1
for tree, mu in zip(forest, mu_list):
self.nleaves[id-1] = len(tree.terminalNodes)
self.nbranches[id-1] = len(tree.internalNodes)
# rescale mu values since we rescaled the y values
mu = mu / (self.ymax - self.ymin) - 1.0 / self.mtrees * (self.ymin / (self.ymax - self.ymin) + 0.5)
mean_param = BartMeanParameter("mu " + str(id), self.mtrees)
mean_param.value = mu
mean_param.sigsqr = self.sigsqr
# Tree parameter object, note that this is different from a BaseTree object
tree_param = BartTreeParameter('tree ' + str(id), self.X, self.y, self.mtrees, self.alpha, self.beta,
mean_param.mubar, mean_param.prior_var)
tree_param.value = tree
mean_param.treeparam = tree_param # this tree parameter, mu needs to know about it for the Gibbs sampler
tree_param.sigsqr = self.sigsqr
# update moments of y-values in each terminal node since we transformed the data
for leaf in tree_param.value.terminalNodes:
tree_param.value.filter(leaf)
self.mu_list.append(mean_param)
self.forest.append(tree_param)
self.mu_map += BartStep.node_mu(tree, mean_param)
id += 1
self.bart_step = BartStep(self.y, self.forest, self.mu_list, report_iter=5000)
self.sigsqr.bart_step = self.bart_step
def tearDown(self):
del self.X
del self.y
del self.mu_list
del self.forest
del self.bart_step
def test_node_mu(self):
for tree, mu in zip(self.forest, self.mu_list):
mu_map = BartStep.node_mu(tree.value, mu)
n_idx = 0
for leaf in tree.value.terminalNodes:
in_node = tree.value.filter(leaf)[1]
for i in xrange(sum(in_node)):
self.assertAlmostEquals(mu_map[in_node][i], mu.value[n_idx])
n_idx += 1
def test_do_step(self):
# first make sure data is constructed correctly as a sanity check
resids = self.mu_map - self.y
zscore = np.abs(np.mean(resids)) / (np.std(resids) / np.sqrt(resids.size))
self.assertLess(zscore, 3.0)
frac_diff = np.abs(resids.std() - np.sqrt(self.true_sigsqr)) / np.sqrt(self.true_sigsqr)
self.assertLess(frac_diff, 0.05)
# make sure that when BartStep does y -> resids, that BartMeanParameter knows about the updated node values
self.bart_step.trees[0].value.y = resids
n_idx = 0
for leaf in self.bart_step.trees[0].value.terminalNodes:
ybar_old = leaf.ybar
in_node = self.bart_step.trees[0].value.filter(leaf)
mu_leaf = self.bart_step.mus[0].treeparam.value.terminalNodes[n_idx]
self.assertAlmostEqual(leaf.ybar, mu_leaf.ybar)
self.assertNotAlmostEqual(leaf.ybar, ybar_old)
n_idx += 1
def test_step_mcmc(self):
# Tests:
# 1) Make sure that the y-values are updated, i.e., tree.y != resids
# 2) Make sure that the true mu(x) values are contained within the 95% credibility interval 95% of the time
# 3) Make sure that the number of internal and external nodes agree with the true values at the 95% level.
#
# The tests are carried out using an MCMC sampler that keeps the Variance parameter fixed.
burnin = 2000
niter = 10000
msg = "Stored y-values in each tree not equal original y-values, BartStep may have changed these internally."
for i in xrange(burnin):
self.bart_step.do_step()
for tree in self.forest:
self.assertTrue(np.all(tree.y == self.y), msg=msg)
mu_map = np.zeros((self.y.size, niter))
nleaves = np.zeros((niter, self.mtrees))
nbranches = np.zeros((niter, self.mtrees))
rsigma = np.zeros(niter)
print 'Running MCMC sampler...'
for i in xrange(niter):
self.bart_step.do_step()
# save MCMC draws
m = 0
ypredict = 0.0
for tree, mu in zip(self.forest, self.mu_list):
mu_map[:, i] += self.bart_step.node_mu(tree.value, mu)
ypredict += mu_map[:, i]
nleaves[i, m] = len(tree.value.terminalNodes)
nbranches[i, m] = len(tree.value.internalNodes)
m += 1
# transform predicted y back to original scale
ypredict = self.ymin + (self.ymax - self.ymin) * (ypredict + 0.5)
rsigma[i] = np.std(ypredict - self.y0)
# make sure we recover the true tree configuration
for m in xrange(self.mtrees):
ntrue = np.sum(nbranches[nleaves[:, m] == self.nleaves[m], m] == self.nbranches[m])
ntrue_fraction = ntrue / float(niter)
self.assertGreater(ntrue_fraction, 0.05)
# make sure we recover the correct values of mu(x)
mu_map_hi = np.percentile(mu_map, 97.5, axis=1)
mu_map_low = np.percentile(mu_map, 2.5, axis=1)
out = np.logical_or(self.mu_map > mu_map_hi, self.mu_map < mu_map_low)
nout = np.sum(out) # number outside of 95% probability region
# compare number that fell outside of 95% probability region with expectation from binomial distribution
signif = 1.0 - stats.distributions.binom(self.y.size, 0.05).cdf(nout)
print nout
msg = "Probability of number of mu(x) values outside of 95% probability range is < 1%."
self.assertGreater(signif, 0.01, msg=msg)
if __name__ == "__main__":
unittest.main()
|
mit
|
mblondel/scikit-learn
|
examples/cluster/plot_dict_face_patches.py
|
337
|
2747
|
"""
Online learning of a dictionary of parts of faces
==================================================
This example uses a large dataset of faces to learn a set of 20 x 20
image patches that constitute faces.
From the programming standpoint, it is interesting because it shows how
to use the online API of scikit-learn to process a very large
dataset in chunks. We load one image at a time
and randomly extract 50 patches from it. Once we have accumulated
500 such patches (from 10 images), we run the `partial_fit` method
of the online KMeans object, MiniBatchKMeans.
The verbose setting on the MiniBatchKMeans lets us see that some
clusters are reassigned during successive calls to
`partial_fit`. This happens when the number of patches that they represent
has become too low, and it is better to choose a random new
cluster.
"""
print(__doc__)
import time
import matplotlib.pyplot as plt
import numpy as np
from sklearn import datasets
from sklearn.cluster import MiniBatchKMeans
from sklearn.feature_extraction.image import extract_patches_2d
faces = datasets.fetch_olivetti_faces()
###############################################################################
# Learn the dictionary of images
print('Learning the dictionary... ')
rng = np.random.RandomState(0)
kmeans = MiniBatchKMeans(n_clusters=81, random_state=rng, verbose=True)
patch_size = (20, 20)
buffer = []
t0 = time.time()
# The online learning part: cycle over the whole dataset 6 times
index = 0
for _ in range(6):
for img in faces.images:
data = extract_patches_2d(img, patch_size, max_patches=50,
random_state=rng)
data = np.reshape(data, (len(data), -1))
buffer.append(data)
index += 1
if index % 10 == 0:
data = np.concatenate(buffer, axis=0)
data -= np.mean(data, axis=0)
data /= np.std(data, axis=0)
kmeans.partial_fit(data)
buffer = []
if index % 100 == 0:
print('Partial fit of %4i out of %i'
% (index, 6 * len(faces.images)))
dt = time.time() - t0
print('done in %.2fs.' % dt)
###############################################################################
# Plot the results
plt.figure(figsize=(4.2, 4))
for i, patch in enumerate(kmeans.cluster_centers_):
plt.subplot(9, 9, i + 1)
plt.imshow(patch.reshape(patch_size), cmap=plt.cm.gray,
interpolation='nearest')
plt.xticks(())
plt.yticks(())
plt.suptitle('Patches of faces\nTrain time %.1fs on %d patches' %
             (dt, 6 * len(faces.images) * 50), fontsize=16)
plt.subplots_adjust(0.08, 0.02, 0.92, 0.85, 0.08, 0.23)
plt.show()
|
bsd-3-clause
|
hitszxp/scikit-learn
|
examples/ensemble/plot_forest_iris.py
|
335
|
6271
|
"""
====================================================================
Plot the decision surfaces of ensembles of trees on the iris dataset
====================================================================
Plot the decision surfaces of forests of randomized trees trained on pairs of
features of the iris dataset.
This plot compares the decision surfaces learned by a decision tree classifier
(first column), by a random forest classifier (second column), by an extra-
trees classifier (third column) and by an AdaBoost classifier (fourth column).
In the first row, the classifiers are built using the sepal width and the sepal
length features only, on the second row using the petal length and sepal length
only, and on the third row using the petal width and the petal length only.
In descending order of quality, when trained (outside of this example) on all
4 features using 30 estimators and scored using 10 fold cross validation, we see::
ExtraTreesClassifier() # 0.95 score
RandomForestClassifier() # 0.94 score
AdaBoost(DecisionTree(max_depth=3)) # 0.94 score
DecisionTree(max_depth=None) # 0.94 score
Increasing `max_depth` for AdaBoost lowers the standard deviation of the scores (but
the average score does not improve).
See the console's output for further details about each model.
In this example you might try to:
1) vary the ``max_depth`` for the ``DecisionTreeClassifier`` and
``AdaBoostClassifier``, perhaps try ``max_depth=3`` for the
``DecisionTreeClassifier`` or ``max_depth=None`` for ``AdaBoostClassifier``
2) vary ``n_estimators``
It is worth noting that RandomForests and ExtraTrees can be fitted in parallel
on many cores, as each tree is built independently of the others. AdaBoost's
estimators are built sequentially and so cannot make use of multiple cores.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import clone
from sklearn.datasets import load_iris
from sklearn.ensemble import (RandomForestClassifier, ExtraTreesClassifier,
AdaBoostClassifier)
from sklearn.externals.six.moves import xrange
from sklearn.tree import DecisionTreeClassifier
# Parameters
n_classes = 3
n_estimators = 30
plot_colors = "ryb"
cmap = plt.cm.RdYlBu
plot_step = 0.02 # fine step width for decision surface contours
plot_step_coarser = 0.5 # step widths for coarse classifier guesses
RANDOM_SEED = 13 # fix the seed on each iteration
# Load data
iris = load_iris()
plot_idx = 1
models = [DecisionTreeClassifier(max_depth=None),
RandomForestClassifier(n_estimators=n_estimators),
ExtraTreesClassifier(n_estimators=n_estimators),
AdaBoostClassifier(DecisionTreeClassifier(max_depth=3),
n_estimators=n_estimators)]
for pair in ([0, 1], [0, 2], [2, 3]):
for model in models:
# We only take the two corresponding features
X = iris.data[:, pair]
y = iris.target
# Shuffle
idx = np.arange(X.shape[0])
np.random.seed(RANDOM_SEED)
np.random.shuffle(idx)
X = X[idx]
y = y[idx]
# Standardize
mean = X.mean(axis=0)
std = X.std(axis=0)
X = (X - mean) / std
# Train
        model = clone(model)  # fit a fresh clone so the template estimator is left untouched
        clf = model.fit(X, y)
scores = clf.score(X, y)
# Create a title for each column and the console by using str() and
# slicing away useless parts of the string
model_title = str(type(model)).split(".")[-1][:-2][:-len("Classifier")]
model_details = model_title
if hasattr(model, "estimators_"):
model_details += " with {} estimators".format(len(model.estimators_))
        print(model_details + " with features", pair, "has a score of", scores)
plt.subplot(3, 4, plot_idx)
if plot_idx <= len(models):
# Add a title at the top of each column
plt.title(model_title)
# Now plot the decision boundary using a fine mesh as input to a
# filled contour plot
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, plot_step),
np.arange(y_min, y_max, plot_step))
# Plot either a single DecisionTreeClassifier or alpha blend the
# decision surfaces of the ensemble of classifiers
if isinstance(model, DecisionTreeClassifier):
Z = model.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
cs = plt.contourf(xx, yy, Z, cmap=cmap)
else:
# Choose alpha blend level with respect to the number of estimators
# that are in use (noting that AdaBoost can use fewer estimators
# than its maximum if it achieves a good enough fit early on)
estimator_alpha = 1.0 / len(model.estimators_)
for tree in model.estimators_:
Z = tree.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
cs = plt.contourf(xx, yy, Z, alpha=estimator_alpha, cmap=cmap)
# Build a coarser grid to plot a set of ensemble classifications
# to show how these are different to what we see in the decision
        # surfaces. These points are regularly spaced and do not have a black outline
xx_coarser, yy_coarser = np.meshgrid(np.arange(x_min, x_max, plot_step_coarser),
np.arange(y_min, y_max, plot_step_coarser))
Z_points_coarser = model.predict(np.c_[xx_coarser.ravel(), yy_coarser.ravel()]).reshape(xx_coarser.shape)
cs_points = plt.scatter(xx_coarser, yy_coarser, s=15, c=Z_points_coarser, cmap=cmap, edgecolors="none")
# Plot the training points, these are clustered together and have a
# black outline
for i, c in zip(xrange(n_classes), plot_colors):
idx = np.where(y == i)
plt.scatter(X[idx, 0], X[idx, 1], c=c, label=iris.target_names[i],
cmap=cmap)
plot_idx += 1 # move on to the next plot in sequence
plt.suptitle("Classifiers on feature subsets of the Iris dataset")
plt.axis("tight")
plt.show()
|
bsd-3-clause
|
Mocha2007/mochalib
|
mochaastro2.py
|
1
|
83813
|
from math import acos, atan, atan2, cos, erf, exp, inf, isfinite, log, log10, pi, sin, tan
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.patches import Circle, Patch
from mpl_toolkits.mplot3d import Axes3D
from datetime import datetime, timedelta
from mochaunits import Angle, Length, Mass, Time, pretty_dim # Angle is indeed used
from typing import Callable, Dict, Optional, Tuple
# pylint: disable=E1101,W0612
# module has no member; unused variable
# pylint bugs out with pygame, and I want var names for some unused vars,
# in case i need them in the future
# constants
epoch = datetime(2000, 1, 1, 11, 58, 55, 816000) # J2000.0 = 2000-01-01 11:58:55.816 UTC; https://en.wikipedia.org/wiki/Epoch_(astronomy)#Julian_years_and_J2000
g = 6.674e-11 # m^3 / (kg*s^2); appx; standard gravitational constant
c = 299792458 # m/s; exact; speed of light
L_0 = 3.0128e28 # W; exact; zero point luminosity
L_sun = 3.828e26 # W; exact; nominal solar luminosity
lb = 0.45359237 # kg; exact; pound
minute = 60 # s; exact; minute
hour = 3600 # s; exact; hour
day = 86400 # s; exact; day
year = 31556952 # s; exact; gregorian year
jyear = 31536000 # s; exact; julian year
deg = pi/180 # rad; exact; degree
arcmin = deg/60 # rad; exact; arcminute
arcsec = arcmin/60 # rad; exact; arcsecond
atm = 101325 # Pa; exact; atmosphere
ly = c * jyear # m; exact; light-year
au = 149597870700 # m; exact; astronomical unit
pc = 648000/pi * au # m; exact; parsec
mi = 1609.344 # m; exact; mile
G_SC = L_sun / (4*pi*au**2) # W/m^2; exact*; solar constant;
# * - technically not exact, because this is based on visual rather than bolometric luminosity
gas_constant = 8.31446261815324 # J/(K*mol); exact; ideal gas constant
N_A = 6.02214076e23 # dimensionless; exact; Avogadro constant
# functions
def axisEqual3D(ax: Axes3D) -> None:
extents = np.array([getattr(ax, 'get_{}lim'.format(dim))() for dim in 'xyz'])
sz = extents[:, 1] - extents[:, 0]
centers = np.mean(extents, axis=1)
maxsize = max(abs(sz))
r = maxsize/2
for ctr, dim in zip(centers, 'xyz'):
getattr(ax, 'set_{}lim'.format(dim))(ctr - r, ctr + r)
def linear_map(interval1: Tuple[float, float], interval2: Tuple[float, float]) -> Callable[[float], float]:
"""Create a linear map from one interval to another"""
return lambda x: (x - interval1[0]) / (interval1[1] - interval1[0]) * (interval2[1] - interval2[0]) + interval2[0]
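# Illustrative check: linear_map((0, 1), (32, 212))(0.5) returns 122.0, i.e. the
# midpoint of [0, 1] maps to the midpoint of [32, 212].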
def resonance_probability(mismatch: float, outer: int) -> float:
"""Return probability a particular resonance is by chance rather than gravitational"""
return 1-(1-abs(mismatch))**outer
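# For example, a candidate resonance with outer = 3 and a relative mismatch of
# 0.01 would arise by chance with probability 1 - (1 - 0.01)**3, about 0.03.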
def synodic(p1: float, p2: float) -> float:
"""synodic period of two periods (s)"""
return p1*p2/abs(p2-p1) if p2-p1 else inf
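# e.g. synodic(1.0*year, 1.881*year) is about 2.14 years (roughly 780 days),
# the familiar Earth-Mars synodic period (orbital periods here are approximate).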
# classes
class Orbit:
def __init__(self, **properties) -> None:
self.properties = properties
@property
def a(self) -> float:
"""Semimajor Axis (m)"""
return self.properties['sma']
@property
def aop(self) -> float:
"""Argument of periapsis (radians)"""
return self.properties['aop']
@property
def apo(self) -> float:
"""Apoapsis (m)"""
return (1+self.e)*self.a
@property
def copy(self):
# type: (Orbit) -> Orbit
from copy import deepcopy
return deepcopy(self)
@property
def e(self) -> float:
"""Eccentricity (dimensionless)"""
return self.properties['e']
@property
def i(self) -> float:
"""Inclination (radians)"""
return self.properties['i']
@property
def lan(self) -> float:
"""Longitude of the ascending node (radians)"""
return self.properties['lan']
@property
def L_z(self) -> float:
"""L_z Kozai constant (dimensionless)"""
return (1-self.e**2)**.5*cos(self.i)
@property
def man(self) -> float:
"""Mean Anomaly (radians)"""
return self.properties['man']
@property
def mean_longitude(self) -> float:
"""Mean Longitude (radians)"""
return self.lan + self.aop + self.man
@property
def orbit_tree(self):
# type: (Orbit) -> Tuple[Body, ...]
if 'parent' not in self.properties:
return tuple()
o = [self.parent]
if 'orbit' in self.parent.properties:
o += self.parent.orbit.orbit_tree
return tuple(o)
@property
def orbital_energy(self) -> float:
"""Specific orbital energy (J)"""
return -self.parent.mu/(2*self.a)
@property
def p(self) -> float:
"""Period (seconds)"""
return 2*pi*(self.a**3/self.parent.mu)**.5
@property
def parent(self):
# type: (Orbit) -> Body
"""Parent body"""
return self.properties['parent']
@property
def peri(self) -> float:
"""Periapsis (m)"""
return (1-self.e)*self.a
@property
def peri_shift(self) -> float:
"""Periapsis shift per revolution (rad)"""
# https://en.wikipedia.org/wiki/Tests_of_general_relativity#Perihelion_precession_of_Mercury
return 24*pi**3*self.a**2 / (self.p**2 * c**2 * (1-self.e**2))
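        # Sanity check (approximate): Mercury's a, e and period give ~5e-7 rad per
        # orbit, which sums to the classic ~43 arcseconds per century.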
@property
def v(self) -> float:
"""Mean orbital velocity (m/s)"""
return (self.a/self.parent.mu)**-.5
@property
def v_apo(self) -> float:
"""Orbital velocity at apoapsis (m/s)"""
if self.e == 0:
return self.v
e = self.e
return ((1-e)*self.parent.mu/(1+e)/self.a)**.5
@property
def v_peri(self) -> float:
"""Orbital velocity at periapsis (m/s)"""
if self.e == 0:
return self.v
e = self.e
return ((1+e)*self.parent.mu/(1-e)/self.a)**.5
# double underscore methods
def __gt__(self, other) -> bool:
return other.apo < self.peri
def __lt__(self, other) -> bool:
return self.apo < other.peri
def __str__(self) -> str:
bits = [
'<Orbit',
'Parent: {parent}',
'a: {sma}',
'e: {e}',
'i: {i}',
'Omega: {lan}',
'omega: {aop}',
'M: {man}',
]
return '\n\t'.join(bits).format(**self.properties)+'\n>'
def at_time(self, t: float):
# type: (Orbit, float) -> Orbit
"""Get cartesian orbital parameters (m, m, m, m/s, m/s, m/s)"""
new = self.copy
new.properties['man'] += t/self.p * 2*pi
new.properties['man'] %= 2*pi
return new
def cartesian(self, t: float = 0) -> Tuple[float, float, float, float, float, float]:
"""Get cartesian orbital parameters (m, m, m, m/s, m/s, m/s)"""
# ~29μs avg.
# https://downloads.rene-schwarz.com/download/M001-Keplerian_Orbit_Elements_to_Cartesian_State_Vectors.pdf
# 2 GOOD eccentric anomaly
E = self.eccentric_anomaly(t)
# 3 true anomaly
nu = self.true_anomaly(t)
# 4 distance to central body
a, e = self.a, self.e
r_c = a*(1-e*cos(E))
# 5 get pos and vel vectors o and o_
mu = self.parent.mu
o = tuple(r_c*i for i in (cos(nu), sin(nu), 0))
o_ = tuple(((mu*a)**.5/r_c)*i for i in (-sin(E), (1-e**2)**.5*cos(E), 0))
# transform o, o_ into inertial frame
i = self.i
omega, Omega = self.aop, self.lan
co, C, s, S = cos(omega), cos(Omega), sin(omega), sin(Omega)
def R(x: Tuple[float, float, float]) -> Tuple[float, float, float]:
return (
x[0]*(co*C - s*cos(i)*S) - x[1]*(s*C + co*cos(i)*S),
x[0]*(co*S + s*cos(i)*C) + x[1]*(co*cos(i)*C - s*S),
x[0]*(s*sin(i)) + x[1]*(co*sin(i))
)
r, r_ = R(o), R(o_)
# print([i/au for i in o], [i/au for i in r])
return r + r_
def close_approach(self, other, t: float = 0, n: float = 1, delta_t_tolerance: float = 1, after_only=True) -> float:
"""Get close approach time between two orbits after epoch t, searching +/-n orbits of self. (s)"""
# ~5ms total to compute, at least for earth -> mars
delta_t = self.p * n
if delta_t < delta_t_tolerance:
return t
d_0 = self.distance_to(other, t)
d_aft = self.distance_to(other, t + delta_t)
d_bef = self.distance_to(other, t - delta_t)
# print(d_bef, d_0, d_aft)
if d_bef > d_aft < d_0: # the best point must be between middle and positive end
# print('aft')
return self.close_approach(other, t+delta_t/2, n/2, delta_t_tolerance, after_only)
if not after_only and d_bef < d_0: # the best point must be between middle and negative end
# print('bef')
return self.close_approach(other, t-delta_t/2, n/2, delta_t_tolerance)
# the best point must be near middle
# print('mid')
return self.close_approach(other, t, n/2, delta_t_tolerance, after_only)
def crosses(self, other) -> bool:
"""do two orbits cross?"""
return self.peri < other.peri < self.apo or other.peri < self.peri < other.apo
def distance(self, other) -> Tuple[float, float]:
# type: (Orbit, Orbit) -> Tuple[float, float]
"""Min and max distances (m)"""
# NEW FEATURE: it can now do planet => moon or any other relationship!
ot1, ot2 = self.orbit_tree, other.orbit_tree
assert ot1[-1] == ot2[-1]
if ot1[0] == ot2[0]:
ds = abs(self.peri - other.apo), abs(self.apo - other.peri), \
abs(self.peri + other.apo), abs(self.apo + other.peri)
return min(ds), max(ds)
if 1 < len(ot2) and ot1[0] == ot2[1]: # planet, moon
if self == ot2[0]: # a planet and its moon
return other.peri, other.apo
# a planet and the moon of another
dmin, dmax = self.distance(other.parent.orbit)
dmin -= other.apo
dmax += other.apo
return dmin, dmax
if 1 < len(ot1) and ot1[1] == ot2[0]: # moon, planet
return other.distance(self)
if len(ot1) > 1 < len(ot2) and ot1[1] == ot2[1]: # moon, moon
dmin, dmax = self.parent.orbit.distance(other.parent.orbit)
dmin -= self.apo + other.apo
dmax += self.apo + other.apo
return dmin, dmax
raise NotImplementedError('this type of orbital relationship is not supported')
def distance_to(self, other, t: float) -> float:
# type: (Orbit, Orbit, float) -> float
"""Distance between orbits at time t (m)"""
# ~65μs avg.
a, b = self.cartesian(t)[:3], other.cartesian(t)[:3]
return sum((i-j)**2 for i, j in zip(a, b))**.5
def eccentric_anomaly(self, t: float = 0) -> float:
"""Eccentric anomaly (radians)"""
# ~6μs avg.
# get new anomaly
tau = 2*pi
tol = 1e-10
e, p = self.e, self.p
# dt = day * t
M = (self.man + tau*t/p) % tau
assert isfinite(M)
# E = M + e*sin(E)
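        # Fixed-point iteration for Kepler's equation M = E - e*sin(E); this
        # converges for any eccentricity 0 <= e < 1.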
E = M
while 1: # ~2 digits per loop
E_ = M + e*sin(E)
if abs(E-E_) < tol:
return E
E = E_
def mean_anomaly_delta(self, t: float = 0) -> float:
"""Change in mean anomaly over a period of time"""
return ((t / self.p) % 1) * 2*pi
def get_resonance(self, other, sigma: int = 3):
# type: (Orbit, Orbit, int) -> Tuple[int, int]
"""Estimate resonance from periods, n-sigma certainty (outer, inner)"""
# ~102μs avg.
q = self.p / other.p
if 1 < q:
return other.get_resonance(self, sigma)
outer = 0
best = 0, 0, 1
while 1:
outer += 1
inner = round(outer / q)
d = outer/inner - q
p = resonance_probability(d, outer)
if p < best[2]:
# print('\t {0}:{1}\t-> {2}'.format(outer, inner, p))
best = outer, inner, p
# certain?
if best[2] < 1 - erf(sigma/2**.5):
break
return best[:2]
def phase_angle(self, other) -> float:
"""Phase angle for transfer orbits, fraction of other's orbit (rad)"""
# https://forum.kerbalspaceprogram.com/index.php?/topic/62404-how-to-calculate-phase-angle-for-hohmann-transfer/#comment-944657
d, h = other.a, self.a
return pi/((d/h)**1.5)
def plot(self) -> None:
"""Plot orbit with pyplot"""
n = 1000
ts = [i*self.p/n for i in range(n)]
cs = [self.cartesian(t) for t in ts]
xs, ys, zs, vxs, vys, vzs = zip(*cs)
fig = plt.figure(figsize=(7, 7))
ax = Axes3D(fig)
plt.cla()
ax.set_title('Orbit')
ax.set_xlabel('x (m)')
ax.set_ylabel('y (m)')
ax.set_zlabel('z (m)')
ax.plot(xs+(xs[0],), ys+(ys[0],), zs+(zs[0],), color='k', zorder=1)
ax.scatter(0, 0, 0, marker='*', color='y', s=50, zorder=2)
ax.scatter(xs[0], ys[0], zs[0], marker='o', color='b', s=15, zorder=3)
axisEqual3D(ax)
plt.show()
def relative_inclination(self, other) -> float:
"""Relative inclination between two orbital planes (rad)"""
t, p, T, P = self.i, self.lan, other.i, other.lan
# vectors perpendicular to orbital planes
v_self = np.array([sin(t)*cos(p), sin(t)*sin(p), cos(t)])
v_other = np.array([sin(T)*cos(P), sin(T)*sin(P), cos(T)])
return acos(np.dot(v_self, v_other))
def resonant(self, ratio: float):
# type: (Orbit, float) -> Orbit
"""Get a resonant orbit from this one"""
p = {key: value for key, value in self.properties.items()}
p['sma'] = self.a * ratio**(2/3)
return Orbit(**p)
def stretch_to(self, other):
# type: (Orbit, Orbit) -> Orbit
"""Stretch one orbit to another, doing the minimum to make them cross"""
# ~156μs avg.
# if already crossing
if self.peri <= other.peri <= self.apo or self.peri <= other.apo <= self.apo:
return self # nothing to do, already done!
new = self.copy
# if self is inferior to other
if self.a < other.a:
# increase apo to other.peri
apo, peri = other.peri, self.peri
# else must be superior
else:
# decrease peri to other.apo
apo, peri = self.apo, other.apo
a, e = apsides2ecc(apo, peri)
new.properties['sma'] = a
new.properties['e'] = e
return new
def synodic(self, other) -> float:
# type: (Orbit, Orbit) -> float
"""Synodic period of two orbits (s)"""
return synodic(self.p, other.p)
def t_collision(self, other) -> float:
# type: (Orbit, Body) -> float
"""Collision timescale: other is a body object orbiting the same primary, out in (s)"""
# Hamilton and Burns (Science 264, 550-553, 1994)
# U_r / U is baaaasically 1, right? :^)
assert self.crosses(other.orbit)
line1 = pi * (sin(self.i)**2 + sin(other.orbit.i)**2)**.5
line2 = (other.orbit.a / other.radius)**2 * self.p
timescale = line1*line2
v_col = self.e * other.orbit.v
if v_col < other.v_e:
# from personal correspondence with D. Hamilton
correction_factor = 1 + (other.v_e/v_col)**2
return timescale / correction_factor
return timescale
def t_kozai(self, other) -> float:
# type: (Orbit, Body) -> float
"""Kozai oscillation timescale (s)"""
# other is type Body
e, M, m_2, p, p_2 = other.orbit.e, self.parent.mass, other.mass, self.p, other.orbit.p
return M/m_2*p_2**2/p*(1-e**2)**1.5
def tisserand(self, other) -> float:
# type: (Orbit, Orbit) -> float
"""Tisserand's parameter (dimensionless)"""
a, a_P, e, i = self.a, other.a, self.e, self.relative_inclination(other)
return a_P/a + 2*cos(i) * (a/a_P * (1-e**2))**.5
# """Compute optimal transfer burn (m/s, m/s, m/s, s)"""
"""
def transfer(self, other, t: float = 0,
delta_t_tol: float = 1, delta_x_tol: float = 1e7, dv_tol: float = .1) -> Tuple[float, float, float, float]:
raise NotImplementedError('DO NOT USE THIS. It NEVER works, and takes ages to compute.')
# initial guess needs to be "bring my apo up/peri down to the orbit
initial_guess = self.stretch_to(other)
System(*[Body(orbit=i) for i in (initial_guess, self, other)]).plot2d()
time_at_guess = initial_guess.close_approach(other, t, 1, delta_t_tol)
dv_of_guess = tuple(j-i for i, j in zip(self.cartesian(t), initial_guess.cartesian(t)))[-3:]
dv_best = dv_of_guess + (t,) # includes time
# compute quality of initial guess
old_close_approach_dist = initial_guess.distance_to(other, time_at_guess)
# order of deltas to attempt
base_order = (
(dv_tol, 0, 0),
(-dv_tol, 0, 0),
(0, dv_tol, 0),
(0, -dv_tol, 0),
(0, 0, dv_tol),
(0, 0, -dv_tol),
)
delta_v_was_successful = True
while delta_v_was_successful:
if old_close_approach_dist < delta_x_tol: # success!
# print('it finally works!')
print('{0} < {1}'.format(*(Length(i, 'astro') for i in (old_close_approach_dist, delta_x_tol))))
System(*[Body(orbit=i) for i in (burn_orbit, self, other)]).plot()
return dv_best
# try to change the time FIRST
mul = 2**16
while mul:
# check if changing the time helps
dt_mod = delta_t_tol*mul
# start by testing if adding a minute dt improves close approach
dt = dv_best[3]+dt_mod
burn_orbit = self.at_time(dt)
# now, to check if burn_orbit makes it closer...
new_close_approach_time = burn_orbit.close_approach(other, dt, 1, delta_t_tol)
new_close_approach_dist = burn_orbit.distance_to(other, new_close_approach_time)
if new_close_approach_dist < old_close_approach_dist:
print(Length(new_close_approach_dist, 'astro'), Length(old_close_approach_dist, 'astro'))
# System(*[Body(orbit=i) for i in (burn_orbit, self, other)]).plot2d
# good! continue along this path, then.
print('good!', (0, 0, 0, dt_mod), '@', mul)
dv_best = dv_best[:3] + (dt,)
old_close_approach_dist = new_close_approach_dist
continue
# make mul neg if pos, make mul pos and halved if neg
mul = -mul
if 0 < mul:
mul >>= 1
delta_v_was_successful = False
for modifiers in base_order:
print('MODIFIERS', modifiers)
mul = 2**13
while 1 <= mul:
dvx_mod, dvy_mod, dvz_mod = tuple(i*mul for i in modifiers)
# start by testing if adding a minute dx improves close approach
dvx, dvy, dvz, dt = dv_best[0]+dvx_mod, dv_best[1]+dvy_mod, dv_best[2]+dvz_mod, dv_best[3]
old_cartesian = self.cartesian(dt)
x, y, z, vx, vy, vz = old_cartesian
new_cartesian = old_cartesian[:3] + (vx+dvx, vy+dvy, vz+dvz)
burn_orbit = keplerian(self.parent, new_cartesian).at_time(-dt)
# when t=0 for this orbit, the real time is dt, so we need to reverse it by dt seconds
# print(self, burn_orbit)
# now, to check if burn_orbit makes it closer...
try:
new_close_approach_time = burn_orbit.close_approach(other, dt, 1, delta_t_tol)
except AssertionError:
mul >>= 1
continue
new_close_approach_dist = burn_orbit.distance_to(other, new_close_approach_time)
if new_close_approach_dist < old_close_approach_dist:
print(Length(new_close_approach_dist, 'astro'), Length(old_close_approach_dist, 'astro'))
# System(*[Body(orbit=i) for i in (burn_orbit, self, other)]).plot2d
# good! continue along this path, then.
print('good!', (dvx_mod, dvy_mod, dvz_mod, 0), '@', mul)
dv_best = dvx, dvy, dvz, dt
old_close_approach_dist = new_close_approach_dist
delta_v_was_successful = True
break
# multiplier is too big!
# print('old mul was', mul)
mul >>= 1
if delta_v_was_successful:
break
print('Transfer failed...', Length(old_close_approach_dist, 'astro'))
# autopsy
try:
System(*[Body(orbit=i) for i in (burn_orbit, self, other)]).plot()
except AssertionError:
print(initial_guess, burn_orbit)
errorstring = '\n'.join((
'Arguments do not lead to a transfer orbit.',
'Perhaps you set your tolerances too high/low?',
'{0} < {1}'.format(*(Length(i, 'astro') for i in (delta_x_tol, old_close_approach_dist))),
str(dv_best),
))
raise ValueError(errorstring)
"""
def true_anomaly(self, t: float = 0) -> float:
"""True anomaly (rad)"""
# ~8μs avg.
E, e = self.eccentric_anomaly(t), self.e
return 2 * atan2((1+e)**.5 * sin(E/2), (1-e)**.5 * cos(E/2))
def v_at(self, r: float) -> float:
"""Orbital velocity at radius (m/s)"""
return (self.parent.mu*(2/r-1/self.a))**.5
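        # v_at is the vis-viva relation v = sqrt(mu*(2/r - 1/a)); at r = a it
        # reduces to the mean circular speed sqrt(mu/a) given by the `v` property.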
class Rotation:
def __init__(self, **properties):
self.properties = properties
@property
def axis_vector(self) -> Tuple[float, float, float]:
"""Return unit vector of axis (dimensionless)"""
theta, phi = self.dec, self.ra
return sin(theta)*cos(phi), sin(theta)*sin(phi), cos(theta)
@property
def dec(self) -> float:
"""Declination of axis (rad)"""
return self.properties['dec']
@property
def p(self) -> float:
"""Period (s)"""
return self.properties['period']
@property
def ra(self) -> float:
"""Right ascension of axis (rad)"""
return self.properties['ra']
@property
def tilt(self) -> float:
"""Axial tilt (rad)"""
return self.properties['tilt']
class Atmosphere:
def __init__(self, **properties):
self.properties = properties
@property
def composition(self) -> dict:
"""Composition (element: fraction)"""
return self.properties['composition']
@property
def density(self) -> float:
"""Density of atmosphere at surface, approximation, kg/m^3
Does not account for composition or temperature, only pressure."""
return 1.225 * self.surface_pressure / earth.atmosphere.surface_pressure
@property
def greenhouse(self) -> float:
"""Estimate greenhouse factor (dimensionless)"""
# initially based on trial and error
# eventually I gave up and 90% of this is copied from a4x
gh_p = self.greenhouse_pressure / atm
atm_pressure = self.surface_pressure / earth.atmosphere.surface_pressure
correction_factor = 1.319714531668124
ghe_max = 3.2141846382913877
return min(ghe_max, 1 + ((atm_pressure/10) + gh_p) * correction_factor)
@property
def greenhouse_pressure(self) -> float:
"""Surface pressure (Pa)"""
ghg = {
'CH4',
'CO2',
}
return sum(self.partial_pressure(i) for i in ghg if i in self.composition)
@property
def mesopause(self) -> float:
"""Altitude of Mesopause, approximation, m
See notes for Tropopause."""
return self.altitude(earth.atmosphere.pressure(85000)) # avg.
@property
def scale_height(self) -> float:
"""Scale height (m)"""
return self.properties['scale_height']
@property
def stratopause(self) -> float:
"""Altitude of Stratopause, approximation, m
See notes for Tropopause."""
return self.altitude(earth.atmosphere.pressure(52500)) # avg.
@property
def surface_pressure(self) -> float:
"""Surface pressure (Pa)"""
return self.properties['surface_pressure']
@property
def tropopause(self) -> float:
"""Altitude of Tropopause, approximation, m
Uses Earth's atmosphere as a model, so,
for Earthlike planets, this is generally accurate.
Otherwise, treat as a first-order approximation,
likely within a factor of 2 from the true value.
Might return negative numbers if atmosphere is too thin."""
return self.altitude(earth.atmosphere.pressure(13000)) # avg.
# methods
def altitude(self, pressure: float) -> float:
"""Altitude at which atm has pressure, in Pa"""
return -self.scale_height*log(pressure/self.surface_pressure)
def partial_pressure(self, molecule: str) -> float:
"""Partial pressure of a molecule on the surface (Pa)"""
return self.surface_pressure * self.composition[molecule]
def pressure(self, altitude: float) -> float:
"""Pressure at altitude (Pa)"""
return self.surface_pressure * exp(-altitude / self.scale_height)
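        # e.g. with an Earth-like scale height of ~8500 m, pressure(5900) is about
        # half the surface pressure, since exp(-5900/8500) is roughly 0.5.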
def wind_pressure(self, v: float) -> float:
"""Pressure of wind going v m/s, in Pa"""
return self.density * v**2
class Body:
def __init__(self, **properties):
self.properties = properties
# orbital properties
@property
def orbit(self) -> Orbit:
return self.properties['orbit']
@property
def orbit_rot_synodic(self) -> float:
"""Synodic period of moon orbit (self) and planetary rotation (s)"""
return synodic(self.orbit.p, self.orbit.parent.rotation.p)
@property
def esi(self) -> float:
# Radius, Density, Escape Velocity, Temperature
"""Earth similarity index (dimensionless)"""
r, rho, T = self.radius, self.density, self.temp
r_e, rho_e = earth.radius, earth.density
esi1 = 1-abs((r-r_e)/(r+r_e))
esi2 = 1-abs((rho-rho_e)/(rho+rho_e))
esi3 = 1-abs((self.v_e-earth.v_e)/(self.v_e+earth.v_e))
esi4 = 1-abs((T-earth.temp)/(T+earth.temp))
return esi1**(.57/4)*esi2**(1.07/4)*esi3**(.7/4)*esi4**(5.58/4)
@property
def hill(self) -> float:
"""Hill Sphere (m)"""
a, e, m, M = self.orbit.a, self.orbit.e, self.mass, self.orbit.parent.mass
return a*(1-e)*(m/3/M)**(1/3)
@property
def max_eclipse_duration(self) -> float:
"""Maximum theoretical eclipse duration (s)"""
star_r = self.orbit.parent.star.radius
planet_a = self.orbit.parent.orbit.apo
planet_r = self.orbit.parent.radius
moon_a = self.orbit.peri
# moon_r = self.radius
theta = atan2(star_r - planet_r, planet_a)
shadow_r = planet_r-moon_a*tan(theta)
orbit_fraction = shadow_r / (pi*self.orbit.a)
return self.orbit.p * orbit_fraction
@property
def nadir_time(self) -> float:
"""One-way speed of light lag between nadir points of moon (self) and planet (s)"""
d = self.orbit.a - self.orbit.parent.radius - self.radius
return d/c
@property
def soi(self) -> float:
"""Sphere of influence (m)"""
return self.orbit.a*(self.mass/self.orbit.parent.mass)**.4
@property
def star(self):
# type: (Body) -> Star
"""Get the nearest star in the hierarchy"""
p = self.orbit.parent
return p if isinstance(p, Star) else p.star
@property
def star_dist(self) -> float:
"""Get the distance to the nearest star in the hierarchy"""
p = self.orbit.parent
return self.orbit.a if isinstance(p, Star) else p.star_dist
@property
def star_radius(self) -> float:
"""Get the radius of the nearest star in the hierarchy"""
p = self.orbit.parent
return p.radius if isinstance(p, Star) else p.star_radius
@property
def temp(self) -> float:
"""Planetary equilibrium temperature (K)"""
a, R, sma, T = self.albedo, self.star_radius, self.star_dist, self.star.temperature
return T*(1-a)**.25*(R/2/sma)**.5
@property
def temp_subsolar(self) -> float:
"""Planetary temperature at subsolar point(K)"""
# based on formula from Reichart
return self.temp * 2**.5
@property
def tidal_locking(self) -> float:
"""Tidal locking timeframe (s)"""
return 5e28 * self.orbit.a**6 * self.radius / (self.mass * self.orbit.parent.mass**2)
# rotation properties
@property
def rotation(self) -> Rotation:
return self.properties['rotation']
@property
def solar_day(self) -> float:
"""True solar day (s)"""
t, T = self.rotation.p, self.orbit.p
return (t*T)/(T-t)
@property
def v_tan(self) -> float:
"""Equatorial rotation velocity (m/s)"""
return 2*pi*self.radius/self.rotation.p
@property
def rotational_energy(self) -> float:
"""Rotational Energy (J)"""
i = 2/5 * self.mass * self.radius**2
omega = 2*pi / self.rotation.p
return 1/2 * i * omega**2
@property
def atmosphere_mass(self) -> float:
"""Mass of the atmosphere (kg)"""
return self.atmosphere.surface_pressure * self.area / self.surface_gravity
# atmospheric properties
@property
def atm_retention(self) -> float:
"""Checks if v_e is high enough to retain compound; molmass in kg/mol"""
# initial assessment from :
# https://upload.wikimedia.org/wikipedia/commons/4/4a/Solar_system_escape_velocity_vs_surface_temperature.svg
# revised based on formulas derived from:
# Reichart, Dan. "Lesson 6 - Earth and the Moon". Astronomy 101: The Solar System, 1st ed.
try:
t = self.greenhouse_temp
except KeyError:
t = self.temp
return 887.364 * t / (self.v_e)**2
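        # Rough illustration: Earth-like values (T ~ 288 K, v_e ~ 11.2 km/s) give
        # about 0.002 kg/mol, so only species around the mass of hydrogen fall at
        # or below the retention threshold.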
@property
def atmosphere(self) -> Atmosphere:
return self.properties['atmosphere']
@property
def greenhouse_temp(self) -> float:
"""Planetary equilibrium temperature w/ greenhouse correction (K)"""
return self.temp * self.atmosphere.greenhouse
# physical properties
@property
def albedo(self) -> float:
"""Albedo (dimensionless)"""
return self.properties['albedo']
@property
def area(self) -> float:
"""Surface area (m^2)"""
return 4*pi*self.radius**2
@property
def categories(self) -> set:
"""List all possible categories for body"""
ice_giant_cutoff = 80*earth.mass # https://en.wikipedia.org/wiki/Super-Neptune
categories = set()
if isinstance(self, Star):
return {'Star'}
mass = 'mass' in self.properties and self.mass
rounded = search('Miranda').mass <= mass # Miranda is the smallest solar system body which might be in HSE
if 'Star' not in self.orbit.parent.categories:
categories.add('Moon')
if rounded:
categories.add('Major Moon')
else:
categories.add('Minor Moon')
return categories
# substellar
if 13*jupiter.mass < mass:
return {'Brown Dwarf'}
radius = 'radius' in self.properties and self.radius
if mass and 1 <= self.planetary_discriminant:
categories.add('Planet')
# earth-relative
if earth.mass < mass < ice_giant_cutoff:
categories.add('Super-earth')
elif mass < earth.mass:
categories.add('Sub-earth')
if search('Ceres').radius < radius < mercury.radius:
categories.add('Mesoplanet')
# USP
if self.orbit.p < day:
categories.add('Ultra-short period planet')
# absolute
if self.mass < 10*earth.mass or (mars.density <= self.density and self.mass < jupiter.mass): # to prevent high-mass superjupiters
categories.add('Terrestrial Planet')
if 'composition' in self.properties and set('CO') <= set(self.composition) and self.composition['O'] < self.composition['C']:
categories.add('Carbon Planet')
if 10*earth.mass < self.mass:
categories.add('Mega-Earth')
try:
temperature = (self.atmosphere.greenhouse if 'atmosphere' in self.properties else 1) * self.temp
if earth.atmosphere.greenhouse * earth.temp < temperature < 300: # death valley avg. 298K
# https://en.wikipedia.org/wiki/Desert_planet
categories.add('Desert Planet')
elif temperature < 260 and .9 < self.albedo:
# https://en.wikipedia.org/wiki/Ice_planet
categories.add('Ice Planet')
elif 1000 < temperature:
# https://en.wikipedia.org/wiki/Lava_planet
categories.add('Lava Planet')
except KeyError:
pass
else:
categories.add('Giant Planet')
if .1 < self.orbit.e:
categories.add('Eccentric Jupiter')
if 'atmosphere' in self.properties and 'composition' in self.atmosphere.properties and 'He' in self.atmosphere.composition and .5 < self.atmosphere.composition['He']:
categories.add('Helium Planet')
if mass < ice_giant_cutoff:
categories.add('Ice Giant')
if neptune.mass < self.mass:
categories.add('Super-Neptune')
if 1.7*earth.radius < self.radius < 3.9*earth.radius:
categories.add('Gas Dwarf')
if self.orbit.a < au:
categories.add('Hot Neptune')
else:
categories.add('Gas Giant')
if jupiter.mass < self.mass:
categories.add('Super-Jupiter')
if self.density < saturn.density:
categories.add('Puffy Planet')
if self.orbit.p < 10*day:
categories.add('Hot Jupiter')
return categories
# subplanetary
categories.add('Minor Planet')
if 9e20 < mass: # smallest dwarf planet is Ceres
categories.add('Dwarf Planet')
# crossers
# [print(n, b.categories) for n, b in universe.items() if any('Crosser' in c for c in b.categories)]
for planet_name, body in solar_system.items():
if planet_name in {'Moon', 'Sun'}:
continue
if self.orbit.peri < body.orbit.apo and body.orbit.peri < self.orbit.apo:
categories.add('{} Crosser'.format(planet_name))
if self.orbit.get_resonance(body.orbit, 2) == (1, 1): # even the worst matches for jupiter trojans are just over 2 sigma certainty
categories.add('{} Trojan'.format(planet_name))
# NEO
if self.orbit.peri < 1.3*au:
categories.add('NEO')
if self.orbit.peri < earth.orbit.apo:
if 70 < radius or 1e9 < mass:
categories.add('PHO')
if earth.orbit.a < self.orbit.a:
categories.add('Apollo Asteroid')
else:
categories.add('Amor Asteroid')
if self.orbit.a < earth.orbit.a:
if earth.orbit.peri < self.orbit.apo:
categories.add('Aten Asteroid')
else:
categories.add('Atira Asteroid')
# orbit distance
if self.orbit.a < jupiter.orbit.a: # asteroids which aren't necessarily NEOs
if 2.06*au < self.orbit.a < 3.28*au:
# [print(n, b.categories) for n, b in universe.items() if 'Asteroid Belt' in b.categories]
categories.add('Asteroid Belt')
if self.orbit.a < 2.5*au:
categories.add('Inner Main Belt')
if 2.26*au < self.orbit.a < 2.48*au and .035 < self.orbit.e < .162 and 5*deg < self.orbit.i < 8.3*deg:
categories.add('Vesta Family')
if 2.3*au < self.orbit.a and self.orbit.i < 18*deg:
categories.add('Main Belt I Asteroid')
elif self.orbit.a < 2.82*au:
categories.add('Middle Main Belt')
if self.orbit.i < 33*deg:
if self.orbit.a < 2.76*au:
categories.add('Main Belt IIa Asteroid')
else:
categories.add('Main Belt IIb Asteroid')
else:
categories.add('Outer Main Belt')
if self.orbit.e < .35 and self.orbit.i < 30*deg:
if self.orbit.a < 3.03*au:
categories.add('Main Belt IIIa Asteroid')
else:
categories.add('Main Belt IIIb Asteroid')
# Alinda
if 2.45*au < self.orbit.a < 2.56*au:
categories.add('Alinda Asteroid')
# non-main group asteroids
elif self.orbit.a < mercury.orbit.a:
categories.add('Vulcanoid')
elif .96*au < self.orbit.a < 1.04*au and self.orbit.i < 4.4*deg and self.orbit.e < .084:
categories.add('Arjuna Asteroid')
elif 1.78*au < self.orbit.a < 2*au and self.orbit.e < .18 and 16*deg < self.orbit.i < 34*deg:
categories.add('Hungaria Group')
# Jupiter resonance groups
res_groups = {
(1, 3): 'Alinda',
(1, 2): 'Hecuba Gap',
(4, 7): 'Cybele',
(2, 3): 'Hilda',
(3, 4): 'Thule',
}
resonance = self.orbit.get_resonance(jupiter.orbit)
if resonance in res_groups:
categories.add('Resonant Asteroid')
categories.add('{} Asteroid'.format(res_groups[resonance]))
elif self.orbit.a < neptune.orbit.a:
# https://en.wikipedia.org/wiki/Centaur_(small_Solar_System_body)#Discrepant_criteria
categories.add('Centaur')
else:
categories.add('TNO')
if rounded:
categories.add('Plutoid')
# resonance
resonance = self.orbit.get_resonance(neptune.orbit)
if max(resonance) <= 11:
categories.add('Resonant KBO')
if resonance == (2, 3):
categories.add('Plutino')
elif resonance == (1, 2):
categories.add('Twotino')
elif resonance == (1, 1):
categories.add('Neptune Trojan')
else:
categories.add(str(resonance)[1:-1].replace(', ', ':') + ' res')
# KBO versus SDO
if self.orbit.a < 50*au:
categories.add('KBO')
if self.orbit.e < .2 and 11 < max(resonance):
# https://en.wikipedia.org/wiki/Classical_Kuiper_belt_object#DES_classification
# slightly modified to allow Makemake to get in
categories.add('Cubewano')
if self.orbit.i < 5*deg and self.orbit.e < .1:
categories.add('Cold Population Cubewano')
else:
categories.add('Hot Population Cubewano')
# haumea family
if 41.6*au < self.orbit.a < 44.2*au and .07 < self.orbit.e < .2 and 24.2*deg < self.orbit.i < 29.1*deg:
categories.add('Haumea Family')
else:
categories.add('SDO')
if 40*au < self.orbit.peri:
categories.add('Detached Object')
if 150*au < self.orbit.a:
categories.add('ETNO')
if 38*au < self.orbit.peri < 45*au and .85 < self.orbit.e:
categories.add('ESDO')
if 40*au < self.orbit.peri < 60*au:
categories.add('EDDO')
if 50*au < self.orbit.peri:
categories.add('Sednoid')
# Damocloid
if self.orbit.peri < jupiter.orbit.a and 8*au < self.orbit.a and .75 < self.orbit.e:
# https://en.wikipedia.org/wiki/Damocloid
categories.add('Damocloid')
return categories
@property
def category(self) -> str:
"""Attempt to categorize the body [DEPRECATED, USE CATEGORIES INSTEAD]"""
if isinstance(self, Star):
return 'star'
isround = 2e5 < self.radius
if isinstance(self.orbit.parent, Star):
# heliocentric
if not isround:
return 'asteroid'
# planets and dwarf planets
if self.planetary_discriminant < 1:
return 'dwarf planet'
# planets
if self.radius < mercury.radius:
return 'mesoplanet'
if self.radius < 4/5 * earth.radius:
return 'subearth'
if self.radius < 5/4 * earth.radius:
return 'earthlike'
if self.mass < 10 * earth.mass:
return 'superearth'
if self.mass < 3e26: # SWAG
return 'ice giant'
if self.mass < 13*jupiter.mass:
return 'gas giant'
return 'brown dwarf'
# moons
if isround:
return 'major moon'
return 'minor moon'
@property
def circumference(self) -> float:
"""Circumference (m)"""
return 2*pi*self.radius
@property
def composition(self) -> dict:
"""Composition (element: fraction)"""
return self.properties['composition']
@property
def data(self) -> str:
"""Data Table a la Space Engine"""
# sadly the only alternative would be like, 5000 lines of try/except
lines = (
"'{} {}'.format(self.category.title(), self.properties['name'])",
"'Class {}'.format(self.setype)",
"'Radius {} ({} Earths)'.format(pretty_dim(Length(self.radius)), round(self.radius/earth.radius, 3))",
"'Mass {}'.format(str(Mass(self.mass, 'astro')))",
"'Density {} kg/m³'.format(round(self.density))",
"'ESI {}'.format(round(self.esi, 3))",
"'Absolute Mag. {}'.format(round(self.app_mag_at(10*pc), 2))",
"'Semimajor Axis {}'.format(pretty_dim(Length(self.orbit.a, 'astro')))",
"'Orbital Period {}'.format(pretty_dim(Time(self.orbit.p, 'imperial')))",
"'Rotation Period {}'.format(pretty_dim(Time(self.rotation.p, 'imperial')))",
"'Solar Day {}'.format(pretty_dim(Time(self.solar_day, 'imperial')))",
"'Axial Tilt {}'.format(Angle(self.rotation.tilt, 'deg'))",
"'Gravity {} g'.format(round(self.surface_gravity/earth.surface_gravity, 3))",
# "'Atmosphere Composition {}'.format(', '.join(sorted(list(self.atmosphere.composition), key=lambda x: self.atmosphere.composition[x], reverse=True)[:5]))",
"'Atmos. Pressure {} atm'.format(round(self.atmosphere.surface_pressure/earth.atmosphere.surface_pressure), 3)",
"'Temperature {} K'.format(round((self.atmosphere.greenhouse if 'atmosphere' in self.properties else 1)*self.temp, 2))",
"'Greenhouse Eff. {} K'.format(round((self.atmosphere.greenhouse-1)*self.temp, 2))",
)
string = []
for line in lines:
try:
string.append(eval(line))
except KeyError:
pass
return '\n'.join(string)
@property
def density(self) -> float:
"""Density (kg/m^3)"""
return self.mass/self.volume
@property
def diameter(self) -> float:
"""Diameter (m)"""
return 2*self.radius
@property
def gravbinding(self) -> float:
"""gravitational binding energy (J)"""
return 3*g*self.mass**2/(5*self.radius)
@property
def mass(self) -> float:
"""Mass (kg)"""
return self.properties['mass']
@property
def metal_report(self) -> str:
"""Information regarding important metals"""
symbols = 'Fe Ni Cu Pt Au Ag U Co Al'.split(' ')
ms, assume = self.metals
string = 'COMPOSITION REPORT'
Fe, Ni, Cu, Pt, Au, Ag, U, Co, Al = [Mass(ms[sym]*self.mass, 'astro') for sym in symbols]
if assume:
string += '\n(Assuming Earthlike composition)'
if any([Fe, Ni]):
string += '\nBase Metals\n\tFe: {}\n\tNi: {}'.format(Fe, Ni)
if any([Pt, Au, Ag]):
string += '\nPrecious\n\tAu: {}\n\tAg: {}\n\tPt: {}'.format(Au, Ag, Pt)
if any([Cu, U]):
string += '\nOther\n\tAl: {}\n\tCo: {}\n\tCu: {}\n\tU: {}'.format(Al, Co, Cu, U)
return string
@property
def metals(self) -> Tuple[Dict[str, Optional[int]], bool]:
"""Metal data for metal/mining reports"""
symbols = 'Fe Ni Cu Pt Au Ag U Co Al'.split(' ')
assume = False
try:
ms = {sym: (self.composition[sym] if sym in self.composition else None) for sym in symbols}
except KeyError:
if isinstance(self, Star):
target = sun
elif 10*earth.mass < self.mass:
target = jupiter
print('jupiter')
else:
target = earth
ms = {sym: target.composition[sym] if sym in target.composition else 0 for sym in symbols}
assume = True
return ms, assume
@property
def mining_report(self) -> str:
"""Information regarding mining"""
string = 'MINING REPORT\nMinable metal mass (<4km deep):'
        mass = self.density * -self.shell(-4000) # deepest mines are ~4 km deep, some wiggle room
production = {
'Fe': 2.28e12, # 2015: 2,280 million tons
'Ni': 2.3e9, # More than 2.3 million tonnes (t) of nickel per year are mined worldwide,
'Au': 3.15e6, # 3,150 t/yr
'Ag': 3.8223e7, # 38,223 t/yr
'Pt': 1.61e5, # 2014: 161 t/yr
'Cu': 1.97e10, # 2017: 19.7 million t/yr
'U': 6.0496e7, # worldwide production of uranium in 2015 amounted to 60,496 tonnes
'Co': 1.1e8, # 2017: 110,000 t/yr
'Al': 5.88e10, # 2016: 58.8 million t/yr
}
ms, assume = self.metals
ms = {sym: Mass(ms[sym]*mass, 'astro') for sym in production if ms[sym]}
ms = {sym: (mass, Time(year * mass.value / production[sym], 'imperial')) for sym, mass in ms.items()}
if assume:
string += '\n(Assuming Earthlike composition)'
string += '\n(Times assume earthlike extraction rates)'
for sym, (mass, time) in sorted(list(ms.items()), key=lambda x: x[1][0], reverse=True):
string += '\n\t{}: {} ({})'.format(sym, mass, time)
return string
@property
def mu(self) -> float:
"""Gravitational parameter (m^3/s^2)"""
return g * self.mass
@property
def planetary_discriminant(self) -> float:
"""Margot's planetary discriminant (dimensionless)"""
a, m, M = self.orbit.a/au, self.mass/earth.mass, self.orbit.parent.mass/sun.mass
# everything above is confirmed correct
# https://en.wikipedia.org/wiki/Clearing_the_neighbourhood#cite_note-5
# C, m_earth, m_sun, t_sun = 2*3**.5, earth.mass, sun.mass, sun.lifespan/year
# k = 3**.5 * C**(-3/2) * (100*t_sun)**(3/4) * m_earth/m_sun
# everything below is confirmed correct
# print(807, '~', k)
k = 807
return k*m/(M**(5/2)*a**(9/8))
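        # With m, M and a all equal to 1 (an Earth twin orbiting a Sun twin at
        # 1 au) this evaluates to k = 807, far above the planet threshold of 1
        # used in the categories property.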
@property
def radius(self) -> float:
"""Radius (m)"""
return self.properties['radius']
@property
def rest_energy(self) -> float:
"""rest energy"""
return self.mass*c**2
@property
def surface_gravity(self) -> float:
"""Surface gravity (m/s^2)"""
return self.mu/self.radius**2
@property
def schwarzschild(self) -> float:
"""Schwarzschild radius (m)"""
return 2*self.mu/c**2
@property
    def setype(self) -> str:
"""Space Engine-like classifier"""
temperature = self.temp
try:
temperature *= self.atmosphere.greenhouse
except KeyError:
pass
if 800 < temperature:
heat = 'Scorched'
elif 400 < temperature:
heat = 'Hot'
elif 300 < temperature:
heat = 'Warm'
elif 250 < temperature:
heat = 'Temperate'
elif 200 < temperature:
heat = 'Cool'
elif 100 < temperature:
heat = 'Cold'
else:
heat = 'Frozen'
return heat + ' ' + self.category.title()
@property
def v_e(self) -> float:
"""Surface escape velocity (m/s)"""
return (2*self.mu/self.radius)**.5
@property
def volume(self) -> float:
"""Volume (m^3)"""
return 4/3*pi*self.radius**3
# satellite properties
@property
def synchronous(self) -> float:
"""SMA of synchronous orbit (m)"""
mu = g*self.mass
return (mu*self.rotation.p**2/4/pi**2)**(1/3)
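        # Sanity check (approximate): Earth's mass and 23 h 56 m rotation period
        # give ~4.22e7 m, the geostationary orbit radius.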
# double underscore methods
def __gt__(self, other) -> bool:
# type: (Body, Body) -> bool
# WA uses radius, so despite my better judgement, so will I
return other.radius < self.radius
def __lt__(self, other) -> bool:
# type: (Body, Body) -> bool
return self.radius < other.radius
# methods
def acc_towards(self, other, t: float) -> float:
# type: (Body, Body, float) -> float
"""Acceleration of self towards other body at time t (m/s^2)"""
return self.force_between(other, t) / self.mass
def acc_vector_towards(self, other, t: float) -> Tuple[float, float, float]:
# type: (Body, Body, float) -> Tuple[float, float, float]
"""Acceleration vector of self towards other body at time t (m/s^2)"""
a, b = self.orbit.cartesian(t)[:3], other.orbit.cartesian(t)[:3]
dx, dy, dz = [i-j for i, j in zip(a, b)]
scale_factor = (dx**2 + dy**2 + dz**2)**.5
acc = self.acc_towards(other, t)
ax, ay, az = [acc*i/scale_factor for i in (dx, dy, dz)] # scale_factor*i is in [0, 1]
# print(self.acc_towards(other, t))
# print(ax, ay, az)
# print('plot_grav_acc_vector(sedna, planet_nine)')
return ax, ay, az
def angular_diameter(self, other: Orbit) -> Tuple[float, float]:
"""Angular diameter, min and max (rad)"""
dmin, dmax = self.orbit.distance(other)
return self.angular_diameter_at(dmax), self.angular_diameter_at(dmin)
def angular_diameter_at(self, dist: float) -> float:
"""Angular diameter at distance (rad)"""
return 2*atan2(self.radius, dist)
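        # e.g. a Moon-sized body (radius ~1.74e6 m) seen from ~3.84e8 m subtends
        # 2*atan2(1.74e6, 3.84e8), roughly 0.009 rad or 0.52 degrees.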
def app_mag(self, other: Orbit) -> Tuple[float, float]:
"""Apparent magnitude, min and max (dimensionless)"""
dmin, dmax = self.orbit.distance(other)
return self.app_mag_at(dmax), self.app_mag_at(dmin)
def app_mag_at(self, dist: float) -> float:
"""Apparent magnitude at distance (dimensionless)"""
# https://astronomy.stackexchange.com/a/38377
a_p, r_p, d_s, v_sun = self.albedo, self.radius, self.star_dist, self.star.abs_mag
h_star = v_sun + 5*log10(au/(10*pc))
d_0 = 2*au*10**(h_star/5)
h = 5 * log10(d_0 / (2*r_p * a_p**.5))
return h + 5*log10(d_s * dist / au**2)
def atm_supports(self, molmass: float) -> bool:
"""Checks if v_e is high enough to retain compound; molmass in kg/mol"""
return molmass > self.atm_retention
def atmospheric_molecular_density(self, altitude: float) -> float:
"""Molecular density at an altitude (m) in (mol/m^3)"""
return self.atmosphere.pressure(altitude)/(gas_constant*self.greenhouse_temp)
def bielliptic(self, inner: Orbit, mid: Orbit, outer: Orbit) -> float:
"""Bielliptic transfer delta-v (m/s)"""
i, m, o = inner.a, mid.a, outer.a
mu = self.mu
a1 = (i+m)/2
a2 = (m+o)/2
dv1 = (2*mu/i-mu/a1)**.5-(mu/i)**.5
dv2 = (2*mu/m-mu/a2)**.5-(2*mu/m-mu/a1)**.5
dv3 = (2*mu/o-mu/a2)**.5-(mu/o)**.5
return dv1 + dv2 + dv3
def force_between(self, other, t: float = 0) -> float:
"""Force between two bodies at time t (N)"""
m1, m2 = self.mass, other.mass
r = self.orbit.distance_to(other.orbit, t)
return g*m1*m2/r**2
def hohmann(self, inner: Orbit, outer: Orbit) -> float:
"""Hohmann transfer delta-v (m/s)"""
i, o = inner.a, outer.a
mu = self.mu
dv1 = (mu/i)**.5*((2*o/(i+o))**.5-1)
        dv2 = (mu/o)**.5*(1-(2*i/(i+o))**.5) # circularisation burn uses the outer radius
return dv1 + dv2
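        # dv1 stretches the inner circular orbit into the transfer ellipse; dv2
        # circularises at the outer radius. For a rough check, LEO (~6.7e6 m) to
        # geostationary (~4.22e7 m) around Earth totals about 3.9 km/s.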
def lunar_eclipse(self) -> None:
"""Draw maximum eclipsing radii"""
satellite, planet = self, self.orbit.parent
a = satellite.orbit.peri
r = planet.radius
moon_radius = satellite.angular_diameter_at(a-r)
umbra_radius = atan2(planet.umbra_at(a), a-r)
penumbra_radius = atan2(planet.penumbra_at(a), a-r)
fig, ax = plt.subplots()
ax.axis('scaled')
plt.title('Apparent Diameters from Surface')
plt.xlabel('x (rad)')
plt.ylabel('y (rad)')
# umbra
umbra_circle = Circle((0, 0), radius=umbra_radius, color='k')
umbra_ring = Circle((0, 0), radius=umbra_radius, color='k', linestyle='-', fill=False)
# penumbra
penumbra_circle = Circle((0, 0), radius=penumbra_radius, color='grey')
penumbra_ring = Circle((0, 0), radius=penumbra_radius, color='grey', linestyle='-', fill=False)
# moon
moon_circle = Circle((0, 0), radius=moon_radius, color='orange')
ax.add_artist(penumbra_circle)
ax.add_artist(umbra_circle)
ax.add_artist(moon_circle)
ax.add_artist(penumbra_ring)
ax.add_artist(umbra_ring)
# legend
plt.legend(handles=[
Patch(color='orange', label='Moon'),
Patch(color='black', label='Umbra'),
Patch(color='grey', label='Penumbra'),
])
plt.show()
def net_grav_acc_vector(self, system, t: float) -> Tuple[float, float, float]:
"""Net gravitational acceleration vector (m/s^2)"""
vectors = [self.acc_vector_towards(body, t) for body in system.bodies if body != self]
return tuple(map(sum, zip(*vectors)))
def penumbra_at(self, distance: float) -> float:
"""Penumbra radius at distance (m)"""
planet, star = self, self.orbit.parent
slope = (planet.radius+star.radius) / planet.orbit.a # line drawn from "top of star" to "bottom of planet"
return slope*distance + planet.radius
def roche(self, other) -> float:
"""Roche limit of a body orbiting this one (m)"""
m, rho = self.mass, other.density
return (9*m/4/pi/rho)**(1/3)
def shell(self, other: float) -> float:
"""Volume of a shell extending xxx meters above the surface (m^3)"""
r = max(self.radius + other, 0)
v = 4/3 * pi * r**3
return v - self.volume
def solar_eclipse(self):
"""Draw maximum eclipsing radii"""
satellite, planet, star = self, self.orbit.parent, self.orbit.parent.orbit.parent
moon_radius = satellite.angular_diameter_at(satellite.orbit.peri - planet.radius)
star_radius = star.angular_diameter_at(planet.orbit.apo - planet.radius)
fig, ax = plt.subplots()
ax.axis('scaled')
plt.title('Apparent Diameters from Surface')
plt.xlabel('x (rad)')
plt.ylabel('y (rad)')
# star
star_circle = Circle((0, 0), radius=star_radius, color='y')
star_ring = Circle((0, 0), radius=star_radius, color='y', linestyle='-', fill=False)
# moon
moon_circle = Circle((0, 0), radius=moon_radius, color='k')
ax.add_artist(star_circle)
ax.add_artist(moon_circle)
ax.add_artist(star_ring)
# legend
plt.legend(handles=[
Patch(color='y', label='Star'),
Patch(color='k', label='Moon'),
])
plt.show()
def umbra_at(self, distance: float) -> float:
"""Umbra radius at distance (m)"""
planet, star = self, self.orbit.parent
slope = (planet.radius-star.radius) / planet.orbit.a # line drawn from "top of star" to "top of planet"
return slope*distance + planet.radius
# ty https://www.python-course.eu/python3_inheritance.php
class Star(Body):
@property
def abs_mag(self) -> float:
"""Absolute Magnitude (dimensionless)"""
return -2.5 * log10(self.luminosity / L_0)
@property
def habitable_zone(self) -> Tuple[float, float]:
"""Inner and outer habitable zone (m)"""
center = au*(self.luminosity/sun.luminosity)**.5
inner = .95*center
outer = 1.37*center
return inner, outer
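        # e.g. a star with four times the Sun's luminosity scales the zone by
        # sqrt(4) = 2, giving roughly 1.9 au to 2.7 au.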
@property
def lifespan(self) -> float:
"""Estimated lifespan (s)"""
return 3e17*(self.mass/sun.mass)**-2.5162
@property
def luminosity(self) -> float:
"""Luminosity (W)"""
return self.properties['luminosity']
@property
def peakwavelength(self) -> float:
"""Peak emission wavelength (m)"""
return 2.8977729e-3/self.temperature
@property
def temperature(self) -> float:
"""Temperature (K)"""
return self.properties['temperature']
@property
def X(self) -> float:
"""Hydrogen composition (dimensionless)"""
return self.composition['H']
@property
def Y(self) -> float:
"""Helium composition (dimensionless)"""
return self.composition['He']
@property
def Z(self) -> float:
"""Metal composition (dimensionless)"""
return 1 - self.X - self.Y
# methods
def app_mag(self, dist: float) -> float:
"""Apparent Magnitude (dimensionless)"""
return 5 * log10(dist / (10*pc)) + self.abs_mag
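        # At dist = 10*pc the logarithmic term vanishes, so the apparent magnitude
        # equals the absolute magnitude, which is the defining convention.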
def radiation_pressure_at(self, dist: float) -> float:
"""Stellar radiation pressure at a distance"""
wattage = self.luminosity / sun.luminosity
return wattage * G_SC / (c*dist**2)
def radiation_force_at(self, obj: Body, t: float = 0) -> float:
"""Stellar radiation force on a planet at a time"""
wattage = self.luminosity / sun.luminosity
dist = sum(i**2 for i in obj.orbit.cartesian(t)[:3])**.5 / au
area = obj.area / 2
return wattage * G_SC / (c*dist**2) * area
class System:
"""Set of orbiting bodies"""
def __init__(self, parent: Body, *bodies: Body) -> None:
"""Star system containing bodies.\nDoesn't need to be ordered."""
self.parent = parent
self.bodies = set(bodies)
@property
def sorted_bodies(self) -> list:
"""List of bodies sorted by semimajor axis"""
return sorted(list(self.bodies), key=lambda x: x.orbit.a)
def plot(self) -> None:
"""Plot system with pyplot"""
# see above plot for notes and sources
n = 1000
fig = plt.figure(figsize=(7, 7))
ax = Axes3D(fig)
ax.set_title('Orbit')
ax.set_xlabel('x (m)')
ax.set_ylabel('y (m)')
ax.set_zlabel('z (m)')
ax.scatter(0, 0, 0, marker='*', color='y', s=50, zorder=2)
for body in self.bodies:
cs = [body.orbit.cartesian(t*body.orbit.p/n) for t in range(n)]
xs, ys, zs, vxs, vys, vzs = zip(*cs)
ax.plot(xs, ys, zs, color='k', zorder=1)
ax.scatter(xs[0], ys[0], zs[0], marker='o', s=15, zorder=3)
axisEqual3D(ax)
plt.show()
def plot2d(self) -> None:
"""2D Plot system with pyplot"""
# see above plot for notes and sources
n = 1000
plt.figure(figsize=(7, 7))
plt.title('Orbit')
plt.xlabel('x (m)')
plt.ylabel('y (m)')
plt.scatter(0, 0, marker='*', color='y', s=50, zorder=2)
for body in self.bodies:
cs = [body.orbit.cartesian(t*body.orbit.p/n) for t in range(n)]
xs, ys, zs, vxs, vys, vzs = zip(*cs)
plt.plot(xs, ys, color='k', zorder=1)
plt.scatter(xs[0], ys[0], marker='o', s=15, zorder=3)
plt.show()
def mass_pie(self) -> None:
"""Mass pie chart"""
system_masses = [i.mass for i in self.bodies]
plt.subplot(1, 2, 1)
plt.title('System mass')
plt.pie(system_masses + [self.parent.mass])
plt.subplot(1, 2, 2)
plt.title('System mass (excl. primary)')
plt.pie(system_masses)
plt.show()
def sim(self) -> None:
"""Use pygame to produce a better simulation, albeit in 2D"""
import pygame
from time import sleep # , time
orbit_res = 64
dot_radius = 2
black, blue, white = (0,)*3, (0, 0, 255), (255,)*3
timerate = self.sorted_bodies[0].orbit.p/32
max_a = self.sorted_bodies[-1].orbit.apo
size = 800, 800
width, height = size
pygame.init()
screen = pygame.display.set_mode(size)
refresh = pygame.display.flip
title = str(self)
pygame.display.set_caption(title)
fontsize = 20
font = pygame.font.SysFont('Courier New', fontsize)
t = 0
# precompute orbits
orbits = {body: tuple((body.orbit.cartesian(t+i*body.orbit.p/orbit_res)[:2],
body.orbit.cartesian(t+(i+1)*body.orbit.p/orbit_res)[:2]) for i in range(orbit_res)) for body in self.bodies}
# frame
while 1:
# print('t =', t)
t += timerate
screen.fill(black)
# show bodies
# show star
star_radius = round(self.parent.radius/max_a * width)
try:
pygame.draw.circle(screen, white, (width//2, height//2),
star_radius if dot_radius < star_radius else dot_radius)
except OverflowError:
pass
# show planets
xmap = linear_map((-max_a, max_a), (0, width))
ymap = linear_map((-max_a, max_a), (height, 0))
# start_time = time()
for body in self.bodies: # ~500 μs/body @ orbit_res = 64
x, y, z, vx, vy, vz = body.orbit.cartesian(t)
coords = int(round(xmap(x))), int(round(ymap(y)))
# redraw orbit
for start_pos, end_pos in orbits[body]:
x, y = start_pos
start_coords = int(round(xmap(x))), int(round(ymap(y)))
x, y = end_pos
end_coords = int(round(xmap(x))), int(round(ymap(y)))
try:
pygame.draw.line(screen, blue, start_coords, end_coords)
except TypeError:
pass
try:
body_radius = round(body.radius/max_a * width)
except KeyError:
body_radius = 0
try:
pygame.draw.circle(screen, white, coords,
body_radius if dot_radius < body_radius else dot_radius)
except OverflowError:
pass
# print((time() - start_time)/len(self.bodies))
# print date
textsurface = font.render(str(epoch+timedelta(seconds=t))+' (x{0})'.format(int(timerate)), True, white)
screen.blit(textsurface, (0, 0))
# print scale
textsurface = font.render(str(Length(max_a, 'astro')), True, white)
screen.blit(textsurface, (0, fontsize))
refresh()
# event handling
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.display.quit()
pygame.quit()
elif event.type == pygame.KEYDOWN:
if event.key == pygame.K_KP_PLUS: # timerate up
timerate *= 2
elif event.key == pygame.K_KP_MINUS: # timerate down
timerate /= 2
elif event.type == pygame.MOUSEBUTTONDOWN:
if event.button == 4: # zoom in
max_a /= 2
if event.button == 5: # zoom out
max_a *= 2
sleep(1/30)
# functions
def accrete(star_mass: float = 2e30, particle_n: int = 25000) -> System:
from random import randint, uniform
# from time import time
# constants
# particle_n = 20000 # 500 -> 10 ms; 25000 -> 911 ms
sweep_area = 1.35 # venus-earth
# begin
extra_mass = .0014 * star_mass
particle_mass = extra_mass / particle_n
a_range = mercury.orbit.a * star_mass/sun.mass, neptune.orbit.a * star_mass/sun.mass
# list of [mass, sma]
particles = [[particle_mass, uniform(*a_range)] for _ in range(particle_n)]
# start = time()
for _ in range(10*particle_n):
i_c = randint(0, len(particles)-1)
chosen_particle = particles[i_c]
m_c, a_c = chosen_particle
# find target
for i, (m_o, a_o) in enumerate(particles):
# merge
if a_c/sweep_area < a_o < a_c*sweep_area and i != i_c: # don't merge a particle with itself
m_new = m_c + m_o
a_new = (m_c*a_c + m_o*a_o)/m_new
particles[i_c] = m_new, a_new
del particles[i]
break
# early end
if len(particles) <= 6:
break
# print(time() - start)
# construct system
star = stargen(star_mass)
out = System(star, *(Body(**{'mass': mass, 'orbit': Orbit(**{
'parent': star,
'sma': a,
'e': uniform(0, .1), 'i': uniform(0, 4*deg), 'lan': uniform(0, 2*pi), 'aop': uniform(0, 2*pi), 'man': uniform(0, 2*pi)
})}) for mass, a in particles))
return out # .plot2d()
def apsides2ecc(apo: float, peri: float) -> Tuple[float, float]:
return (apo+peri)/2, (apo-peri)/(apo+peri)
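# Worked example (illustrative, not part of the original file): feeding
# Earth's aphelion and perihelion into apsides2ecc recovers the orbital
# elements used for earth.orbit below:
# >>> apsides2ecc(1.521e11, 1.471e11)
# (1.496e11, 0.0167...)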
def keplerian(parent: Body, cartesian: Tuple[float, float, float, float, float, float]) -> Orbit:
"""Get keplerian orbital parameters (a, e, i, O, o, m)"""
# https://downloads.rene-schwarz.com/download/M002-Cartesian_State_Vectors_to_Keplerian_Orbit_Elements.pdf
r, r_ = np.array(cartesian[:3]), np.array(cartesian[3:])
mu = parent.mu
# 1a Calculate orbital momentum vector h
h = np.cross(r, r_)
# 1b Obtain the eccentricity vector e [1] from
e = np.cross(r_, h) / mu - r / np.linalg.norm(r)
# 1c Determine the vector n pointing towards the ascending node and the true anomaly nu with
n = np.cross(np.transpose((0, 0, 1)), h)
temp = acos(np.dot(e, r) / (np.linalg.norm(e) * np.linalg.norm(r)))
if 0 <= np.dot(r, r_):
nu = temp
else:
nu = 2*pi - temp
# 2 Calculate the orbit inclination i by using the orbital momentum vector h,
# where h z is the third component of h:
h_z = h[2]
i = acos(h_z / np.linalg.norm(h))
# 3 Determine the orbit eccentricity e [1],
# which is simply the magnitude of the eccentricity vector e, and the eccentric anomaly E [1]:
eccentricity = np.linalg.norm(e)
E = 2 * atan(tan(nu/2) / ((1+eccentricity)/(1-eccentricity))**.5)
# print(nu, eccentricity, '-> E =', E)
# E = 2 * atan2(((1+eccentricity)/(1-eccentricity))**.5, tan(nu/2))
# 4 Obtain the longitude of the ascending node Omega and the argument of periapsis omega:
if np.linalg.norm(n):
temp = acos(n[0] / np.linalg.norm(n))
if 0 <= n[1]:
Omega = temp
else:
Omega = 2*pi - temp
temp = acos(np.dot(n, e) / (np.linalg.norm(n) * np.linalg.norm(e)))
if 0 <= e[2]:
omega = temp
else:
omega = 2*pi - temp
else:
Omega, omega = 0, 0
# 5 Compute the mean anomaly M with help of Kepler’s Equation from the eccentric anomaly E
# and the eccentricity e:
M = E - eccentricity * sin(E)
# print(E, eccentricity, '-> M =', M)
# 6 Finally, the semi-major axis a is found from the expression
a = 1 / (2/np.linalg.norm(r) - np.linalg.norm(r_)**2/mu)
return Orbit(**{
'parent': parent,
'sma': a,
'e': eccentricity,
'i': i,
'lan': Omega,
'aop': omega,
'man': M,
})
def plot_delta_between(orbit1: Orbit, orbit2: Orbit):
"""Plot system with pyplot"""
resolution = 100
orbits = 8
limit = max([orbit1, orbit2], key=lambda x: x.apo).apo*2
outerp = max([orbit1, orbit2], key=lambda x: x.p).p
fig = plt.figure(figsize=(7, 7))
ax = Axes3D(fig)
ax.set_title('Body Delta')
ax.set_xlabel('dx (m)')
ax.set_ylabel('dy (m)')
ax.set_zlabel('dz (m)')
ax.set_xlim(-limit, limit)
ax.set_ylim(-limit, limit)
ax.set_zlim(-limit, limit)
ax.scatter(0, 0, 0, marker='*', color='y', s=50, zorder=2)
b1s = [orbit1.cartesian(t*outerp/resolution) for t in range(orbits*resolution)]
b2s = [orbit2.cartesian(t*outerp/resolution) for t in range(orbits*resolution)]
cs = [[a-b for a, b in zip(b1, b2)] for b1, b2 in zip(b1s, b2s)]
xs, ys, zs, vxs, vys, vzs = zip(*cs)
ax.plot(xs, ys, zs, color='k', zorder=1)
plt.show()
def plot_distance(orbit1: Orbit, orbit2: Orbit):
"""Plot distance between two bodies over several orbits"""
resolution = 1000
orbits = 8
outerp = max([orbit1, orbit2], key=lambda x: x.p).p
plt.figure(figsize=(7, 7))
plt.title('Body Delta')
plt.xlabel('time since epoch (s)')
plt.ylabel('distance (m)')
ts = [(t*outerp/resolution) for t in range(orbits*resolution)]
xs = [orbit1.distance_to(orbit2, t) for t in ts]
plt.plot(ts, xs, color='k')
plt.show()
def distance_audio(orbit1: Orbit, orbit2: Orbit):
"""Play the plot_distance curve as audio
Encoding   | Signed 16-bit PCM
Byte order | little endian
Channels   | 1 channel mono
"""
from mochaaudio import pcm_to_wav, play_file
print("* recording")
# begin plot_distance
# if the product of these is 44100 it will last 1s
resolution = 441
orbits = 100 # this will be close to the output frequency
outerp = max([orbit1, orbit2], key=lambda x: x.p).p
ts = [(t*outerp/resolution) for t in range(orbits*resolution)]
xs = [orbit1.distance_to(orbit2, t) for t in ts]
# normalize xs to [-1, 1]
xs_m, xs_M = np.amin(xs), np.amax(xs)
xs = np.array([2 * (i-xs_m)/(xs_M-xs_m) - 1 for i in xs])
# print(xs, np.amin(xs), np.amax(xs))
frames = (0x7FFF * np.array(xs)).astype(np.int16)
# print(frames, np.amin(frames), np.amax(frames))
# end plot_distance
print("* done recording")
# with open('audacity.txt', 'wb+') as file:
# file.write(frames.tobytes())
pcm_to_wav(frames.tobytes())
# play audio
play_file('output.wav')
def value_parse(value) -> float:
try:
return eval(value) if isinstance(value, str) else value
except (NameError, SyntaxError):
return value
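# Illustrative note (not part of the original file): value_parse lets the
# JSON data files store expressions as strings. Something like '2*au' or
# '86164.1' is eval'd in this module's namespace, while a string that names
# nothing here (e.g. the class label 'star') raises NameError/SyntaxError
# and is returned unchanged.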
def convert_atmosphere(data: dict) -> Atmosphere:
return Atmosphere(**{key: value_parse(value) for key, value in data.items()})
def convert_orbit(data: dict, current_universe: dict) -> Orbit:
out = Orbit(**{key: value_parse(value) for key, value in data.items()})
# string -> bod
if isinstance(out.parent, str):
out.properties['parent'] = value_parse(out.parent)
# still string -> parent doesn't exist as a var maybe
if isinstance(out.parent, str):
out.properties['parent'] = current_universe[out.parent]
return out
def convert_rotation(data: dict) -> Rotation:
return Rotation(**{key: value_parse(value) for key, value in data.items()})
def convert_body(data: dict, current_universe: dict, datatype=Body) -> Body:
body_data = {}
for key, value in data.items():
if key == 'atmosphere':
body_data[key] = convert_atmosphere(value)
elif key == 'orbit':
body_data[key] = convert_orbit(value, current_universe)
elif key == 'rotation':
body_data[key] = convert_rotation(value)
else:
body_data[key] = value_parse(value)
return datatype(**body_data)
def load_data(seed: dict) -> dict:
import os
from json import load
loc = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'mochaastro')
universe_data = seed
for file in [f for f in os.listdir(loc) if f.endswith('.json')]:
print('Loading', file, '...')
json_data = load(open(os.path.join(loc, file), 'r'))
for obj in json_data:
universe_data[obj['name']] = convert_body(obj, universe_data, (Star if obj['class'] == 'star' else Body))
return universe_data
def plot_grav_acc(body1: Body, body2: Body) -> None:
"""Plot gravitational acceleration from one body to another over several orbits"""
resolution = 1000
orbits = 8
outerp = max([body1, body2], key=lambda x: x.orbit.p).orbit.p
plt.figure(figsize=(7, 7))
plt.title('Body Delta')
plt.xlabel('time since epoch (s)')
plt.ylabel('acceleration (m/s^2)')
plt.yscale('log')
ts = [(t*outerp/resolution) for t in range(orbits*resolution)]
xs = [body1.acc_towards(body2, t) for t in ts]
plt.plot(ts, xs, color='k')
plt.show()
def plot_grav_acc_vector(body1: Body, body2: Body) -> None:
"""Plot gravitational acceleration vector from one body to another over several orbits"""
resolution = 100
orbits = 8
outerp = max([body1, body2], key=lambda x: x.orbit.p).orbit.p
fig = plt.figure(figsize=(7, 7))
ax = Axes3D(fig)
ax.set_title('Acceleration')
ax.set_xlabel('x (m/s^2)')
ax.set_ylabel('y (m/s^2)')
ax.set_zlabel('z (m/s^2)')
ts = [(t*outerp/resolution) for t in range(orbits*resolution)]
xs, ys, zs = zip(*[body1.acc_vector_towards(body2, t) for t in ts])
ax.plot(xs, ys, zs, color='k')
axisEqual3D(ax)
plt.show()
def search(name: str) -> Body:
# try exact match
if name in universe:
return universe[name]
# try case insensitive
for key, val in universe.items():
if name.lower() == key.lower():
return val
# try substring
hits = set()
for key, val in universe.items():
if name.lower() in key.lower():
hits.add(val)
if len(hits) == 1:
return list(hits)[0]
raise KeyError(hits)
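# Usage sketch (illustrative, not part of the original file): search tries an
# exact key first, then a case-insensitive match, then substrings, so
# search('Mars') and search('mars') both return the mars Body, while an
# ambiguous fragment matching several names raises KeyError carrying the set
# of candidate bodies.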
def stargen(m: float) -> Star:
"""Generate star from mass"""
m /= sun.mass
# default exponents: .74,3,.505,-2.5
# I find 0.96 a better approximation than 0.74, at least for smaller stars.
# I find 0.54 a very slightly better approximation than 0.505, at least for smaller stars.
# Luminosity and time values from https://www.academia.edu/4301816/On_Stellar_Lifetime_Based_on_Stellar_Mass
# L
if m > .45:
lum = 1.148*m**3.4751
else:
lum = .2264*m**2.52
return Star(**{
'mass': m*sun.mass,
'radius': sun.radius*m**0.96,
'luminosity': sun.luminosity*lum,
'temperature': 5772*m**.54,
})
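# Worked example (illustrative): stargen(sun.mass) takes the m > .45 branch,
# giving radius == sun.radius, luminosity of about 1.148*sun.luminosity and a
# temperature of 5772 K -- the fitted relations reproduce a roughly, but not
# exactly, Sun-like star at one solar mass.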
def test_functions() -> None:
print(str(earth.orbit))
print('~'*40)
print(str(keplerian(sun, earth.orbit.cartesian(0))))
def universe_sim(parent: Body, t: float=0, size: Tuple[int, int]=(1024, 640), selection: Body=None) -> None:
# TODO
# moon display
# comet tails
"""Use pygame to show the system of [parent] and all subsystems"""
import pygame
from pygame import gfxdraw # do not remove: pygame.gfxdraw is not imported automatically, and the gfxdraw calls below need it
from time import sleep, time
from math import hypot
from mochamath import dist
from mochaunits import round_time
orbit_res = 64
dot_radius = 2
black, blue, beige, white, grey = (0,)*3, (0, 0, 255), (255, 192, 128), (255,)*3, (128,)*3
red = blue[::-1]
fps = 30
timerate = 1/fps
paused = False
vectors = False
# target = parent # until user selects a new one
mouse_sensitivity = 10 # pixels
width, height = size
center = width//2, height//2
max_a = 20*parent.radius
current_a = max_a
if selection is None:
selection = parent
selection_coords = center
current_coords = selection_coords
v_exaggeration = max_a/1.4e4
pygame.init()
screen = pygame.display.set_mode(size, pygame.RESIZABLE)
refresh = pygame.display.flip
inverse_universe = {j: i for i, j in universe.items()}
font_large = 20
font_normal = 16
font_small = 12
# verify onscreen
def is_onscreen(coords: Tuple[int, int], buffer: int=0) -> bool:
x, y = coords
return -buffer <= x <= width+buffer and -buffer <= y <= height+buffer
def are_onscreen(points: tuple, buffer: int=0) -> bool:
"""return true if any onscreen"""
for point in points:
if is_onscreen(point):
return True
return False
def center_on_selection(coords: Tuple[int, int]) -> Tuple[int, int]:
return tuple(i-j+k for i, j, k in zip(coords, current_coords, center))
def coord_remap(coords: Tuple[float, float], smooth: bool=True) -> Tuple[int, int]:
a = current_a if smooth else max_a
b = height/width * a
xmap = linear_map((-a, a), (0, width))
ymap = linear_map((-b, b), (height, 0))
x, y = coords
return int(round(xmap(x))), int(round(ymap(y)))
def is_hovering(body: Body, coords: Tuple[int, int]) -> bool:
apparent_radius = body.radius * width/(2*current_a) if 'radius' in body.properties else dot_radius
return dist(coords, pygame.mouse.get_pos()) < max(mouse_sensitivity, apparent_radius)
# display body
def point(at: Tuple[int, int], radius: float, color: Tuple[int, int, int]=white, fill: bool=True, highlight: bool=False) -> None:
"""radius is in meters, NOT pixels!!!"""
star_radius = round(radius * width/(2*current_a))
r = star_radius if dot_radius < star_radius else dot_radius
x, y = at
try:
pygame.gfxdraw.aacircle(screen, x, y, r, color)
if fill:
pygame.gfxdraw.filled_circle(screen, x, y, r, color)
if highlight:
pygame.gfxdraw.aacircle(screen, x, y, 2*r, red)
except OverflowError:
if is_onscreen(at):
screen.fill(color)
# display body
def show_body(body: Body, coords: Tuple[int, int], name: str='') -> None:
"""Coords are the actual screen coords"""
hovering = is_hovering(body, coords)
r = body.radius if 'radius' in body.properties else 1
point(coords, r, white, True, hovering)
# show name
apparent_radius = r * width/(2*current_a)
name_coords = tuple(int(i+apparent_radius) for i in coords)
text(name, name_coords, font_normal, grey)
# display arrow
def arrow(a: Tuple[int, int], b: Tuple[int, int], color: Tuple[int, int, int]=red) -> None:
"""Coords are the actual screen coords"""
tip_scale = 10
# the arrow "tip"
displacement = tuple(j-i for i, j in zip(a, b))
# -> point between the - and the >
tip_base = tuple((tip_scale-3)/tip_scale * i for i in displacement)
real_tip_base = tuple(i+j for i, j in zip(a, tip_base))
# perpendicular vector 1/4 size
perpendicular = tuple((-1)**j * i/tip_scale for j, i in enumerate(displacement[::-1]))
# left tip
left_tip = tuple(i+j+k for i, j, k in zip(a, tip_base, perpendicular))
# right tip
right_tip = tuple(i+j-k for i, j, k in zip(a, tip_base, perpendicular))
# render
pygame.draw.aaline(screen, color, a, real_tip_base)
arrow_tip = left_tip, right_tip, b
if any(is_onscreen(i) for i in arrow_tip):
pygame.draw.aalines(screen, color, True, arrow_tip)
# display text
def text(string: str, at: Tuple[int, int], size: int=font_normal, color: Tuple[int, int, int]=white, shadow: bool=False) -> None:
if not is_onscreen(at):
return None
this_font = pygame.font.SysFont('Courier New', size)
string = string.replace('\t', ' '*4)
# first, shadow
if shadow:
text(string, (at[0]+1, at[1]+1), size, black)
# then, real text
for i, line in enumerate(string.split('\n')):
textsurface = this_font.render(line, True, color)
x, y = at
y += i*size
screen.blit(textsurface, (x, y))
# precompute orbits (time0, time1, ..., timeN)
def precompute_orbit(obj: Body) -> Tuple[Tuple[int, int], ...]:
return tuple(obj.orbit.cartesian(t+i*obj.orbit.p/orbit_res)[:2] for i in range(orbit_res))
def zoom(r: float=0) -> None:
nonlocal max_a
nonlocal selection_coords
max_a *= 2**r
if selection != parent:
try:
selection_coords = coord_remap(selection.orbit.cartesian(t)[:2])
except KeyError:
pass
# only get first tier, dc about lower tiers
orbits = []
for name, body in universe.items():
if 'orbit' not in body.properties or 'parent' not in body.orbit.properties:
continue
# disable comets; sharp angles
if 'class' in body.properties and body.properties['class'] == 'comet':
continue
# clean orbits of undrawable orbits
try:
body.orbit.cartesian()
except KeyError:
continue
# good orbit then!
if body.orbit.parent == parent:
color = beige if 'class' in body.properties and body.properties['class'] != 'planet' else blue
orbits.append((name, body, precompute_orbit(body), color))
# main loop
# frame
while 1:
start_time = time()
t += timerate
screen.fill(black)
# recenter
zoom()
# show bodies
# show star
if is_onscreen((0, 0)):
show_body(parent, center_on_selection(center), inverse_universe[parent])
# show planets
# for_start = time()
for name, body, orbit, color in orbits: # ~1.1 ms/body @ orbit_res = 64
# hide small orbits
if not any(20 < abs(i-j//2) for i, j in zip(coord_remap((body.orbit.a,)*2), (width, height))):
continue
# redraw orbit
coords = center_on_selection(coord_remap(body.orbit.cartesian(t)[:2]))
hovering = is_hovering(body, coords)
points = tuple(map(center_on_selection, map(coord_remap, orbit)))
if not (are_onscreen(points) or is_onscreen(coords)):
continue
pygame.draw.lines(screen, red if hovering else color, True, points)
# planet dot
if not is_onscreen(coords):
continue
# tip of the arrow
if vectors:
# velocity
mag = hypot(*body.orbit.cartesian(t)[3:5])
vcoords = center_on_selection(coord_remap(tuple(
v_exaggeration*i+j for i, j in
zip(body.orbit.cartesian(t)[3:5], body.orbit.cartesian(t)[:2]))))
arrow(coords, vcoords)
# kinetic energy
ke_str = ''
if 'mass' in body.properties:
ke = 1/2 * Mass(body.mass) * (Length(mag)/Time(1))**2
ke_str = '\nKE = {}'.format(pretty_dim(ke, 0))
# mag
text('{}{}'.format(pretty_dim(Length(mag)/Time(1)), ke_str), vcoords, font_normal, red)
show_body(body, coords, name)
# change selection?
if hovering:
if pygame.mouse.get_pressed()[0]:
selection = body
zoom()
elif pygame.mouse.get_pressed()[2]:
universe_sim(body, t, (width, height))
# print((time()-for_start)/len(orbits))
# print date
try:
current_date = str(round_time(epoch+timedelta(seconds=t)))
except OverflowError:
current_date = '>10000' if 0 < t else '<0'
information = current_date + ' (x{0}){1}'.format(int(fps*timerate), ' [PAUSED]' if paused else '') + '\n' + \
'Width: '+pretty_dim(Length(2*current_a, 'astro'))
text(information, (0, height-font_large*2), font_large, white, True)
# print FPS
text(str(round(1/(time()-start_time)))+' FPS', (width-font_normal*4, 0), font_normal, red)
# print selection data
text(selection.data, (0, 0), font_small, (200, 255, 200), True)
# refresh
refresh()
# post-render operations
# event handling
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.display.quit()
pygame.quit()
return print('Thank you for using mochaastro2.py!~')
elif event.type == pygame.KEYDOWN:
if event.key == pygame.K_KP_PLUS: # zoom in
zoom(-1)
elif event.key == pygame.K_KP_MINUS: # zoom out
zoom(1)
elif event.key == pygame.K_PERIOD: # timerate up
timerate *= 2
elif event.key == pygame.K_COMMA: # timerate down
timerate /= 2
elif event.key == pygame.K_p: # pause
timerate, paused = paused, timerate
elif event.key == pygame.K_r: # reverse
timerate = -timerate
elif event.key == pygame.K_v: # vectors
vectors = not vectors
elif event.type == pygame.MOUSEBUTTONDOWN:
if event.button == 1: # check for recenter on parent
if is_hovering(parent, center_on_selection(center)):
selection = parent
selection_coords = center
elif event.button == 3: # check for "go back!"
if 'orbit' in parent.properties and 'parent' in parent.orbit.properties and \
not any(is_hovering(body, center_on_selection(coord_remap(body.orbit.cartesian(t)[:2]))) for _, body, _, _ in orbits):
universe_sim(parent.orbit.parent, t, (width, height), selection)
elif event.button == 4: # zoom in
zoom(-1)
elif event.button == 5: # zoom out
zoom(1)
elif event.type == pygame.VIDEORESIZE:
width, height = event.size
center = width//2, height//2
selection_coords = center
zoom()
pygame.display.set_mode(event.size, pygame.RESIZABLE)
# print(event.size) # debug
# smooth zoom
current_a = (max_a + current_a)/2
current_coords = tuple((i + j)//2 for i, j in zip(selection_coords, current_coords))
# refresh title
title = '{}, {} System - {}'.format(inverse_universe[selection], inverse_universe[parent], current_date)
pygame.display.set_caption(title)
# sleep
wait_time = 1/fps - (time() - start_time)
if 0 < wait_time:
sleep(wait_time)
def warnings() -> None:
"""Attempt to find missing data"""
for name, body in universe.items():
# print('Checking {}...'.format(name))
# check for missing albedo data
if 'albedo' not in body.properties:
if 6e19 < body.mass < 1.5e29 and body.orbit.a < 600*au:
print('Albedo data missing from {}, should be easy to find'.format(name))
# check for missing atm data
elif 'atmosphere' not in body.properties and body.atm_retention < .131 and body.orbit.a < 67*au:
print('Atmosphere data missing from {}, atmosphere predicted'.format(name))
# bodies - this file only contains the sun, moon, and planets. json files provide the rest.
# USE BOND ALBEDO PLEASE
sun = Star(**{
'orbit': Orbit(**{
'sma': 2.7e20,
}),
'rotation': Rotation(**{
'period': 25.05*day,
'tilt': .127,
'ra': 286.13*deg,
'dec': 63.87*deg,
}),
'atmosphere': Atmosphere(**{
'scale_height': 6050,
'surface_pressure': .4,
}),
'mass': 1.9885e30,
'radius': 6.957e8,
'composition': {
'H': .7346,
'He': .2483,
'O': .0077,
'C': .0029,
'Fe': .0016,
'Ne': .0012,
'N': .0009,
'Si': .0007,
'Mg': .0005,
'S': .0004,
# https://web.archive.org/web/20151107043527/http://weft.astro.washington.edu/courses/astro557/LODDERS.pdf
'Ni': 9.1e-5,
'Cu': 1e-6,
'Pt': 2.6e-9,
'Ag': 9.4e-10,
'Au': 3.7e-10,
'U': 1.8e-11,
},
# stellar properties
'luminosity': 3.828e26,
'temperature': 5778,
})
mercury = Body(**{
'orbit': Orbit(**{
'parent': sun,
'sma': 5.790905e10,
'e': .20563,
'i': .1223,
'lan': .84354,
'aop': .50831,
'man': 3.05077,
}),
'rotation': Rotation(**{
'period': 58.646 * day,
'tilt': .00059, # to orbit
'ra': 281.01*deg,
'dec': 61.42*deg,
}),
'atmosphere': Atmosphere(**{
'scale_height': 26000,
'surface_pressure': 5e-10,
}),
'mass': 3.3011e23,
'radius': 2.4397e6,
'albedo': .088,
})
venus = Body(**{
'orbit': Orbit(**{
'parent': sun,
'sma': 1.08208e11,
'e': .006772,
'i': .0592466,
'lan': 1.33832,
'aop': .95791,
'man': .87467,
}),
'rotation': Rotation(**{
'period': 243.025 * day,
'tilt': 3.0955,
'ra': 272.76*deg,
'dec': 67.16*deg,
}),
'atmosphere': Atmosphere(**{
'scale_height': 15900,
'surface_pressure': 9.2e6,
'composition': {
'CO2': .965,
'N2': .035,
'SO2': 1.5e-4,
'Ar': 7e-5,
'H2O': 2e-5,
'CO': 1.7e-5,
'He': 1.2e-5,
'Ne': 7e-6,
'HCl': 4e-7,
'HF': 3e-9,
},
}),
'mass': 4.8675e24,
'radius': 6.0518e6,
'albedo': .76,
})
earth = Body(**{
'orbit': Orbit(**{
'parent': sun,
'sma': 1.49598023e11,
'e': .0167086,
'i': 0, # by definition
'lan': 0, # -.1965352,
'aop': 0, # 1.9933027,
'man': .1249,
}),
'rotation': Rotation(**{
'period': 86164.100352,
'tilt': .4090926295,
'ra': 0,
'dec': 90*deg,
}),
'atmosphere': Atmosphere(**{
'scale_height': 8500,
'surface_pressure': 101325,
'composition': { # https://en.wikipedia.org/wiki/Atmospheric_chemistry#Atmospheric_composition
'N2': .78084,
'O2': .20946,
'H2O': .01,
'Ar': .00934,
'CO2': 4.08e-4,
'Ne': 1.818e-5,
'He': 5.24e-6,
'CH4': 1.87e-6,
'Kr': 1.14e-6,
'H2': 5.5e-7,
'N2O': 9e-8,
'NO2': 2e-8,
},
}),
'composition': { # by mass https://en.wikipedia.org/wiki/Abundance_of_the_chemical_elements#Earth
'Fe': .319,
'O': .297,
'Si': .161,
'Mg': .154,
'Ni': 1.822e-2,
'Ca': 1.71e-2,
'Al': 1.59e-2,
'S': 6.35e-3,
'Cr': 4.7e-3,
'Na': 1.8e-3,
'Mn': 1.7e-3,
'P': 1.21e-3,
'Co': 8.8e-4,
'Ti': 8.1e-4,
'C': 7.3e-4,
'H': 2.6e-4,
'K': 1.6e-4,
'V': 1.05e-4,
'Cl': 7.6e-5,
'Cu': 6e-5,
'Zn': 4e-5,
'N': 2.5e-5,
'Sr': 1.3e-5,
'Sc': 1.1e-5,
'F': 1e-5,
'Zr': 7.1e-6,
'Ge': 7e-6,
'Ba': 4.5e-6,
'Ga': 3e-6,
'Y': 2.9e-6,
'Se': 2.7e-6,
'Pt': 1.9e-6,
'As': 1.7e-6,
'Mo': 1.7e-6,
'Ru': 1.3e-6,
'Ce': 1.13e-6,
'Li': 1.1e-6,
'Pd': 1e-6,
'Os': 9e-7,
'Ir': 9e-7,
'Nd': 8.4e-7,
'Dy': 4.6e-7,
'Nb': 4.4e-7,
'La': 4.4e-7,
'Rb': 4e-7,
'Gd': 3.7e-7,
'Br': 3e-7,
'Te': 3e-7,
'Er': 3e-7,
'Yb': 3e-7,
'Sm': 2.7e-7,
'Sn': 2.5e-7,
'Rh': 2.4e-7,
'Pb': 2.3e-7,
'B': 2e-7,
'Hf': 1.9e-7,
'Pr': 1.7e-7,
'W': 1.7e-7,
'Au': 1.6e-7,
'Eu': 1e-7,
'Ho': 1e-7,
'Cd': 8e-8,
'Re': 8e-8,
'Tb': 7e-8,
'Th': 6e-8,
'Be': 5e-8,
'Ag': 5e-8,
'Sb': 5e-8,
'I': 5e-8,
'Tm': 5e-8,
'Lu': 5e-8,
'Cs': 4e-8,
'Ta': 3e-8,
'Hg': 2e-8,
'U': 2e-8,
'In': 1e-8,
'Tl': 1e-8,
'Bi': 1e-8,
},
'mass': 5.97237e24,
'radius': 6.371e6,
'albedo': .306,
})
moon = Body(**{
'orbit': Orbit(**{
'parent': earth,
'sma': 3.84399e8,
'e': .0549,
'i': .0898,
'lan': 0, # unknown
'aop': 0, # unknown
'man': 0, # unknown
}),
'rotation': Rotation(**{
'period': 27.321661*day,
'tilt': .02692,
'ra': 270*deg,
'dec': 66.54*deg,
}),
'atmosphere': Atmosphere(**{
'scale_height': 41860,
'surface_pressure': 1e-7,
}),
'composition': { # https://www.permanent.com/l-apollo.htm
'O': .446,
'Si': .21,
'Al': .133,
'Ca': .1068,
'Fe': .0487,
'Mg': .0455,
'Na': 3.1e-3,
'Ti': 3.1e-3,
'Cr': 8.5e-4,
'K': 8e-4,
'Mn': 6.75e-4,
'P': 5e-4,
'C': 1e-4,
'H': 5.6e-5,
'Cl': 1.7e-5,
},
'mass': 7.342e22,
'radius': 1.7371e6,
'albedo': .136,
})
mars = Body(**{
'orbit': Orbit(**{
'parent': sun,
'sma': 2.279392e11,
'e': .0934,
'i': .03229,
'lan': .86495,
'aop': 5.0004,
'man': 0.33326,
}),
'rotation': Rotation(**{
'period': 1.025957*day,
'tilt': .4396, # to orbital plane
'ra': 317.68*deg,
'dec': 52.89*deg,
}),
'atmosphere': Atmosphere(**{
'scale_height': 11100,
'surface_pressure': 636,
'composition': {
'CO2': .949,
'N2': .026,
'Ar': .019,
'O2': .00174,
'CO': .000747,
'H2O': .0003,
},
}),
'mass': 6.4171e23,
'radius': 3.3895e6,
'albedo': .25,
})
jupiter = Body(**{
'orbit': Orbit(**{
'parent': sun,
'sma': 5.2044*au,
'e': .0489,
'i': .02774,
'lan': 1.75343,
'aop': 4.77988,
'man': .34941,
}),
'rotation': Rotation(**{
'period': 9.925*hour,
'tilt': .0546, # to orbit
'ra': 268.05*deg,
'dec': 64.49*deg,
}),
'atmosphere': Atmosphere(**{
'scale_height': 27000,
'surface_pressure': 7e5,
}),
'mass': 1.8982e27,
'radius': 6.9911e7,
'albedo': .503,
})
saturn = Body(**{
'orbit': Orbit(**{
'parent': sun,
'sma': 9.5826*au,
'e': .0565,
'i': .04337,
'lan': 1.98383,
'aop': 5.92351,
'man': 5.53304,
}),
'rotation': Rotation(**{
'period': 10*hour + 33*minute + 38,
'tilt': .4665, # to orbit
'ra': 40.60*deg,
'dec': 83.54*deg,
}),
'atmosphere': Atmosphere(**{
'scale_height': 59500,
'surface_pressure': 1.4e5,
}),
'mass': 5.6834e26,
'radius': 5.8232e7,
'albedo': .342,
})
uranus = Body(**{
'orbit': Orbit(**{
'parent': sun,
'sma': 19.2184*au,
'e': .046381,
'i': .0135,
'lan': 1.2916,
'aop': 1.6929494,
'man': 2.48253189,
}),
'rotation': Rotation(**{
'period': .71833*day,
'tilt': 1.706, # to orbit
'ra': 257.43*deg,
'dec': -15.10*deg,
}),
'atmosphere': Atmosphere(**{
'scale_height': 27700,
}),
'mass': 8.681e25,
'radius': 2.5362e7,
'albedo': .3,
})
neptune = Body(**{
'orbit': Orbit(**{
'parent': sun,
'sma': 30.11*au,
'e': .009456,
'i': .03085698,
'lan': 2.30006,
'aop': 4.82297,
'man': 4.47202,
}),
'rotation': Rotation(**{
'period': .6713*day,
'tilt': .4943, # to orbit
'ra': 299.36*deg,
'dec': 43.46*deg,
}),
'atmosphere': Atmosphere(**{
'scale_height': 19700,
}),
'mass': 1.02413e26,
'radius': 2.4622e7,
'albedo': .29,
})
# inner_solar_system = System(mercury, venus, earth, mars) # a <= mars
# solar_system = System(mercury, venus, earth, mars, jupiter, saturn, uranus, neptune) # known planets
# jupiter_system = System(io, europa, ganymede, callisto)
# kuiper = System(neptune, pons_gambart, pluto, ikeya_zhang, eris, sedna, planet_nine) # a >= neptune
# comets = System(earth, halley, pons_gambart, ikeya_zhang) # earth and comets
solar_system = {
'Sun': sun,
'Mercury': mercury,
'Venus': venus,
'Earth': earth,
'Moon': moon,
'Mars': mars,
'Jupiter': jupiter,
'Saturn': saturn,
'Uranus': uranus,
'Neptune': neptune,
}
solar_system_object = System(sun, *(i for i in solar_system.values() if i is not sun))
universe = load_data(solar_system.copy())
# planet_nine.orbit.plot
# distance_audio(earth.orbit, mars.orbit)
# solar_system.sim()
# burn = earth.orbit.transfer(mars.orbit)
# universe_sim(sun)
|
gpl-3.0
|
JOHNKYON/DSTC
|
DSTC2/basic.py
|
1
|
1296
|
# -*- coding:utf-8 -*-
from sklearn.cross_validation import train_test_split
from DSTC2.traindev.scripts import myLogger
from DSTC2.traindev.scripts.model import bp
from traindev.scripts import file_reader
from traindev.scripts import initializer
from traindev.scripts.initializer import Set
__author__ = "JOHNKYON"
global logger
if __name__ == "__main__":
global logger
logger = myLogger.myLogger("basic")
logger.info("Starting basic")
# select mode
dataset = file_reader.get_dataset("dstc2_debug")
logger.info("token check test begin")
raw = initializer.raw_initializer(dataset)
# Build token and dictionary
token = initializer.token_initializer(raw["input"])
dictionary = initializer.dictionary_initializer(token)
# Build input vector
one_set = Set(token, dictionary, raw["output"])
input_mtr, output_mtr = bp.bp_initialize(one_set.input_mtr, one_set.output_mtr)
# get model
model = bp.bp_builder(one_set.sentence_length * one_set.sentence_count, len(one_set.act_dict) * one_set.sentence_count)
# train
X_train, X_test, y_train, y_test = train_test_split(input_mtr, output_mtr, test_size=0.2)
model.fit(X_train, y_train, batch_size=2, nb_epoch=5)
# test
print model.evaluate(X_test, y_test, batch_size=2)
|
mit
|
detrout/debian-statsmodels
|
statsmodels/sandbox/distributions/otherdist.py
|
33
|
10145
|
'''Parametric Mixture Distributions
Created on Sat Jun 04 2011
Author: Josef Perktold
Notes:
Compound Poisson has mass point at zero
http://en.wikipedia.org/wiki/Compound_Poisson_distribution
and would need special treatment
need a distribution that has discrete mass points and continuous range, e.g.
compound Poisson, Tweedie (for some parameter range),
pdf of Tobit model (?) - truncation with clipping
Question: Metaclasses and class factories for generating new distributions from
existing distributions by transformation, mixing, compounding
'''
from __future__ import print_function
import numpy as np
from scipy import stats
class ParametricMixtureD(object):
'''mixtures with a discrete distribution
The mixing distribution is a discrete distribution like scipy.stats.poisson.
All distributions in the mixture are of the same type, are parameterized
by the outcome of the mixing distribution, and have to be continuous
distributions (or have a pdf method).
An example is a mixture of normally distributed random variables with
Poisson as the mixing distribution.
assumes vectorized shape, loc and scale as in scipy.stats.distributions
assume mixing_dist is frozen
initialization looks fragile for all possible cases of lower and upper
bounds of the distributions.
'''
def __init__(self, mixing_dist, base_dist, bd_args_func, bd_kwds_func,
cutoff=1e-3):
'''create a mixture distribution
Parameters
----------
mixing_dist : discrete frozen distribution
mixing distribution
base_dist : continuous distribution
parameterized distributions in the mixture
bd_args_func : callable
function that builds the tuple of args for the base_dist.
The function obtains as argument the values in the support of
the mixing distribution and should return an empty tuple or
a tuple of arrays.
bd_kwds_func : callable
function that builds the dictionary of kwds for the base_dist.
The function obtains as argument the values in the support of
the mixing distribution and should return an empty dictionary or
a dictionary with arrays as values.
cutoff : float
If the mixing distribution has infinite support, then the
distribution is truncated with approximately (subject to integer
conversion) the cutoff probability in the missing tail. Random
draws that are outside the truncated range are clipped, that is
assigned to the highest or lowest value in the truncated support.
'''
self.mixing_dist = mixing_dist
self.base_dist = base_dist
#self.bd_args = bd_args
if not np.isneginf(mixing_dist.dist.a):
lower = mixing_dist.dist.a
else:
lower = mixing_dist.ppf(1e-4)
if not np.isposinf(mixing_dist.dist.b):
upper = mixing_dist.dist.b
else:
upper = mixing_dist.isf(1e-4)
self.ma = lower
self.mb = upper
mixing_support = np.arange(lower, upper+1)
self.mixing_probs = mixing_dist.pmf(mixing_support)
self.bd_args = bd_args_func(mixing_support)
self.bd_kwds = bd_kwds_func(mixing_support)
def rvs(self, size=1):
mrvs = self.mixing_dist.rvs(size)
#TODO: check strange cases? this assumes continuous integers
mrvs_idx = (np.clip(mrvs, self.ma, self.mb) - self.ma).astype(int)
bd_args = tuple(md[mrvs_idx] for md in self.bd_args)
bd_kwds = dict((k, self.bd_kwds[k][mrvs_idx]) for k in self.bd_kwds)
kwds = {'size':size}
kwds.update(bd_kwds)
rvs = self.base_dist.rvs(*bd_args, **kwds) # use the args indexed by the mixing draws
return rvs, mrvs_idx
def pdf(self, x):
x = np.asarray(x)
if np.size(x) > 1:
x = x[...,None] #[None, ...]
bd_probs = self.base_dist.pdf(x, *self.bd_args, **self.bd_kwds)
prob = (bd_probs * self.mixing_probs).sum(-1)
return prob, bd_probs
def cdf(self, x):
x = np.asarray(x)
if np.size(x) > 1:
x = x[...,None] #[None, ...]
bd_probs = self.base_dist.cdf(x, *self.bd_args, **self.bd_kwds)
prob = (bd_probs * self.mixing_probs).sum(-1)
return prob, bd_probs
#try:
class ClippedContinuous(object):
'''clipped continuous distribution with a masspoint at clip_lower
Notes
-----
first version, to try out possible designs
insufficient checks for valid arguments and not clear
whether it works for distributions that have compact support
clip_lower is fixed and independent of the distribution parameters.
The clip_lower point in the pdf has to be interpreted as a mass point,
i.e. different treatment in integration and expect function, which means
none of the generic methods for this can be used.
maybe this will be better designed as a mixture between a degenerate or
discrete and a continuous distribution
Warning: uses equality to check for clip_lower values in function
arguments, since these are floating points, the comparison might fail
if clip_lower values are not exactly equal.
We could add a check whether the values are in a small neighborhood, but
it would be expensive (need to search and check all values).
'''
def __init__(self, base_dist, clip_lower):
self.base_dist = base_dist
self.clip_lower = clip_lower
def _get_clip_lower(self, kwds):
'''helper method to get clip_lower from kwds or attribute
'''
if not 'clip_lower' in kwds:
clip_lower = self.clip_lower
else:
clip_lower = kwds.pop('clip_lower')
return clip_lower, kwds
def rvs(self, *args, **kwds):
clip_lower, kwds = self._get_clip_lower(kwds)
rvs_ = self.base_dist.rvs(*args, **kwds)
#same as numpy.clip ?
rvs_[rvs_ < clip_lower] = clip_lower
return rvs_
def pdf(self, x, *args, **kwds):
x = np.atleast_1d(x)
if not 'clip_lower' in kwds:
clip_lower = self.clip_lower
else:
#allow clip_lower to be a possible parameter
clip_lower = kwds.pop('clip_lower')
pdf_raw = np.atleast_1d(self.base_dist.pdf(x, *args, **kwds))
clip_mask = (x == self.clip_lower)
if np.any(clip_mask):
clip_prob = self.base_dist.cdf(clip_lower, *args, **kwds)
pdf_raw[clip_mask] = clip_prob
#the following will be handled by sub-classing rv_continuous
pdf_raw[x < clip_lower] = 0
return pdf_raw
def cdf(self, x, *args, **kwds):
if not 'clip_lower' in kwds:
clip_lower = self.clip_lower
else:
#allow clip_lower to be a possible parameter
clip_lower = kwds.pop('clip_lower')
cdf_raw = self.base_dist.cdf(x, *args, **kwds)
#not needed if equality test is used
## clip_mask = (x == self.clip_lower)
## if np.any(clip_mask):
## clip_prob = self.base_dist.cdf(clip_lower, *args, **kwds)
## pdf_raw[clip_mask] = clip_prob
#the following will be handled by sub-classing rv_continuous
#if self.a is defined
cdf_raw[x < clip_lower] = 0
return cdf_raw
def sf(self, x, *args, **kwds):
if not 'clip_lower' in kwds:
clip_lower = self.clip_lower
else:
#allow clip_lower to be a possible parameter
clip_lower = kwds.pop('clip_lower')
sf_raw = self.base_dist.sf(x, *args, **kwds)
sf_raw[x <= clip_lower] = 1
return sf_raw
def ppf(self, x, *args, **kwds):
raise NotImplementedError
def plot(self, x, *args, **kwds):
clip_lower, kwds = self._get_clip_lower(kwds)
mass = self.pdf(clip_lower, *args, **kwds)
xr = np.concatenate(([clip_lower+1e-6], x[x>clip_lower]))
import matplotlib.pyplot as plt
#x = np.linspace(-4, 4, 21)
#plt.figure()
plt.xlim(clip_lower-0.1, x.max())
#remove duplicate calculation
xpdf = self.pdf(x, *args, **kwds)
plt.ylim(0, max(mass, xpdf.max())*1.1)
plt.plot(xr, self.pdf(xr, *args, **kwds))
#plt.vline(clip_lower, self.pdf(clip_lower, *args, **kwds))
plt.stem([clip_lower], [mass],
linefmt='b-', markerfmt='bo', basefmt='r-')
return
if __name__ == '__main__':
doplots = 1
#*********** Poisson-Normal Mixture
mdist = stats.poisson(2.)
bdist = stats.norm
bd_args_fn = lambda x: ()
#bd_kwds_fn = lambda x: {'loc': np.atleast_2d(10./(1+x))}
bd_kwds_fn = lambda x: {'loc': x, 'scale': 0.1*np.ones_like(x)} #10./(1+x)}
pd = ParametricMixtureD(mdist, bdist, bd_args_fn, bd_kwds_fn)
print(pd.pdf(1))
p, bp = pd.pdf(np.linspace(0,20,21))
pc, bpc = pd.cdf(np.linspace(0,20,21))
print(pd.rvs())
rvs, m = pd.rvs(size=1000)
if doplots:
import matplotlib.pyplot as plt
plt.hist(rvs, bins = 100)
plt.title('poisson mixture of normal distributions')
#********** clipped normal distribution (Tobit)
bdist = stats.norm
clip_lower_ = 0. #-0.5
cnorm = ClippedContinuous(bdist, clip_lower_)
x = np.linspace(1e-8, 4, 11)
print(cnorm.pdf(x))
print(cnorm.cdf(x))
if doplots:
#plt.figure()
#cnorm.plot(x)
plt.figure()
cnorm.plot(x = np.linspace(-1, 4, 51), loc=0.5, scale=np.sqrt(2))
plt.title('clipped normal distribution')
fig = plt.figure()
for i, loc in enumerate([0., 0.5, 1.,2.]):
fig.add_subplot(2,2,i+1)
cnorm.plot(x = np.linspace(-1, 4, 51), loc=loc, scale=np.sqrt(2))
plt.title('clipped normal, loc = %3.2f' % loc)
loc = 1.5
rvs = cnorm.rvs(loc=loc, size=2000)
plt.figure()
plt.hist(rvs, bins=50)
plt.title('clipped normal rvs, loc = %3.2f' % loc)
#plt.show()
|
bsd-3-clause
|
Winterflower/mdf
|
mdf/viewer/panels/plotpanel.py
|
3
|
2675
|
"""
Panel for showing graphs
"""
import wx
import numpy as np
# force matplotlib to use whatever wx is installed
import sys
sys.frozen = True
from matplotlib.backends.backend_wxagg import FigureCanvasWxAgg
from matplotlib.figure import Figure
class PlotPanel(wx.Panel):
"""
The PlotPanel has a Figure and a Canvas. OnSize events simply set a
flag, and the actual resizing of the figure is triggered by an Idle event.
See:
http://www.scipy.org/Matplotlib_figure_in_a_wx_panel
"""
def __init__(self, parent, dataframes, color=None, dpi=None, **kwargs):
# initialize Panel
if 'id' not in kwargs.keys():
kwargs['id'] = wx.ID_ANY
if 'style' not in kwargs.keys():
kwargs['style'] = wx.NO_FULL_REPAINT_ON_RESIZE
wx.Panel.__init__(self, parent, **kwargs)
self.parent = parent
self.dataframes = dataframes
# initialize matplotlib stuff
self.figure = Figure(None, dpi)
self.figure.autofmt_xdate()
self.canvas = FigureCanvasWxAgg(self, -1, self.figure)
self.SetColor(color)
#self._SetSize((800, 600))
self.draw()
self._resizeflag = False
self.Bind(wx.EVT_IDLE, self._onIdle)
self.Bind(wx.EVT_SIZE, self._onSize)
def SetColor(self, rgbtuple=None):
"""Set figure and canvas colours to be the same."""
if rgbtuple is None:
rgbtuple = wx.SystemSettings.GetColour(wx.SYS_COLOUR_BTNFACE).Get()
clr = [c/255. for c in rgbtuple]
self.figure.set_facecolor( clr )
self.figure.set_edgecolor( clr )
self.canvas.SetBackgroundColour(wx.Colour(*rgbtuple))
def _onSize(self, event):
self._resizeflag = True
def _onIdle(self, evt):
if self._resizeflag:
self._resizeflag = False
self._SetSize()
def _SetSize(self, size=None):
if size is None:
size = tuple(self.GetClientSize())
self.SetSize(size)
self.canvas.SetSize(size)
self.figure.set_size_inches(float(size[0])/self.figure.get_dpi(),
float(size[1])/self.figure.get_dpi())
def draw(self):
ax = self.figure.add_subplot(111)
for dataframe in self.dataframes:
x = dataframe.index
for col in dataframe.columns:
empty = dataframe[col].count() == 0
y = dataframe[col].values if not empty else np.zeros(x.shape)
ax.plot(x, y, label=col)
try:
self.figure.autofmt_xdate()
except:
pass
ax.legend(loc="best")
ax.grid()
|
mit
|
glennq/scikit-learn
|
examples/ensemble/plot_random_forest_regression_multioutput.py
|
46
|
2640
|
"""
============================================================
Comparing random forests and the multi-output meta estimator
============================================================
An example to compare multi-output regression with random forest and
the :ref:`multioutput.MultiOutputRegressor <multiclass>` meta-estimator.
This example illustrates the use of the
:ref:`multioutput.MultiOutputRegressor <multiclass>` meta-estimator
to perform multi-output regression. A random forest regressor is used,
which supports multi-output regression natively, so the results can be
compared.
The random forest regressor will only ever predict values within the
range of observations or closer to zero for each of the targets. As a
result the predictions are biased towards the centre of the circle.
Using a single underlying feature, the model learns both the x and y
coordinates as outputs.
"""
print(__doc__)
# Author: Tim Head <[email protected]>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import train_test_split
from sklearn.multioutput import MultiOutputRegressor
# Create a random dataset
rng = np.random.RandomState(1)
X = np.sort(200 * rng.rand(600, 1) - 100, axis=0)
y = np.array([np.pi * np.sin(X).ravel(), np.pi * np.cos(X).ravel()]).T
y += (0.5 - rng.rand(*y.shape))
X_train, X_test, y_train, y_test = train_test_split(X, y,
train_size=400,
random_state=4)
max_depth = 30
regr_multirf = MultiOutputRegressor(RandomForestRegressor(max_depth=max_depth,
random_state=0))
regr_multirf.fit(X_train, y_train)
regr_rf = RandomForestRegressor(max_depth=max_depth, random_state=2)
regr_rf.fit(X_train, y_train)
# Predict on new data
y_multirf = regr_multirf.predict(X_test)
y_rf = regr_rf.predict(X_test)
# Plot the results
plt.figure()
s = 50
a = 0.4
plt.scatter(y_test[:, 0], y_test[:, 1],
c="navy", s=s, marker="s", alpha=a, label="Data")
plt.scatter(y_multirf[:, 0], y_multirf[:, 1],
c="cornflowerblue", s=s, alpha=a,
label="Multi RF score=%.2f" % regr_multirf.score(X_test, y_test))
plt.scatter(y_rf[:, 0], y_rf[:, 1],
c="c", s=s, marker="^", alpha=a,
label="RF score=%.2f" % regr_rf.score(X_test, y_test))
plt.xlim([-6, 6])
plt.ylim([-6, 6])
plt.xlabel("target 1")
plt.ylabel("target 2")
plt.title("Comparing random forests and the multi-output meta estimator")
plt.legend()
plt.show()
|
bsd-3-clause
|
nmartensen/pandas
|
pandas/tests/tools/test_numeric.py
|
2
|
14261
|
import pytest
import decimal
import numpy as np
import pandas as pd
from pandas import to_numeric
from pandas.util import testing as tm
from numpy import iinfo
class TestToNumeric(object):
def test_empty(self):
# see gh-16302
s = pd.Series([], dtype=object)
res = to_numeric(s)
expected = pd.Series([], dtype=np.int64)
tm.assert_series_equal(res, expected)
# Original issue example
res = to_numeric(s, errors='coerce', downcast='integer')
expected = pd.Series([], dtype=np.int8)
tm.assert_series_equal(res, expected)
def test_series(self):
s = pd.Series(['1', '-3.14', '7'])
res = to_numeric(s)
expected = pd.Series([1, -3.14, 7])
tm.assert_series_equal(res, expected)
s = pd.Series(['1', '-3.14', 7])
res = to_numeric(s)
tm.assert_series_equal(res, expected)
def test_series_numeric(self):
s = pd.Series([1, 3, 4, 5], index=list('ABCD'), name='XXX')
res = to_numeric(s)
tm.assert_series_equal(res, s)
s = pd.Series([1., 3., 4., 5.], index=list('ABCD'), name='XXX')
res = to_numeric(s)
tm.assert_series_equal(res, s)
# bool is regarded as numeric
s = pd.Series([True, False, True, True],
index=list('ABCD'), name='XXX')
res = to_numeric(s)
tm.assert_series_equal(res, s)
def test_error(self):
s = pd.Series([1, -3.14, 'apple'])
msg = 'Unable to parse string "apple" at position 2'
with tm.assert_raises_regex(ValueError, msg):
to_numeric(s, errors='raise')
res = to_numeric(s, errors='ignore')
expected = pd.Series([1, -3.14, 'apple'])
tm.assert_series_equal(res, expected)
res = to_numeric(s, errors='coerce')
expected = pd.Series([1, -3.14, np.nan])
tm.assert_series_equal(res, expected)
s = pd.Series(['orange', 1, -3.14, 'apple'])
msg = 'Unable to parse string "orange" at position 0'
with tm.assert_raises_regex(ValueError, msg):
to_numeric(s, errors='raise')
def test_error_seen_bool(self):
s = pd.Series([True, False, 'apple'])
msg = 'Unable to parse string "apple" at position 2'
with tm.assert_raises_regex(ValueError, msg):
to_numeric(s, errors='raise')
res = to_numeric(s, errors='ignore')
expected = pd.Series([True, False, 'apple'])
tm.assert_series_equal(res, expected)
# coerces to float
res = to_numeric(s, errors='coerce')
expected = pd.Series([1., 0., np.nan])
tm.assert_series_equal(res, expected)
def test_list(self):
s = ['1', '-3.14', '7']
res = to_numeric(s)
expected = np.array([1, -3.14, 7])
tm.assert_numpy_array_equal(res, expected)
def test_list_numeric(self):
s = [1, 3, 4, 5]
res = to_numeric(s)
tm.assert_numpy_array_equal(res, np.array(s, dtype=np.int64))
s = [1., 3., 4., 5.]
res = to_numeric(s)
tm.assert_numpy_array_equal(res, np.array(s))
# bool is regarded as numeric
s = [True, False, True, True]
res = to_numeric(s)
tm.assert_numpy_array_equal(res, np.array(s))
def test_numeric(self):
s = pd.Series([1, -3.14, 7], dtype='O')
res = to_numeric(s)
expected = pd.Series([1, -3.14, 7])
tm.assert_series_equal(res, expected)
s = pd.Series([1, -3.14, 7])
res = to_numeric(s)
tm.assert_series_equal(res, expected)
# GH 14827
df = pd.DataFrame(dict(
a=[1.2, decimal.Decimal(3.14), decimal.Decimal("infinity"), '0.1'],
b=[1.0, 2.0, 3.0, 4.0],
))
expected = pd.DataFrame(dict(
a=[1.2, 3.14, np.inf, 0.1],
b=[1.0, 2.0, 3.0, 4.0],
))
# Test to_numeric over one column
df_copy = df.copy()
df_copy['a'] = df_copy['a'].apply(to_numeric)
tm.assert_frame_equal(df_copy, expected)
# Test to_numeric over multiple columns
df_copy = df.copy()
df_copy[['a', 'b']] = df_copy[['a', 'b']].apply(to_numeric)
tm.assert_frame_equal(df_copy, expected)
def test_numeric_lists_and_arrays(self):
# Test to_numeric with embedded lists and arrays
df = pd.DataFrame(dict(
a=[[decimal.Decimal(3.14), 1.0], decimal.Decimal(1.6), 0.1]
))
df['a'] = df['a'].apply(to_numeric)
expected = pd.DataFrame(dict(
a=[[3.14, 1.0], 1.6, 0.1],
))
tm.assert_frame_equal(df, expected)
df = pd.DataFrame(dict(
a=[np.array([decimal.Decimal(3.14), 1.0]), 0.1]
))
df['a'] = df['a'].apply(to_numeric)
expected = pd.DataFrame(dict(
a=[[3.14, 1.0], 0.1],
))
tm.assert_frame_equal(df, expected)
def test_all_nan(self):
s = pd.Series(['a', 'b', 'c'])
res = to_numeric(s, errors='coerce')
expected = pd.Series([np.nan, np.nan, np.nan])
tm.assert_series_equal(res, expected)
def test_type_check(self):
# GH 11776
df = pd.DataFrame({'a': [1, -3.14, 7], 'b': ['4', '5', '6']})
with tm.assert_raises_regex(TypeError, "1-d array"):
to_numeric(df)
for errors in ['ignore', 'raise', 'coerce']:
with tm.assert_raises_regex(TypeError, "1-d array"):
to_numeric(df, errors=errors)
def test_scalar(self):
assert pd.to_numeric(1) == 1
assert pd.to_numeric(1.1) == 1.1
assert pd.to_numeric('1') == 1
assert pd.to_numeric('1.1') == 1.1
with pytest.raises(ValueError):
to_numeric('XX', errors='raise')
assert to_numeric('XX', errors='ignore') == 'XX'
assert np.isnan(to_numeric('XX', errors='coerce'))
def test_numeric_dtypes(self):
idx = pd.Index([1, 2, 3], name='xxx')
res = pd.to_numeric(idx)
tm.assert_index_equal(res, idx)
res = pd.to_numeric(pd.Series(idx, name='xxx'))
tm.assert_series_equal(res, pd.Series(idx, name='xxx'))
res = pd.to_numeric(idx.values)
tm.assert_numpy_array_equal(res, idx.values)
idx = pd.Index([1., np.nan, 3., np.nan], name='xxx')
res = pd.to_numeric(idx)
tm.assert_index_equal(res, idx)
res = pd.to_numeric(pd.Series(idx, name='xxx'))
tm.assert_series_equal(res, pd.Series(idx, name='xxx'))
res = pd.to_numeric(idx.values)
tm.assert_numpy_array_equal(res, idx.values)
def test_str(self):
idx = pd.Index(['1', '2', '3'], name='xxx')
exp = np.array([1, 2, 3], dtype='int64')
res = pd.to_numeric(idx)
tm.assert_index_equal(res, pd.Index(exp, name='xxx'))
res = pd.to_numeric(pd.Series(idx, name='xxx'))
tm.assert_series_equal(res, pd.Series(exp, name='xxx'))
res = pd.to_numeric(idx.values)
tm.assert_numpy_array_equal(res, exp)
idx = pd.Index(['1.5', '2.7', '3.4'], name='xxx')
exp = np.array([1.5, 2.7, 3.4])
res = pd.to_numeric(idx)
tm.assert_index_equal(res, pd.Index(exp, name='xxx'))
res = pd.to_numeric(pd.Series(idx, name='xxx'))
tm.assert_series_equal(res, pd.Series(exp, name='xxx'))
res = pd.to_numeric(idx.values)
tm.assert_numpy_array_equal(res, exp)
def test_datetimelike(self):
for tz in [None, 'US/Eastern', 'Asia/Tokyo']:
idx = pd.date_range('20130101', periods=3, tz=tz, name='xxx')
res = pd.to_numeric(idx)
tm.assert_index_equal(res, pd.Index(idx.asi8, name='xxx'))
res = pd.to_numeric(pd.Series(idx, name='xxx'))
tm.assert_series_equal(res, pd.Series(idx.asi8, name='xxx'))
res = pd.to_numeric(idx.values)
tm.assert_numpy_array_equal(res, idx.asi8)
def test_timedelta(self):
idx = pd.timedelta_range('1 days', periods=3, freq='D', name='xxx')
res = pd.to_numeric(idx)
tm.assert_index_equal(res, pd.Index(idx.asi8, name='xxx'))
res = pd.to_numeric(pd.Series(idx, name='xxx'))
tm.assert_series_equal(res, pd.Series(idx.asi8, name='xxx'))
res = pd.to_numeric(idx.values)
tm.assert_numpy_array_equal(res, idx.asi8)
def test_period(self):
idx = pd.period_range('2011-01', periods=3, freq='M', name='xxx')
res = pd.to_numeric(idx)
tm.assert_index_equal(res, pd.Index(idx.asi8, name='xxx'))
# ToDo: enable when we can support native PeriodDtype
# res = pd.to_numeric(pd.Series(idx, name='xxx'))
# tm.assert_series_equal(res, pd.Series(idx.asi8, name='xxx'))
def test_non_hashable(self):
# Test for Bug #13324
s = pd.Series([[10.0, 2], 1.0, 'apple'])
res = pd.to_numeric(s, errors='coerce')
tm.assert_series_equal(res, pd.Series([np.nan, 1.0, np.nan]))
res = pd.to_numeric(s, errors='ignore')
tm.assert_series_equal(res, pd.Series([[10.0, 2], 1.0, 'apple']))
with tm.assert_raises_regex(TypeError, "Invalid object type"):
pd.to_numeric(s)
def test_downcast(self):
# see gh-13352
mixed_data = ['1', 2, 3]
int_data = [1, 2, 3]
date_data = np.array(['1970-01-02', '1970-01-03',
'1970-01-04'], dtype='datetime64[D]')
invalid_downcast = 'unsigned-integer'
msg = 'invalid downcasting method provided'
smallest_int_dtype = np.dtype(np.typecodes['Integer'][0])
smallest_uint_dtype = np.dtype(np.typecodes['UnsignedInteger'][0])
# support below np.float32 is rare and far between
float_32_char = np.dtype(np.float32).char
smallest_float_dtype = float_32_char
for data in (mixed_data, int_data, date_data):
with tm.assert_raises_regex(ValueError, msg):
pd.to_numeric(data, downcast=invalid_downcast)
expected = np.array([1, 2, 3], dtype=np.int64)
res = pd.to_numeric(data)
tm.assert_numpy_array_equal(res, expected)
res = pd.to_numeric(data, downcast=None)
tm.assert_numpy_array_equal(res, expected)
expected = np.array([1, 2, 3], dtype=smallest_int_dtype)
for signed_downcast in ('integer', 'signed'):
res = pd.to_numeric(data, downcast=signed_downcast)
tm.assert_numpy_array_equal(res, expected)
expected = np.array([1, 2, 3], dtype=smallest_uint_dtype)
res = pd.to_numeric(data, downcast='unsigned')
tm.assert_numpy_array_equal(res, expected)
expected = np.array([1, 2, 3], dtype=smallest_float_dtype)
res = pd.to_numeric(data, downcast='float')
tm.assert_numpy_array_equal(res, expected)
# if we can't successfully cast the given
# data to a numeric dtype, do not bother
# with the downcast parameter
data = ['foo', 2, 3]
expected = np.array(data, dtype=object)
res = pd.to_numeric(data, errors='ignore',
downcast='unsigned')
tm.assert_numpy_array_equal(res, expected)
# cannot cast to an unsigned integer because
# we have a negative number
data = ['-1', 2, 3]
expected = np.array([-1, 2, 3], dtype=np.int64)
res = pd.to_numeric(data, downcast='unsigned')
tm.assert_numpy_array_equal(res, expected)
# cannot cast to an integer (signed or unsigned)
# because we have a float number
data = (['1.1', 2, 3],
[10000.0, 20000, 3000, 40000.36, 50000, 50000.00])
expected = (np.array([1.1, 2, 3], dtype=np.float64),
np.array([10000.0, 20000, 3000,
40000.36, 50000, 50000.00], dtype=np.float64))
for _data, _expected in zip(data, expected):
for downcast in ('integer', 'signed', 'unsigned'):
res = pd.to_numeric(_data, downcast=downcast)
tm.assert_numpy_array_equal(res, _expected)
# the smallest integer dtype need not be np.(u)int8
data = ['256', 257, 258]
for downcast, expected_dtype in zip(
['integer', 'signed', 'unsigned'],
[np.int16, np.int16, np.uint16]):
expected = np.array([256, 257, 258], dtype=expected_dtype)
res = pd.to_numeric(data, downcast=downcast)
tm.assert_numpy_array_equal(res, expected)
def test_downcast_limits(self):
# Test the limits of each downcast. Bug: #14401.
i = 'integer'
u = 'unsigned'
dtype_downcast_min_max = [
('int8', i, [iinfo(np.int8).min, iinfo(np.int8).max]),
('int16', i, [iinfo(np.int16).min, iinfo(np.int16).max]),
('int32', i, [iinfo(np.int32).min, iinfo(np.int32).max]),
('int64', i, [iinfo(np.int64).min, iinfo(np.int64).max]),
('uint8', u, [iinfo(np.uint8).min, iinfo(np.uint8).max]),
('uint16', u, [iinfo(np.uint16).min, iinfo(np.uint16).max]),
('uint32', u, [iinfo(np.uint32).min, iinfo(np.uint32).max]),
('uint64', u, [iinfo(np.uint64).min, iinfo(np.uint64).max]),
('int16', i, [iinfo(np.int8).min, iinfo(np.int8).max + 1]),
('int32', i, [iinfo(np.int16).min, iinfo(np.int16).max + 1]),
('int64', i, [iinfo(np.int32).min, iinfo(np.int32).max + 1]),
('int16', i, [iinfo(np.int8).min - 1, iinfo(np.int16).max]),
('int32', i, [iinfo(np.int16).min - 1, iinfo(np.int32).max]),
('int64', i, [iinfo(np.int32).min - 1, iinfo(np.int64).max]),
('uint16', u, [iinfo(np.uint8).min, iinfo(np.uint8).max + 1]),
('uint32', u, [iinfo(np.uint16).min, iinfo(np.uint16).max + 1]),
('uint64', u, [iinfo(np.uint32).min, iinfo(np.uint32).max + 1])
]
for dtype, downcast, min_max in dtype_downcast_min_max:
series = pd.to_numeric(pd.Series(min_max), downcast=downcast)
assert series.dtype == dtype
|
bsd-3-clause
|
khkaminska/scikit-learn
|
examples/model_selection/grid_search_digits.py
|
227
|
2665
|
"""
============================================================
Parameter estimation using grid search with cross-validation
============================================================
This example shows how a classifier is optimized by cross-validation,
which is done using the :class:`sklearn.grid_search.GridSearchCV` object
on a development set that comprises only half of the available labeled data.
The performance of the selected hyper-parameters and trained model is
then measured on a dedicated evaluation set that was not used during
the model selection step.
More details on tools available for model selection can be found in the
sections on :ref:`cross_validation` and :ref:`grid_search`.
"""
from __future__ import print_function
from sklearn import datasets
from sklearn.cross_validation import train_test_split
from sklearn.grid_search import GridSearchCV
from sklearn.metrics import classification_report
from sklearn.svm import SVC
print(__doc__)
# Loading the Digits dataset
digits = datasets.load_digits()
# To apply a classifier on this data, we need to flatten the images, to
# turn the data into a (samples, feature) matrix:
n_samples = len(digits.images)
X = digits.images.reshape((n_samples, -1))
y = digits.target
# Split the dataset in two equal parts
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.5, random_state=0)
# Set the parameters by cross-validation
tuned_parameters = [{'kernel': ['rbf'], 'gamma': [1e-3, 1e-4],
'C': [1, 10, 100, 1000]},
{'kernel': ['linear'], 'C': [1, 10, 100, 1000]}]
scores = ['precision', 'recall']
for score in scores:
print("# Tuning hyper-parameters for %s" % score)
print()
clf = GridSearchCV(SVC(C=1), tuned_parameters, cv=5,
scoring='%s_weighted' % score)
clf.fit(X_train, y_train)
print("Best parameters set found on development set:")
print()
print(clf.best_params_)
print()
print("Grid scores on development set:")
print()
for params, mean_score, scores in clf.grid_scores_:
print("%0.3f (+/-%0.03f) for %r"
% (mean_score, scores.std() * 2, params))
print()
print("Detailed classification report:")
print()
print("The model is trained on the full development set.")
print("The scores are computed on the full evaluation set.")
print()
y_true, y_pred = y_test, clf.predict(X_test)
print(classification_report(y_true, y_pred))
print()
# Note the problem is too easy: the hyperparameter plateau is too flat and the
# output model is the same for precision and recall with ties in quality.
|
bsd-3-clause
|
thorwhalen/ut
|
aw/khan01_spike.py
|
1
|
12446
|
"""Travel adwords tools"""
__author__ = 'thorwhalen'
import numpy as np
from ut.datapath import datapath
import ut.daf.diagnosis as daf_diagnosis
import ut.parse.google as google
import ut.parse.util as parse_util
import pandas as pd
import ut.daf.ch as daf_ch
import os
import re
from ut.pstr.trans import toascii
from . import reporting
from datetime import datetime
from ut.util.ulist import ascertain_list
from ut.util.ulist import all_true
from ut.pstr.trans import to_unicode_or_bust
from serialize.data_accessor import DataAccessor
split_exp = re.compile("[^&\w]*")
travel_domain_list = ['expedia', 'tripadvisor', 'trivago', 'marriott', 'booking', 'hotels', 'lastminute', 'accorhotels',
'kayak', 'venere', 'hilton', 'hotelscombined', 'agoda', 'choicehotels', 'travelocity',
'travelsupermarket', 'bestwestern', 'laterooms', 'radissonblu', 'hotwire', 'lonelyplanet',
'orbitz', 'starwoodhotels', 'frommers', 'hotel', 'hotelclub', 'hrs', 'novotel', 'wego', 'wotif',
'hoteltravel', 'hyatt', 'ibis', 'ihg', 'mercure', 'priceline', 'qualityinn',
'beoo', 'easytobook', 'ebookers', 'hostelbookers', 'lq', 'melia', 'millenniumhotels', 'mrandmrssmith',
'nh-hotels', 'ratestogo', 'sofitel', 'tablethotels', 'travelandleisure']
html_data = DataAccessor('html/google_results_tests')
def get_search_term_html(search_term):
    # load the cached google-results html for this search term
    file_name = search_term + '.html'
    try:
        return html_data.loads(file_name)
    except:
        raise IOError("didn't find %s" % html_data.filepath(file_name))
def google_light_parse(gresult):
    gresult = parse_util.x_to_soup(gresult)
    parse_dict = dict()
    resultStats = gresult.find(name='div', attrs={'id': 'resultStats'})
    if resultStats:
        parse_dict['_resultStats'] = google.parse_number_of_results(resultStats)
    return parse_dict
def save_search_term_that_does_not_have_num_of_results(search_term):
print("no num_of_results in: %s" % search_term)
def add_travel_score(query_report_df, html_folder=datapath(),ext='.html'):
pass
# TODO: Continue coding add_travel_score()
def mk_search_term_domains_df(query_report_df, html_folder=datapath(),ext='.html'):
search_terms = np.unique(list(query_report_df['search_term']))
domain_lists = []
search_term_list = []
for st in search_terms:
filename = os.path.join(html_folder,st+ext)
print(filename)
if os.path.exists(filename):
search_term_list.append(st)
domain_lists.append(get_domain_list_from_google_results(filename))
return pd.DataFrame({'search_term':search_term_list, 'domain_list':domain_lists})
def add_search_term_ndups(df, count_var='ndups'):
d = df[['search_term']].groupby('search_term').count()
d.columns = [count_var]
return df.merge(d, left_on='search_term', right_index=True)
def add_target_scores(query_report_df):
vars_to_keep = ['search_term','impressions','destination','ad_group','destination_imps_freq_fanout_ratio','ad_group_imps_freq_fanout_ratio']
query_report_df = add_query_fanout_scores(query_report_df)
query_report_df = query_report_df[vars_to_keep]
query_report_df = daf_ch.ch_col_names(query_report_df,
['ad_group_score','destination_score'],
['ad_group_imps_freq_fanout_ratio','destination_imps_freq_fanout_ratio'])
query_report_df = query_report_df.sort(columns=['search_term','destination_score','ad_group_score'])
return query_report_df
def add_query_fanout_scores(query_report_df):
if 'destination' not in query_report_df.columns:
query_report_df = add_destination(query_report_df)
ad_group_fanout = mk_query_fanout_scores(query_report_df,target='ad_group',statVars='impressions',keep_statVars=False)
ad_group_fanout = daf_ch.ch_col_names(ad_group_fanout,
['ad_group_imps_freq_fanout_ratio','ad_group_count_fanout_ratio'],
['impressions_freq_fanout_ratio','impressions_count_fanout_ratio'])
destination_fanout = mk_query_fanout_scores(query_report_df,target='destination',statVars='impressions',keep_statVars=False)
destination_fanout = daf_ch.ch_col_names(destination_fanout,
['destination_imps_freq_fanout_ratio','destination_count_fanout_ratio'],
['impressions_freq_fanout_ratio','impressions_count_fanout_ratio'])
query_report_df = query_report_df.merge(ad_group_fanout,on=['search_term','ad_group'])
query_report_df = query_report_df.merge(destination_fanout,on=['search_term','destination'])
return query_report_df
def mk_query_fanout_scores(query_report_df, target='ad_group', statVars='impressions',keep_statVars=False):
# target = ascertain_list(target)
# if ('destination' in target) and ('destination' not in query_report_df.columns):
# query_report_df['destination'] = query_report_df['ad_group'].apply(lambda x : re.match('[^|]*',x).group(0))
# if not all_true([x in query_report_df.columns for x in target]):
# raise ValueError("the dataframe doesn't have the column %s and I don't know how to make it" % target)
if target not in query_report_df.columns:
if target=='destination':
query_report_df = add_destination(query_report_df)
else:
raise ValueError("the dataframe doesn't have the column %s and I don't know how to make it" % target)
return daf_diagnosis.mk_fanout_score_df(query_report_df,fromVars=['search_term'],toVars=target,statVars=statVars,keep_statVars=keep_statVars)
def add_destination(query_report_df):
assert 'ad_group' in query_report_df.columns, "you must have the variable ad_group to infer destination"
# get the destination as the substring before the first |
    query_report_df['destination'] = \
        query_report_df['ad_group'].apply(lambda x: re.match('[^|]*', x).group(0))
return query_report_df
def get_domain_list_from_google_results(gresults):
domain_list = []
if not isinstance(gresults,dict): # assume it's a soup, html, or filename thereof
gresults = google.parse_tag_dict(google.mk_gresult_tag_dict(gresults))
# if not, assume the input is a info_dict
if 'organic_results_list' in gresults:
domain_list = domain_list + [x['domain'] for x in gresults['organic_results_list'] if 'domain' in x]
if 'top_ads_list' in gresults:
domain_list = domain_list + [x['disp_url_domain'] for x in gresults['top_ads_list'] if 'disp_url_domain' in x]
if 'organic_results_list' in gresults:
domain_list = domain_list + [x['disp_url_domain'] for x in gresults['organic_results_list'] if 'disp_url_domain' in x]
return domain_list
def similarity_with_travel_domains(domain_list):
if isinstance(domain_list, pd.DataFrame):
domain_list = list(domain_list.index)
return len(set(domain_list).intersection(set(travel_domain_list)))\
/ float(len(domain_list))
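# Rough usage sketch (the domains below are illustrative, not from a real query): the score is
# the fraction of the input domains that also appear in travel_domain_list, e.g.
#   similarity_with_travel_domains(['expedia', 'kayak', 'example'])  ->  2 / 3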
###############################################################################################
## SEMANTIC STUFF
def mk_term_count_from_google_results(gresults):
"""
takes a google result (in the form of html, filename thereof, soup, or info_dict, and
    returns a one-column DataFrame (column 'count') indexed by term, holding term counts
(the number of times the term appeared in the google result)
"""
# get preprocessed text from gresults
gresults = process_text_for_word_count(mk_text_from_google_results(gresults))
# tokenize this text
toks = tokenize_text(gresults)
# make a dataframe of term counts TODO: Explore faster ways to do this
df = pd.DataFrame(toks,columns=['token'])
df = df.groupby('token').count()
df.columns = ['count']
    df = df.sort(columns=['count'],ascending=False) # TODO: Take out sorting at some point since it's unnecessary (just for diagnosis purposes)
return df
def tokenize_text(gresult_text):
return re.split(split_exp,gresult_text)
def process_text_for_word_count(text):
"""
    Preprocesses the text before it is fed to the tokenizer.
    This is where we lower-case the text, cast letters to "simple" ("ascii", non-accentuated)
    letters, replace some common strings (such as "bed and breakfast" or "New York") with single
    token representatives (such as "b&b" or "new_york"), and do whatever else needs to happen
    before tokens are retrieved from the text.
"""
return toascii(to_unicode_or_bust(text)).lower()
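# A minimal sketch of the phrase-replacement step described in the docstring above; the mapping
# and helper below are illustrative additions, not part of the original module.
_EXAMPLE_REPLACEMENTS = {'bed and breakfast': 'b&b', 'new york': 'new_york'}
def process_text_for_word_count_with_replacements(text):
    text = toascii(to_unicode_or_bust(text)).lower()
    for phrase, token in _EXAMPLE_REPLACEMENTS.items():
        text = text.replace(phrase, token)
    return text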
def mk_text_from_google_results(gresults):
if not isinstance(gresults,dict): # if not a dict assume it's a soup, html, or filename thereof
gresults = google.parse_tag_dict(google.mk_gresult_tag_dict(gresults))
if 'organic_results_list' in gresults:
title_text_concatinated = ' '.join([x['title_text'] for x in gresults['organic_results_list'] if 'title_text' in x])
snippet_text_concatinated = ' '.join([x['st_text'] for x in gresults['organic_results_list'] if 'st_text' in x])
text_concatinated = title_text_concatinated + ' ' + snippet_text_concatinated
    else:
        text_concatinated = None
        search_for_tag = ['_ires', '_search', '_res', '_center_col']
        for t in search_for_tag:
            if t in gresults:
                text_concatinated = soup_to_text(gresults[t])
                break
        if not text_concatinated:  # if you still don't have anything
            text_concatinated = soup_to_text(gresults)  # ... just get the text from the whole soup
return text_concatinated
def soup_to_text(element):
    # join the visible text nodes of the soup into a single string
    return ' '.join(filter(visible, element.findAll(text=True)))
def visible(element):
if element.parent.name in ['style', 'script', '[document]', 'head', 'title']:
return False
elif re.match('<!--.*-->', str(element)):
return False
return True
###############################################################################################
## MAKING DATA
def mk_df_of_travel_domains():
# set up resources
html_folder = '/D/Dropbox/dev/py/data/html/google_results_tests/'
file_list = ['hotel - 100 Google Search Results.html',
'find hotel deals - 100 Google Search Results.html',
'hotel travel sites - 100 Google Search Results.html',
'find hotels - 100 Google Search Results.html',
'hotel paris - 100 Google Search Results.html',
'hotel rome - 100 Google Search Results.html',
'hotel london - 100 Google Search Results.html',
'hotel nyc - 100 Google Search Results.html',
'hotels in france - 100 Google Search Results.html',
'hotels in italy - 100 Google Search Results.html'
]
filepath_list = [os.path.join(html_folder,f) for f in file_list]
# parse all this
r = [google.mk_gresult_tag_dict(f) for f in filepath_list]
r = [google.parse_tag_dict(f) for f in r]
# make domain lists
org_domain_list = []
ads_domain_list = []
tads_domain_list = []
for rr in r:
rrr = rr['organic_results_list']
org_domain_list = org_domain_list + [x['domain'] for x in rrr if 'domain' in x]
rrr = rr['rhs_ads_list']
ads_domain_list = ads_domain_list + [x['disp_url_domain'] for x in rrr if 'disp_url_domain' in x]
rrr = rr['top_ads_list']
ads_domain_list = ads_domain_list + [x['disp_url_domain'] for x in rrr if 'disp_url_domain' in x]
domain_list = org_domain_list + ads_domain_list
print("number of org_domain_list entries = %d" % len(org_domain_list))
print("number of ads_domain_list entries = %d" % len(ads_domain_list))
print("number of (all) domain_list entries = %d" % len(domain_list))
# make a dataframe counting the number of times we encouter each domain
df = pd.DataFrame(domain_list,columns=['domain'])
dg = df.groupby('domain').count() #agg([('domain_count','len')])
dg = daf_ch.ch_col_names(dg,'count','domain')
thresh = 4
print("length before removing count<%d entries = %d" % (thresh,len(dg)))
dg = dg[dg['count']>=thresh]
print("length before removing count<%d entries = %d" % (thresh,len(dg)))
dg['frequency'] = dg['count']/float(max(dg['count']))
dg = dg.sort(columns=['count'],ascending=False)
dg.head(30)
# return this!
return dg
|
mit
|
winklerand/pandas
|
pandas/tests/dtypes/test_dtypes.py
|
2
|
23871
|
# -*- coding: utf-8 -*-
import re
import pytest
from itertools import product
import numpy as np
import pandas as pd
from pandas import (
Series, Categorical, CategoricalIndex, IntervalIndex, date_range)
from pandas.core.dtypes.dtypes import (
DatetimeTZDtype, PeriodDtype,
IntervalDtype, CategoricalDtype)
from pandas.core.dtypes.common import (
is_categorical_dtype, is_categorical,
is_datetime64tz_dtype, is_datetimetz,
is_period_dtype, is_period,
is_dtype_equal, is_datetime64_ns_dtype,
is_datetime64_dtype, is_interval_dtype,
is_datetime64_any_dtype, is_string_dtype,
_coerce_to_dtype)
import pandas.util.testing as tm
class Base(object):
def setup_method(self, method):
self.dtype = self.create()
def test_hash(self):
hash(self.dtype)
def test_equality_invalid(self):
assert not self.dtype == 'foo'
assert not is_dtype_equal(self.dtype, np.int64)
def test_numpy_informed(self):
pytest.raises(TypeError, np.dtype, self.dtype)
assert not self.dtype == np.str_
assert not np.str_ == self.dtype
def test_pickle(self):
# make sure our cache is NOT pickled
# clear the cache
type(self.dtype).reset_cache()
assert not len(self.dtype._cache)
# force back to the cache
result = tm.round_trip_pickle(self.dtype)
assert not len(self.dtype._cache)
assert result == self.dtype
class TestCategoricalDtype(Base):
def create(self):
return CategoricalDtype()
def test_pickle(self):
# make sure our cache is NOT pickled
# clear the cache
type(self.dtype).reset_cache()
assert not len(self.dtype._cache)
# force back to the cache
result = tm.round_trip_pickle(self.dtype)
assert result == self.dtype
def test_hash_vs_equality(self):
dtype = self.dtype
dtype2 = CategoricalDtype()
assert dtype == dtype2
assert dtype2 == dtype
assert hash(dtype) == hash(dtype2)
def test_equality(self):
assert is_dtype_equal(self.dtype, 'category')
assert is_dtype_equal(self.dtype, CategoricalDtype())
assert not is_dtype_equal(self.dtype, 'foo')
def test_construction_from_string(self):
result = CategoricalDtype.construct_from_string('category')
assert is_dtype_equal(self.dtype, result)
pytest.raises(
TypeError, lambda: CategoricalDtype.construct_from_string('foo'))
def test_constructor_invalid(self):
with tm.assert_raises_regex(TypeError,
"CategoricalIndex.* must be called"):
CategoricalDtype("category")
def test_is_dtype(self):
assert CategoricalDtype.is_dtype(self.dtype)
assert CategoricalDtype.is_dtype('category')
assert CategoricalDtype.is_dtype(CategoricalDtype())
assert not CategoricalDtype.is_dtype('foo')
assert not CategoricalDtype.is_dtype(np.float64)
def test_basic(self):
assert is_categorical_dtype(self.dtype)
factor = Categorical(['a', 'b', 'b', 'a', 'a', 'c', 'c', 'c'])
s = Series(factor, name='A')
# dtypes
assert is_categorical_dtype(s.dtype)
assert is_categorical_dtype(s)
assert not is_categorical_dtype(np.dtype('float64'))
assert is_categorical(s.dtype)
assert is_categorical(s)
assert not is_categorical(np.dtype('float64'))
assert not is_categorical(1.0)
def test_tuple_categories(self):
categories = [(1, 'a'), (2, 'b'), (3, 'c')]
result = CategoricalDtype(categories)
assert all(result.categories == categories)
class TestDatetimeTZDtype(Base):
def create(self):
return DatetimeTZDtype('ns', 'US/Eastern')
def test_hash_vs_equality(self):
# make sure that we satisfy is semantics
dtype = self.dtype
dtype2 = DatetimeTZDtype('ns', 'US/Eastern')
dtype3 = DatetimeTZDtype(dtype2)
assert dtype == dtype2
assert dtype2 == dtype
assert dtype3 == dtype
assert dtype is dtype2
assert dtype2 is dtype
assert dtype3 is dtype
assert hash(dtype) == hash(dtype2)
assert hash(dtype) == hash(dtype3)
def test_construction(self):
pytest.raises(ValueError,
lambda: DatetimeTZDtype('ms', 'US/Eastern'))
def test_subclass(self):
a = DatetimeTZDtype('datetime64[ns, US/Eastern]')
b = DatetimeTZDtype('datetime64[ns, CET]')
assert issubclass(type(a), type(a))
assert issubclass(type(a), type(b))
def test_coerce_to_dtype(self):
assert (_coerce_to_dtype('datetime64[ns, US/Eastern]') ==
DatetimeTZDtype('ns', 'US/Eastern'))
assert (_coerce_to_dtype('datetime64[ns, Asia/Tokyo]') ==
DatetimeTZDtype('ns', 'Asia/Tokyo'))
def test_compat(self):
assert is_datetime64tz_dtype(self.dtype)
assert is_datetime64tz_dtype('datetime64[ns, US/Eastern]')
assert is_datetime64_any_dtype(self.dtype)
assert is_datetime64_any_dtype('datetime64[ns, US/Eastern]')
assert is_datetime64_ns_dtype(self.dtype)
assert is_datetime64_ns_dtype('datetime64[ns, US/Eastern]')
assert not is_datetime64_dtype(self.dtype)
assert not is_datetime64_dtype('datetime64[ns, US/Eastern]')
def test_construction_from_string(self):
result = DatetimeTZDtype('datetime64[ns, US/Eastern]')
assert is_dtype_equal(self.dtype, result)
result = DatetimeTZDtype.construct_from_string(
'datetime64[ns, US/Eastern]')
assert is_dtype_equal(self.dtype, result)
pytest.raises(TypeError,
lambda: DatetimeTZDtype.construct_from_string('foo'))
def test_is_dtype(self):
assert not DatetimeTZDtype.is_dtype(None)
assert DatetimeTZDtype.is_dtype(self.dtype)
assert DatetimeTZDtype.is_dtype('datetime64[ns, US/Eastern]')
assert not DatetimeTZDtype.is_dtype('foo')
assert DatetimeTZDtype.is_dtype(DatetimeTZDtype('ns', 'US/Pacific'))
assert not DatetimeTZDtype.is_dtype(np.float64)
def test_equality(self):
assert is_dtype_equal(self.dtype, 'datetime64[ns, US/Eastern]')
assert is_dtype_equal(self.dtype, DatetimeTZDtype('ns', 'US/Eastern'))
assert not is_dtype_equal(self.dtype, 'foo')
assert not is_dtype_equal(self.dtype, DatetimeTZDtype('ns', 'CET'))
assert not is_dtype_equal(DatetimeTZDtype('ns', 'US/Eastern'),
DatetimeTZDtype('ns', 'US/Pacific'))
# numpy compat
assert is_dtype_equal(np.dtype("M8[ns]"), "datetime64[ns]")
def test_basic(self):
assert is_datetime64tz_dtype(self.dtype)
dr = date_range('20130101', periods=3, tz='US/Eastern')
s = Series(dr, name='A')
# dtypes
assert is_datetime64tz_dtype(s.dtype)
assert is_datetime64tz_dtype(s)
assert not is_datetime64tz_dtype(np.dtype('float64'))
assert not is_datetime64tz_dtype(1.0)
assert is_datetimetz(s)
assert is_datetimetz(s.dtype)
assert not is_datetimetz(np.dtype('float64'))
assert not is_datetimetz(1.0)
def test_dst(self):
dr1 = date_range('2013-01-01', periods=3, tz='US/Eastern')
s1 = Series(dr1, name='A')
assert is_datetimetz(s1)
dr2 = date_range('2013-08-01', periods=3, tz='US/Eastern')
s2 = Series(dr2, name='A')
assert is_datetimetz(s2)
assert s1.dtype == s2.dtype
def test_parser(self):
# pr #11245
for tz, constructor in product(('UTC', 'US/Eastern'),
('M8', 'datetime64')):
assert (DatetimeTZDtype('%s[ns, %s]' % (constructor, tz)) ==
DatetimeTZDtype('ns', tz))
def test_empty(self):
dt = DatetimeTZDtype()
with pytest.raises(AttributeError):
str(dt)
class TestPeriodDtype(Base):
def create(self):
return PeriodDtype('D')
def test_hash_vs_equality(self):
# make sure that we satisfy is semantics
dtype = self.dtype
dtype2 = PeriodDtype('D')
dtype3 = PeriodDtype(dtype2)
assert dtype == dtype2
assert dtype2 == dtype
assert dtype3 == dtype
assert dtype is dtype2
assert dtype2 is dtype
assert dtype3 is dtype
assert hash(dtype) == hash(dtype2)
assert hash(dtype) == hash(dtype3)
def test_construction(self):
with pytest.raises(ValueError):
PeriodDtype('xx')
for s in ['period[D]', 'Period[D]', 'D']:
dt = PeriodDtype(s)
assert dt.freq == pd.tseries.offsets.Day()
assert is_period_dtype(dt)
for s in ['period[3D]', 'Period[3D]', '3D']:
dt = PeriodDtype(s)
assert dt.freq == pd.tseries.offsets.Day(3)
assert is_period_dtype(dt)
for s in ['period[26H]', 'Period[26H]', '26H',
'period[1D2H]', 'Period[1D2H]', '1D2H']:
dt = PeriodDtype(s)
assert dt.freq == pd.tseries.offsets.Hour(26)
assert is_period_dtype(dt)
def test_subclass(self):
a = PeriodDtype('period[D]')
b = PeriodDtype('period[3D]')
assert issubclass(type(a), type(a))
assert issubclass(type(a), type(b))
def test_identity(self):
assert PeriodDtype('period[D]') == PeriodDtype('period[D]')
assert PeriodDtype('period[D]') is PeriodDtype('period[D]')
assert PeriodDtype('period[3D]') == PeriodDtype('period[3D]')
assert PeriodDtype('period[3D]') is PeriodDtype('period[3D]')
assert PeriodDtype('period[1S1U]') == PeriodDtype('period[1000001U]')
assert PeriodDtype('period[1S1U]') is PeriodDtype('period[1000001U]')
def test_coerce_to_dtype(self):
assert _coerce_to_dtype('period[D]') == PeriodDtype('period[D]')
assert _coerce_to_dtype('period[3M]') == PeriodDtype('period[3M]')
def test_compat(self):
assert not is_datetime64_ns_dtype(self.dtype)
assert not is_datetime64_ns_dtype('period[D]')
assert not is_datetime64_dtype(self.dtype)
assert not is_datetime64_dtype('period[D]')
def test_construction_from_string(self):
result = PeriodDtype('period[D]')
assert is_dtype_equal(self.dtype, result)
result = PeriodDtype.construct_from_string('period[D]')
assert is_dtype_equal(self.dtype, result)
with pytest.raises(TypeError):
PeriodDtype.construct_from_string('foo')
with pytest.raises(TypeError):
PeriodDtype.construct_from_string('period[foo]')
with pytest.raises(TypeError):
PeriodDtype.construct_from_string('foo[D]')
with pytest.raises(TypeError):
PeriodDtype.construct_from_string('datetime64[ns]')
with pytest.raises(TypeError):
PeriodDtype.construct_from_string('datetime64[ns, US/Eastern]')
def test_is_dtype(self):
assert PeriodDtype.is_dtype(self.dtype)
assert PeriodDtype.is_dtype('period[D]')
assert PeriodDtype.is_dtype('period[3D]')
assert PeriodDtype.is_dtype(PeriodDtype('3D'))
assert PeriodDtype.is_dtype('period[U]')
assert PeriodDtype.is_dtype('period[S]')
assert PeriodDtype.is_dtype(PeriodDtype('U'))
assert PeriodDtype.is_dtype(PeriodDtype('S'))
assert not PeriodDtype.is_dtype('D')
assert not PeriodDtype.is_dtype('3D')
assert not PeriodDtype.is_dtype('U')
assert not PeriodDtype.is_dtype('S')
assert not PeriodDtype.is_dtype('foo')
assert not PeriodDtype.is_dtype(np.object_)
assert not PeriodDtype.is_dtype(np.int64)
assert not PeriodDtype.is_dtype(np.float64)
def test_equality(self):
assert is_dtype_equal(self.dtype, 'period[D]')
assert is_dtype_equal(self.dtype, PeriodDtype('D'))
assert is_dtype_equal(self.dtype, PeriodDtype('D'))
assert is_dtype_equal(PeriodDtype('D'), PeriodDtype('D'))
assert not is_dtype_equal(self.dtype, 'D')
assert not is_dtype_equal(PeriodDtype('D'), PeriodDtype('2D'))
def test_basic(self):
assert is_period_dtype(self.dtype)
pidx = pd.period_range('2013-01-01 09:00', periods=5, freq='H')
assert is_period_dtype(pidx.dtype)
assert is_period_dtype(pidx)
assert is_period(pidx)
s = Series(pidx, name='A')
# dtypes
# series results in object dtype currently,
# is_period checks period_arraylike
assert not is_period_dtype(s.dtype)
assert not is_period_dtype(s)
assert is_period(s)
assert not is_period_dtype(np.dtype('float64'))
assert not is_period_dtype(1.0)
assert not is_period(np.dtype('float64'))
assert not is_period(1.0)
def test_empty(self):
dt = PeriodDtype()
with pytest.raises(AttributeError):
str(dt)
def test_not_string(self):
# though PeriodDtype has object kind, it cannot be string
assert not is_string_dtype(PeriodDtype('D'))
class TestIntervalDtype(Base):
def create(self):
return IntervalDtype('int64')
def test_hash_vs_equality(self):
# make sure that we satisfy is semantics
dtype = self.dtype
dtype2 = IntervalDtype('int64')
dtype3 = IntervalDtype(dtype2)
assert dtype == dtype2
assert dtype2 == dtype
assert dtype3 == dtype
assert dtype is dtype2
assert dtype2 is dtype
assert dtype3 is dtype
assert hash(dtype) == hash(dtype2)
assert hash(dtype) == hash(dtype3)
dtype1 = IntervalDtype('interval')
dtype2 = IntervalDtype(dtype1)
dtype3 = IntervalDtype('interval')
assert dtype2 == dtype1
assert dtype2 == dtype2
assert dtype2 == dtype3
assert dtype2 is dtype1
assert dtype2 is dtype2
assert dtype2 is dtype3
assert hash(dtype2) == hash(dtype1)
assert hash(dtype2) == hash(dtype2)
assert hash(dtype2) == hash(dtype3)
def test_construction(self):
with pytest.raises(ValueError):
IntervalDtype('xx')
for s in ['interval[int64]', 'Interval[int64]', 'int64']:
i = IntervalDtype(s)
assert i.subtype == np.dtype('int64')
assert is_interval_dtype(i)
def test_construction_generic(self):
# generic
i = IntervalDtype('interval')
assert i.subtype == ''
assert is_interval_dtype(i)
assert str(i) == 'interval[]'
i = IntervalDtype()
assert i.subtype is None
assert is_interval_dtype(i)
assert str(i) == 'interval'
def test_subclass(self):
a = IntervalDtype('interval[int64]')
b = IntervalDtype('interval[int64]')
assert issubclass(type(a), type(a))
assert issubclass(type(a), type(b))
def test_is_dtype(self):
assert IntervalDtype.is_dtype(self.dtype)
assert IntervalDtype.is_dtype('interval')
assert IntervalDtype.is_dtype(IntervalDtype('float64'))
assert IntervalDtype.is_dtype(IntervalDtype('int64'))
assert IntervalDtype.is_dtype(IntervalDtype(np.int64))
assert not IntervalDtype.is_dtype('D')
assert not IntervalDtype.is_dtype('3D')
assert not IntervalDtype.is_dtype('U')
assert not IntervalDtype.is_dtype('S')
assert not IntervalDtype.is_dtype('foo')
assert not IntervalDtype.is_dtype(np.object_)
assert not IntervalDtype.is_dtype(np.int64)
assert not IntervalDtype.is_dtype(np.float64)
def test_identity(self):
assert (IntervalDtype('interval[int64]') ==
IntervalDtype('interval[int64]'))
def test_coerce_to_dtype(self):
assert (_coerce_to_dtype('interval[int64]') ==
IntervalDtype('interval[int64]'))
def test_construction_from_string(self):
result = IntervalDtype('interval[int64]')
assert is_dtype_equal(self.dtype, result)
result = IntervalDtype.construct_from_string('interval[int64]')
assert is_dtype_equal(self.dtype, result)
with pytest.raises(TypeError):
IntervalDtype.construct_from_string('foo')
with pytest.raises(TypeError):
IntervalDtype.construct_from_string('interval[foo]')
with pytest.raises(TypeError):
IntervalDtype.construct_from_string('foo[int64]')
def test_equality(self):
assert is_dtype_equal(self.dtype, 'interval[int64]')
assert is_dtype_equal(self.dtype, IntervalDtype('int64'))
assert is_dtype_equal(self.dtype, IntervalDtype('int64'))
assert is_dtype_equal(IntervalDtype('int64'), IntervalDtype('int64'))
assert not is_dtype_equal(self.dtype, 'int64')
assert not is_dtype_equal(IntervalDtype('int64'),
IntervalDtype('float64'))
def test_basic(self):
assert is_interval_dtype(self.dtype)
ii = IntervalIndex.from_breaks(range(3))
assert is_interval_dtype(ii.dtype)
assert is_interval_dtype(ii)
s = Series(ii, name='A')
# dtypes
# series results in object dtype currently,
assert not is_interval_dtype(s.dtype)
assert not is_interval_dtype(s)
def test_basic_dtype(self):
assert is_interval_dtype('interval[int64]')
assert is_interval_dtype(IntervalIndex.from_tuples([(0, 1)]))
assert is_interval_dtype(IntervalIndex.from_breaks(np.arange(4)))
assert is_interval_dtype(IntervalIndex.from_breaks(
date_range('20130101', periods=3)))
assert not is_interval_dtype('U')
assert not is_interval_dtype('S')
assert not is_interval_dtype('foo')
assert not is_interval_dtype(np.object_)
assert not is_interval_dtype(np.int64)
assert not is_interval_dtype(np.float64)
def test_caching(self):
IntervalDtype.reset_cache()
dtype = IntervalDtype("int64")
assert len(IntervalDtype._cache) == 1
IntervalDtype("interval")
assert len(IntervalDtype._cache) == 2
IntervalDtype.reset_cache()
tm.round_trip_pickle(dtype)
assert len(IntervalDtype._cache) == 0
class TestCategoricalDtypeParametrized(object):
@pytest.mark.parametrize('categories, ordered', [
(['a', 'b', 'c', 'd'], False),
(['a', 'b', 'c', 'd'], True),
(np.arange(1000), False),
(np.arange(1000), True),
(['a', 'b', 10, 2, 1.3, True], False),
([True, False], True),
([True, False], False),
(pd.date_range('2017', periods=4), True),
(pd.date_range('2017', periods=4), False),
])
def test_basic(self, categories, ordered):
c1 = CategoricalDtype(categories, ordered=ordered)
tm.assert_index_equal(c1.categories, pd.Index(categories))
assert c1.ordered is ordered
def test_order_matters(self):
categories = ['a', 'b']
c1 = CategoricalDtype(categories, ordered=False)
c2 = CategoricalDtype(categories, ordered=True)
assert c1 is not c2
def test_unordered_same(self):
c1 = CategoricalDtype(['a', 'b'])
c2 = CategoricalDtype(['b', 'a'])
assert hash(c1) == hash(c2)
def test_categories(self):
result = CategoricalDtype(['a', 'b', 'c'])
tm.assert_index_equal(result.categories, pd.Index(['a', 'b', 'c']))
assert result.ordered is False
def test_equal_but_different(self):
c1 = CategoricalDtype([1, 2, 3])
c2 = CategoricalDtype([1., 2., 3.])
assert c1 is not c2
assert c1 != c2
@pytest.mark.parametrize('v1, v2', [
([1, 2, 3], [1, 2, 3]),
([1, 2, 3], [3, 2, 1]),
])
def test_order_hashes_different(self, v1, v2):
c1 = CategoricalDtype(v1)
c2 = CategoricalDtype(v2, ordered=True)
assert c1 is not c2
def test_nan_invalid(self):
with pytest.raises(ValueError):
CategoricalDtype([1, 2, np.nan])
def test_non_unique_invalid(self):
with pytest.raises(ValueError):
CategoricalDtype([1, 2, 1])
def test_same_categories_different_order(self):
c1 = CategoricalDtype(['a', 'b'], ordered=True)
c2 = CategoricalDtype(['b', 'a'], ordered=True)
assert c1 is not c2
@pytest.mark.parametrize('ordered, other, expected', [
(True, CategoricalDtype(['a', 'b'], True), True),
(False, CategoricalDtype(['a', 'b'], False), True),
(True, CategoricalDtype(['a', 'b'], False), False),
(False, CategoricalDtype(['a', 'b'], True), False),
(True, CategoricalDtype([1, 2], False), False),
(False, CategoricalDtype([1, 2], True), False),
(False, CategoricalDtype(None, True), True),
(True, CategoricalDtype(None, True), True),
(False, CategoricalDtype(None, False), True),
(True, CategoricalDtype(None, False), True),
(True, 'category', True),
(False, 'category', True),
(True, 'not a category', False),
(False, 'not a category', False),
])
def test_categorical_equality(self, ordered, other, expected):
c1 = CategoricalDtype(['a', 'b'], ordered)
result = c1 == other
assert result == expected
def test_invalid_raises(self):
with tm.assert_raises_regex(TypeError, 'ordered'):
CategoricalDtype(['a', 'b'], ordered='foo')
with tm.assert_raises_regex(TypeError, 'collection'):
CategoricalDtype('category')
def test_mixed(self):
a = CategoricalDtype(['a', 'b', 1, 2])
b = CategoricalDtype(['a', 'b', '1', '2'])
assert hash(a) != hash(b)
def test_from_categorical_dtype_identity(self):
c1 = Categorical([1, 2], categories=[1, 2, 3], ordered=True)
# Identity test for no changes
c2 = CategoricalDtype._from_categorical_dtype(c1)
assert c2 is c1
def test_from_categorical_dtype_categories(self):
c1 = Categorical([1, 2], categories=[1, 2, 3], ordered=True)
# override categories
result = CategoricalDtype._from_categorical_dtype(
c1, categories=[2, 3])
assert result == CategoricalDtype([2, 3], ordered=True)
def test_from_categorical_dtype_ordered(self):
c1 = Categorical([1, 2], categories=[1, 2, 3], ordered=True)
# override ordered
result = CategoricalDtype._from_categorical_dtype(
c1, ordered=False)
assert result == CategoricalDtype([1, 2, 3], ordered=False)
def test_from_categorical_dtype_both(self):
c1 = Categorical([1, 2], categories=[1, 2, 3], ordered=True)
# override ordered
result = CategoricalDtype._from_categorical_dtype(
c1, categories=[1, 2], ordered=False)
assert result == CategoricalDtype([1, 2], ordered=False)
def test_str_vs_repr(self):
c1 = CategoricalDtype(['a', 'b'])
assert str(c1) == 'category'
# Py2 will have unicode prefixes
pat = r"CategoricalDtype\(categories=\[.*\], ordered=False\)"
assert re.match(pat, repr(c1))
def test_categorical_categories(self):
# GH17884
c1 = CategoricalDtype(Categorical(['a', 'b']))
tm.assert_index_equal(c1.categories, pd.Index(['a', 'b']))
c1 = CategoricalDtype(CategoricalIndex(['a', 'b']))
tm.assert_index_equal(c1.categories, pd.Index(['a', 'b']))
|
bsd-3-clause
|
ua-snap/downscale
|
snap_scripts/old_scripts/tem_iem_older_scripts_april2018/tem_inputs_iem/old_code/clt_ar5_model_data_downscaling.py
|
3
|
20574
|
# # # # #
# Tool to downscale the CMIP5 data from the PCMDI group.
# # # # #
def shiftgrid(lon0,datain,lonsin,start=True,cyclic=360.0):
import numpy as np
"""
Shift global lat/lon grid east or west.
.. tabularcolumns:: |l|L|
============== ====================================================
Arguments Description
============== ====================================================
lon0 starting longitude for shifted grid
(ending longitude if start=False). lon0 must be on
input grid (within the range of lonsin).
datain original data with longitude the right-most
dimension.
lonsin original longitudes.
============== ====================================================
.. tabularcolumns:: |l|L|
============== ====================================================
Keywords Description
============== ====================================================
start if True, lon0 represents the starting longitude
of the new grid. if False, lon0 is the ending
longitude. Default True.
cyclic width of periodic domain (default 360)
============== ====================================================
returns ``dataout,lonsout`` (data and longitudes on shifted grid).
"""
if np.fabs(lonsin[-1]-lonsin[0]-cyclic) > 1.e-4:
# Use all data instead of raise ValueError, 'cyclic point not included'
start_idx = 0
else:
# If cyclic, remove the duplicate point
start_idx = 1
if lon0 < lonsin[0] or lon0 > lonsin[-1]:
raise ValueError('lon0 outside of range of lonsin')
i0 = np.argmin(np.fabs(lonsin-lon0))
i0_shift = len(lonsin)-i0
if np.ma.isMA(datain):
dataout = np.ma.zeros(datain.shape,datain.dtype)
else:
dataout = np.zeros(datain.shape,datain.dtype)
if np.ma.isMA(lonsin):
lonsout = np.ma.zeros(lonsin.shape,lonsin.dtype)
else:
lonsout = np.zeros(lonsin.shape,lonsin.dtype)
if start:
lonsout[0:i0_shift] = lonsin[i0:]
else:
lonsout[0:i0_shift] = lonsin[i0:]-cyclic
dataout[...,0:i0_shift] = datain[...,i0:]
if start:
lonsout[i0_shift:] = lonsin[start_idx:i0+start_idx]+cyclic
else:
lonsout[i0_shift:] = lonsin[start_idx:i0+start_idx]
dataout[...,i0_shift:] = datain[...,start_idx:i0+start_idx]
return dataout,lonsout
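# A small usage sketch (not part of the original tool): shift a 1-degree global grid stored
# 0..359E so that the longitudes run -180..179, mirroring the shiftgrid call made in __main__ below.
def _example_shiftgrid():
	import numpy as np
	lons = np.arange(0., 360., 1.)            # 0, 1, ..., 359
	data = np.zeros((12, lons.size))          # dummy (time, lon) field
	shifted, new_lons = shiftgrid(180., data, lons, start=False)
	assert new_lons[0] == -180. and new_lons[-1] == 179.
	return shifted, new_lons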
def cru_generator( n, cru_clim_list ):
'''
generator that will produce the cru climatologies with a
generator and replicate for the total number of years in n
'''
months = [ '01', '02', '03', '04', '05', '06', '07', '08', '09', '10', '11', '12' ]
for i in range( n ):
for count, j in enumerate( cru_clim_list ):
yield j
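# Usage note (illustrative): cru_generator(n, cru_stack) just yields the twelve monthly
# climatologies over and over (n repetitions), so it can be zipped against a month-by-month
# list of output filenames as done in __main__ below; zip() stops at the shorter sequence.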
def standardized_fn_to_vars( fn ):
''' take a filename string following the convention for this downscaling and break into parts and return a dict'''
name_convention = [ 'variable', 'cmor_table', 'model', 'scenario', 'experiment', 'begin_time', 'end_time' ]
fn = os.path.basename( fn )
fn_list = fn.split( '.' )[0].split( '_' )
return { i:j for i,j in zip( name_convention, fn_list )}
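# A concrete example (the filename follows the prepped-file pattern referenced in the comments
# at the bottom of this script):
#   standardized_fn_to_vars( 'clt_Amon_GFDL-CM3_rcp26_r1i1p1_200601_210012.nc' )
#   -> {'variable':'clt', 'cmor_table':'Amon', 'model':'GFDL-CM3', 'scenario':'rcp26',
#       'experiment':'r1i1p1', 'begin_time':'200601', 'end_time':'210012'}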
def downscale( src, dst, cru, src_crs, src_affine, dst_crs, dst_affine, output_filename, dst_meta, variable,\
method='cubic_spline', operation='add', output_dtype='float32', **kwargs ):
'''
operation can be one of two keywords for the operation to perform the delta downscaling
- keyword strings are one of: 'add'= addition, 'mult'=multiplication, or 'div'=division (not implemented)
- method can be one of 'cubic_spline', 'nearest', 'bilinear' and must be input as a string.
- output_dtype can be one of 'int32', 'float32'
'''
from rasterio.warp import reproject, RESAMPLING
def add( cru, anom ):
return cru + anom
def mult( cru, anom ):
return cru * anom
def div( cru, anom ):
# return cru / anom
# this one may not be useful, but the placeholder is here
		raise NotImplementedError
# switch to deal with numeric output dtypes
dtypes_switch = {'int32':np.int32, 'float32':np.float32}
# switch to deal with different resampling types
method_switch = { 'nearest':RESAMPLING.nearest, 'bilinear':RESAMPLING.bilinear, 'cubic_spline':RESAMPLING.cubic_spline }
method = method_switch[ method ]
# reproject src to dst
out = np.zeros( dst.shape )
reproject( src,
out,
src_transform=src_affine,
src_crs=src_crs,
dst_transform=dst_affine,
dst_crs=dst_crs,
resampling=method )
# switch to deal with different downscaling operators
operation_switch = { 'add':add, 'mult':mult, 'div':div }
downscaled = operation_switch[ operation ]( cru, out )
# reset any > 100 values to 99 if the variable is cld or hur
if variable == 'clt' or variable == 'hur' or variable == 'cld':
downscaled[ downscaled > 100 ] = 99
# give the proper fill values to the oob regions
downscaled.fill_value = dst_meta['nodata']
downscaled = downscaled.filled()
# this is a geotiff creator so lets pass in the lzw compression
dst_meta.update( compress='lzw' )
with rasterio.open( output_filename, 'w', **dst_meta ) as out:
out.write( downscaled.astype( dtypes_switch[ output_dtype ] ), 1 )
return output_filename
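# Single-call sketch (argument names are illustrative; in practice the __main__ block below
# builds these arguments and dispatches them through run() with multiprocessing):
#   downscale( src=anom_slice, dst=np.empty((1186, 3218)), cru=cru_month,
#              src_crs={'init':'epsg:4326'}, src_affine=affine_4326,
#              dst_crs={'init':'epsg:3338'}, dst_affine=affine_3338,
#              output_filename='clt_example.tif', dst_meta=meta_3338,
#              variable='clt', operation='mult' )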
def run( args ):
'''
	simple wrapper that unpacks an argument dict into downscale(),
	working around the single-argument restriction of the
	multiprocessing.map implementation.
'''
return( downscale( **args ) )
if __name__ == '__main__':
import pandas as pd
import numpy as np
import os, sys, re, xray, rasterio, glob, argparse
from rasterio import Affine as A
from rasterio.warp import reproject, RESAMPLING
from pathos import multiprocessing as mp
# parse the commandline arguments
parser = argparse.ArgumentParser( description='preprocess cmip5 input netcdf files to a common type and single files' )
parser.add_argument( "-mi", "--modeled_fn", nargs='?', const=None, action='store', dest='modeled_fn', type=str, help="path to modeled input filename (NetCDF); default:None" )
parser.add_argument( "-hi", "--historical_fn", nargs='?', const=None, action='store', dest='historical_fn', type=str, help="path to historical input filename (NetCDF); default:None" )
parser.add_argument( "-o", "--output_path", action='store', dest='output_path', type=str, help="string path to the output folder containing the new downscaled outputs" )
parser.add_argument( "-bt", "--begin_time", action='store', dest='begin_time', type=str, help="string in format YYYYMM of the beginning month/year" )
parser.add_argument( "-et", "--end_time", action='store', dest='end_time', type=str, help="string in format YYYYMM of the ending month/year" )
parser.add_argument( "-cbt", "--climatology_begin_time", nargs='?', const='196101', action='store', dest='climatology_begin', type=str, help="string in format YYYYMM or YYYY of the beginning month and potentially (year) of the climatology period" )
parser.add_argument( "-cet", "--climatology_end_time", nargs='?', const='199012', action='store', dest='climatology_end', type=str, help="string in format YYYYMM or YYYY of the ending month and potentially (year) of the climatology period" )
parser.add_argument( "-plev", "--plev", nargs='?', const=None, action='store', dest='plev', type=int, help="integer value (in millibars) of the desired pressure level to extract, if there is one." )
parser.add_argument( "-cru", "--cru_path", action='store', dest='cru_path', type=str, help="path to the directory storing the cru climatology data derived from CL2.0" )
parser.add_argument( "-at", "--anomalies_calc_type", nargs='?', const='absolute', action='store', dest='anomalies_calc_type', type=str, help="string of 'proportional' or 'absolute' to inform of anomalies calculation type to perform." )
parser.add_argument( "-m", "--metric", nargs='?', const='metric', action='store', dest='metric', type=str, help="string of whatever the metric type is of the outputs to put in the filename." )
parser.add_argument( "-dso", "--downscale_operation", action='store', dest='downscale_operation', type=str, help="string of 'add', 'mult', 'div', which refers to the type or downscaling operation to use." )
parser.add_argument( "-nc", "--ncores", nargs='?', const=2, action='store', dest='ncores', type=int, help="integer valueof number of cores to use. default:2" )
# parse args
args = parser.parse_args()
# unpack args
modeled_fn = args.modeled_fn
historical_fn = args.historical_fn
output_path = args.output_path
begin_time = args.begin_time
end_time = args.end_time
climatology_begin = args.climatology_begin
climatology_end = args.climatology_end
plev = args.plev
cru_path = args.cru_path
anomalies_calc_type = args.anomalies_calc_type
metric = args.metric
downscale_operation = args.downscale_operation
ncores = args.ncores
# * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
# [NOTE]: hardwired raster metadata meeting the ALFRESCO Model's needs for
# perfectly aligned inputs this is used as template metadata that
# is used in output generation. template raster filename below:
# '/workspace/Shared/Tech_Projects/ALFRESCO_Inputs/project_data/
# TEM_Data/templates/tas_mean_C_AR5_GFDL-CM3_historical_01_1860.tif'
meta_3338 = {'affine': A(2000.0, 0.0, -2173223.206087799,
0.0, -2000.0, 2548412.932644147),
'count': 1,
'crs': {'init':'epsg:3338'},
'driver': u'GTiff',
'dtype': 'float32',
'height': 1186,
'nodata': -3.4e+38,
'width': 3218,
'compress':'lzw'}
# output template numpy array same dimensions as the template
dst = np.empty( (1186, 3218) )
# * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
# condition to deal with reading in historical data if needed.
if modeled_fn is not None and historical_fn is not None:
# parse the input name for some file metadata
output_naming_dict = standardized_fn_to_vars( modeled_fn )
# this is to maintain cleanliness
variable = output_naming_dict[ 'variable' ]
# read in both modeled and historical
ds = xray.open_dataset( modeled_fn )
ds = ds[ variable ].load()
clim_ds = xray.open_dataset( historical_fn )
clim_ds = clim_ds[ variable ].load()
# generate climatology / anomalies
clim_ds = clim_ds.loc[ {'time':slice(climatology_begin,climatology_end)} ]
climatology = clim_ds.groupby( 'time.month' ).mean( 'time' )
# find the begin/end years of the prepped files
dates = ds.time.to_pandas()
years = dates.apply( lambda x: x.year )
begin_time = years.min()
end_time = years.max()
del clim_ds
elif historical_fn is not None and modeled_fn is None:
# parse the input name for some file metadata
output_naming_dict = standardized_fn_to_vars( historical_fn )
# this is to maintain cleanliness
variable = output_naming_dict[ 'variable' ]
# read in historical
ds = xray.open_dataset( historical_fn )
ds = ds[ variable ].load()
# generate climatology / anomalies
climatology = ds.loc[ {'time':slice(climatology_begin,climatology_end)} ]
climatology = climatology.groupby( 'time.month' ).mean( 'time' )
# find the begin/end years of the prepped files
dates = ds.time.to_pandas()
years = dates.apply( lambda x: x.year )
begin_time = years.min()
end_time = years.max()
else:
		raise NameError( 'ERROR: must have both modeled_fn and historical_fn, or just historical_fn' )
# standardize the output pathing
if output_naming_dict[ 'variable' ] == 'clt':
variable_out = 'cld'
else:
variable_out = output_naming_dict[ 'variable' ]
output_path = os.path.join( output_path, 'ar5', output_naming_dict['model'], variable_out, 'downscaled' )
if not os.path.exists( output_path ):
os.makedirs( output_path )
# if there is a pressure level to extract, extract it
if plev is not None:
plevel, = np.where( ds.plev == plev )
ds = ds[ :, plevel[0], ... ]
climatology = climatology[ :, plevel[0], ... ]
# deal with different anomaly calculation types
if anomalies_calc_type == 'absolute':
anomalies = ds.groupby( 'time.month' ) - climatology
elif anomalies_calc_type == 'proportional':
anomalies = climatology / ds.groupby( 'time.month' )
else:
		raise NameError( 'anomalies_calc_type can only be one of "absolute" or "proportional"' )
# some setup of the output raster metadata
time_len, rows, cols = anomalies.shape
crs = 'epsg:4326'
affine = A( *[np.diff( ds.lon )[ 0 ], 0.0, -180.0, 0.0, -np.diff( ds.lat )[ 0 ], 90.0] )
count = time_len
resolution = ( np.diff( ds.lat )[ 0 ], np.diff( ds.lon )[ 0 ] )
# close the dataset and clean it up
ds = None
# shift the grid to Greenwich Centering
dat, lons = shiftgrid( 180., anomalies[:], anomalies.lon.data, start=False )
# metadata for input?
meta_4326 = {'affine':affine,
'height':rows,
'width':cols,
'crs':crs,
'driver':'GTiff',
'dtype':np.float32,
'count':time_len,
'compress':'lzw' }
# build some filenames for the outputs to be generated
months = [ '01', '02', '03', '04', '05', '06', '07', '08', '09', '10', '11', '12' ]
years = [ str(year) for year in range( begin_time, end_time + 1, 1 ) ]
# combine the months and the years
combinations = [ (month, year) for year in years for month in months ]
	output_filenames = [ os.path.join( output_path, '_'.join([variable_out, metric, output_naming_dict['model'], output_naming_dict['scenario'], output_naming_dict['experiment'], month, year]) + '.tif' ) for month, year in combinations ]
# load the baseline CRU CL2.0 data
# [NOTE]: THIS ASSUMES THEY ARE THE ONLY FILES IN THE DIRECTORY -- COULD BE A GOTCHA
cru_files = glob.glob( os.path.join( cru_path, '*.tif' ) )
cru_files.sort()
cru_stack = [ rasterio.open( fn ).read( 1 ) for fn in cru_files ]
# this is a hack to make a masked array with the cru data
cru_stack = [ np.ma.masked_where( cru == cru.min(), cru ) for cru in cru_stack ]
cru_gen = cru_generator( len(output_filenames), cru_stack )
	# cleanup some unneeded vars that are hogging RAM
del climatology, anomalies
# run in parallel using PATHOS
pool = mp.Pool( processes=ncores )
args_list = zip( np.vsplit( dat, time_len ), output_filenames, cru_gen )
del dat, cru_gen, cru_stack
out = pool.map( run, [{'src':src, 'output_filename':fn, 'dst':dst, 'cru':cru, 'src_crs':meta_4326[ 'crs' ], 'src_affine':meta_4326[ 'affine' ], \
'dst_crs':meta_3338[ 'crs' ], 'dst_affine':meta_3338[ 'affine' ], 'dst_meta':meta_3338, 'operation':downscale_operation, 'variable':variable } \
for src,fn,cru in args_list ] )
pool.close()
# # # # # # # # # SOME TESTING AND EXAMPLE GENERATION AREA # # # # # # # # #
# TO RUN THE CLOUDS DOWNSCALING USE THIS EXAMPLE:
# import os
# import pandas as pd
# import numpy as np
# # change to the script repo
# os.chdir( '/workspace/Shared/Tech_Projects/ALFRESCO_Inputs/project_data/CODE/tem_ar5_inputs/downscale_cmip5/bin' )
# # to run the futures:
# prepped_dir = '/workspace/Shared/Tech_Projects/ESGF_Data_Access/project_data/data/cmip5_clt_nonstandard/prepped'
# file_groups = [ [os.path.join(root,f) for f in files] for root, sub, files in os.walk( prepped_dir ) if len(files) > 0 and files[0].endswith('.nc') ]
# def make_rcp_file_pairs( file_group ):
# # there is only one historical per group since these have been pre-processed to a single file and date range
# historical = [ file_group.pop( count ) for count, i in enumerate( file_group ) if 'historical' in i ]
# return zip( np.repeat( historical, len(file_group) ).tolist(), file_group )
# grouped_pairs = [ make_rcp_file_pairs( file_group ) for file_group in file_groups ]
# for file_group in grouped_pairs:
# for historical_fn, modeled_fn in file_group:
# output_path = '/workspace/Shared/Tech_Projects/ALFRESCO_Inputs/project_data/TEM_Data/cru_october_final'
# climatology_begin = '1961-01'
# climatology_end = '1990-12'
# cru_path = '/workspace/Shared/Tech_Projects/ALFRESCO_Inputs/project_data/TEM_Data/cru_october_final/cru_cl20/cld/akcan'
# anomalies_calc_type = 'proportional'
# metric = 'pct'
# downscale_operation = 'mult'
# ncores = '10'
# # future modeled data
# # # build the args
# args_tuples = [ ( 'mi', modeled_fn ),
# ( 'hi', historical_fn ),
# ( 'o', output_path ),
# ( 'cbt', climatology_begin ),
# ( 'cet', climatology_end ),
# ( 'cru', cru_path ),
# ( 'at', anomalies_calc_type ),
# ( 'm', metric ),
# ( 'dso', downscale_operation ),
# ( 'nc', ncores ) ]
# args = ''.join([ ' -'+flag+' '+value for flag, value in args_tuples ])
# ncores = '10'
# os.system( 'python clt_ar5_model_data_downscaling.py ' + args )
# del modeled_fn
# # now historical modeled data
# # # build the args
# args_tuples = [ ( 'hi', historical_fn ),
# ( 'o', output_path ),
# ( 'cbt', climatology_begin ),
# ( 'cet', climatology_end ),
# ( 'cru', cru_path ),
# ( 'at', anomalies_calc_type ),
# ( 'm', metric ),
# ( 'dso', downscale_operation ),
# ( 'nc', ncores ) ]
# args = ''.join([ ' -'+flag+' '+value for flag, value in args_tuples ])
# os.system( 'python clt_ar5_model_data_downscaling.py ' + args )
# # TO RUN THE TEMPERATURE DOWNSCALING USE THIS EXAMPLE:
# import os
# import pandas as pd
# import numpy as np
# # change to the script repo
# os.chdir( '/workspace/Shared/Tech_Projects/ALFRESCO_Inputs/project_data/CODE/tem_ar5_inputs/downscale_cmip5/bin' )
# # to run the futures:
# prepped_dir = '/workspace/Shared/Tech_Projects/ESGF_Data_Access/project_data/data/prepped'
# file_groups = [ [os.path.join(root,f) for f in files] for root, sub, files in os.walk( prepped_dir ) if len(files) > 0 and files[0].endswith('.nc') ]
# def make_rcp_file_pairs( file_group ):
# # there is only one historical per group since these have been pre-processed to a single file and date range
# historical = [ file_group.pop( count ) for count, i in enumerate( file_group ) if 'historical' in i ]
# return zip( np.repeat( historical, len(file_group) ).tolist(), file_group )
# grouped_pairs = [ make_rcp_file_pairs( file_group ) for file_group in file_groups ]
# for file_group in grouped_pairs:
# for historical_fn, modeled_fn in file_group:
# output_path = '/workspace/Shared/Tech_Projects/ALFRESCO_Inputs/project_data/TEM_Data/cru_october_final'
# climatology_begin = '1961-01'
# climatology_end = '1990-12'
# cru_path = '/workspace/Shared/Tech_Projects/ALFRESCO_Inputs/project_data/TEM_Data/cru_october_final/cru_cl20/tas/akcan'
# anomalies_calc_type = 'absolute'
# metric = 'C'
# downscale_operation = 'add'
# ncores = '10'
# # future modeled data
# # # build the args
# args_tuples = [ ( 'mi', modeled_fn ),
# ( 'hi', historical_fn ),
# ( 'o', output_path ),
# ( 'cbt', climatology_begin ),
# ( 'cet', climatology_end ),
# ( 'cru', cru_path ),
# ( 'at', anomalies_calc_type ),
# ( 'm', metric ),
# ( 'dso', downscale_operation ),
# ( 'nc', ncores ) ]
# args = ''.join([ ' -'+flag+' '+value for flag, value in args_tuples ])
# ncores = '10'
# os.system( 'python clt_ar5_model_data_downscaling.py ' + args )
# del modeled_fn
# # now historical modeled data
# # # build the args by pop(-ping) out the first entry which is modeled_fn
# args_tuples.pop(0)
# args = ''.join([ ' -'+flag+' '+value for flag, value in args_tuples ])
# os.system( 'python clt_ar5_model_data_downscaling.py ' + args )
# # begin_time = '1860-01'
# # end_time = '2005-12'
# # ( 'bt', begin_time ),
# # ( 'et', end_time ),
# # src=src; output_filename=fn; dst=dst; cru=cru; src_crs=meta_4326[ 'crs' ]; src_affine=meta_4326[ 'affine' ]; dst_crs=meta_3338[ 'crs' ]; dst_affine=meta_3338[ 'affine' ]; dst_meta=meta_3338; operation=downscale_operation, variable=variable
# # some setup pathing <<-- THIS TO BE CONVERTED TO ARGUMENTS AT COMMAND LINE
# # historical_fn = '/workspace/Shared/Tech_Projects/ESGF_Data_Access/project_data/data/cmip5_clt_nonstandard/prepped/GFDL-CM3/clt/clt_Amon_GFDL-CM3_historical_r1i1p1_186001_200512.nc'
# # modeled_fn = '/workspace/Shared/Tech_Projects/ESGF_Data_Access/project_data/data/cmip5_clt_nonstandard/prepped/GFDL-CM3/clt/clt_Amon_GFDL-CM3_rcp26_r1i1p1_200601_210012.nc'
# # variable = 'cld'
# # metric = 'pct'
# # # output_path = '/home/UA/malindgren/Documents/hur/akcan/new'
# # time_begin = '2006-01' # will change for future and historical
# # time_end = '2100-12' # will change for future and historical
# # climatology_begin = '1961'
# # climatology_end = '1990'
# # plev = 1000 # this is in millibar data, this is also a None default!
# # cru_path = '/workspace/Shared/Tech_Projects/ALFRESCO_Inputs/project_data/TEM_Data/cru_ts20/akcan'
# # anomalies_calc_type = 'proportional'
|
mit
|
pjryan126/solid-start-careers
|
store/api/zillow/venv/lib/python2.7/site-packages/pandas/compat/pickle_compat.py
|
1
|
2845
|
""" support pre 0.12 series pickle compatibility """
# flake8: noqa
import sys
import numpy as np
import pandas
import copy
import pickle as pkl
from pandas import compat, Index
from pandas.compat import u, string_types
def load_reduce(self):
stack = self.stack
args = stack.pop()
func = stack[-1]
if type(args[0]) is type:
n = args[0].__name__
try:
stack[-1] = func(*args)
return
except Exception as e:
# if we have a deprecated function
# try to replace and try again
if '_reconstruct: First argument must be a sub-type of ndarray' in str(e):
try:
cls = args[0]
stack[-1] = object.__new__(cls)
return
except:
pass
# try to reencode the arguments
if getattr(self,'encoding',None) is not None:
args = tuple([arg.encode(self.encoding)
if isinstance(arg, string_types)
else arg for arg in args])
try:
stack[-1] = func(*args)
return
except:
pass
if getattr(self,'is_verbose',None):
print(sys.exc_info())
print(func, args)
raise
stack[-1] = value
if compat.PY3:
class Unpickler(pkl._Unpickler):
pass
else:
class Unpickler(pkl.Unpickler):
pass
Unpickler.dispatch = copy.copy(Unpickler.dispatch)
Unpickler.dispatch[pkl.REDUCE[0]] = load_reduce
def load_newobj(self):
args = self.stack.pop()
cls = self.stack[-1]
# compat
if issubclass(cls, Index):
obj = object.__new__(cls)
else:
obj = cls.__new__(cls, *args)
self.stack[-1] = obj
Unpickler.dispatch[pkl.NEWOBJ[0]] = load_newobj
# py3 compat
def load_newobj_ex(self):
kwargs = self.stack.pop()
args = self.stack.pop()
cls = self.stack.pop()
# compat
if issubclass(cls, Index):
obj = object.__new__(cls)
else:
obj = cls.__new__(cls, *args, **kwargs)
self.append(obj)
try:
Unpickler.dispatch[pkl.NEWOBJ_EX[0]] = load_newobj_ex
except:
pass
def load(fh, encoding=None, compat=False, is_verbose=False):
"""load a pickle, with a provided encoding
if compat is True:
fake the old class hierarchy
if it works, then return the new type objects
Parameters
----------
fh: a filelike object
encoding: an optional encoding
compat: provide Series compatibility mode, boolean, default False
is_verbose: show exception output
"""
try:
fh.seek(0)
if encoding is not None:
up = Unpickler(fh, encoding=encoding)
else:
up = Unpickler(fh)
up.is_verbose = is_verbose
return up.load()
except:
raise
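# Minimal usage sketch (the path and encoding below are illustrative, not part of this module):
# pickles written by an old pandas under Python 2 are typically re-read on Python 3 with an
# explicit byte encoding, e.g.
#   with open('legacy_series.pkl', 'rb') as fh:
#       obj = load(fh, encoding='latin-1')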
|
gpl-2.0
|
yunfeilu/scikit-learn
|
sklearn/linear_model/coordinate_descent.py
|
59
|
76336
|
# Author: Alexandre Gramfort <[email protected]>
# Fabian Pedregosa <[email protected]>
# Olivier Grisel <[email protected]>
# Gael Varoquaux <[email protected]>
#
# License: BSD 3 clause
import sys
import warnings
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy import sparse
from .base import LinearModel, _pre_fit
from ..base import RegressorMixin
from .base import center_data, sparse_center_data
from ..utils import check_array, check_X_y, deprecated
from ..utils.validation import check_random_state
from ..cross_validation import check_cv
from ..externals.joblib import Parallel, delayed
from ..externals import six
from ..externals.six.moves import xrange
from ..utils.extmath import safe_sparse_dot
from ..utils.validation import check_is_fitted
from ..utils.validation import column_or_1d
from ..utils import ConvergenceWarning
from . import cd_fast
###############################################################################
# Paths functions
def _alpha_grid(X, y, Xy=None, l1_ratio=1.0, fit_intercept=True,
eps=1e-3, n_alphas=100, normalize=False, copy_X=True):
""" Compute the grid of alpha values for elastic net parameter search
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data. Pass directly as Fortran-contiguous data to avoid
unnecessary memory duplication
y : ndarray, shape (n_samples,)
Target values
Xy : array-like, optional
Xy = np.dot(X.T, y) that can be precomputed.
l1_ratio : float
The elastic net mixing parameter, with ``0 <= l1_ratio <= 1``.
        For ``l1_ratio = 0`` the penalty is an L2 penalty. For
        ``l1_ratio = 1`` it is an L1 penalty. For ``0 < l1_ratio <
        1``, the penalty is a combination of L1 and L2.
eps : float, optional
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``
n_alphas : int, optional
Number of alphas along the regularization path
fit_intercept : boolean, default True
Whether to fit an intercept or not
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
"""
n_samples = len(y)
sparse_center = False
if Xy is None:
X_sparse = sparse.isspmatrix(X)
sparse_center = X_sparse and (fit_intercept or normalize)
X = check_array(X, 'csc',
copy=(copy_X and fit_intercept and not X_sparse))
if not X_sparse:
# X can be touched inplace thanks to the above line
X, y, _, _, _ = center_data(X, y, fit_intercept,
normalize, copy=False)
Xy = safe_sparse_dot(X.T, y, dense_output=True)
if sparse_center:
# Workaround to find alpha_max for sparse matrices.
# since we should not destroy the sparsity of such matrices.
_, _, X_mean, _, X_std = sparse_center_data(X, y, fit_intercept,
normalize)
mean_dot = X_mean * np.sum(y)
if Xy.ndim == 1:
Xy = Xy[:, np.newaxis]
if sparse_center:
if fit_intercept:
Xy -= mean_dot[:, np.newaxis]
if normalize:
Xy /= X_std[:, np.newaxis]
alpha_max = (np.sqrt(np.sum(Xy ** 2, axis=1)).max() /
(n_samples * l1_ratio))
if alpha_max <= np.finfo(float).resolution:
alphas = np.empty(n_alphas)
alphas.fill(np.finfo(float).resolution)
return alphas
return np.logspace(np.log10(alpha_max * eps), np.log10(alpha_max),
num=n_alphas)[::-1]
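# A minimal usage sketch for ``_alpha_grid`` (the helper name and the toy
# values below are illustrative only):
def _example_alpha_grid():
    X = np.array([[1., 2.], [3., 4.], [5., 6.]])
    y = np.array([1., 2., 3.])
    # Five alphas, largest first, spanning three decades below alpha_max.
    return _alpha_grid(X, y, l1_ratio=0.5, eps=1e-3, n_alphas=5)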
def lasso_path(X, y, eps=1e-3, n_alphas=100, alphas=None,
precompute='auto', Xy=None, copy_X=True, coef_init=None,
verbose=False, return_n_iter=False, positive=False, **params):
"""Compute Lasso path with coordinate descent
The Lasso optimization function varies for mono and multi-outputs.
For mono-output tasks it is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
For multi-output tasks it is::
(1 / (2 * n_samples)) * ||Y - XW||^2_Fro + alpha * ||W||_21
Where::
||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}
i.e. the sum of norm of each row.
Read more in the :ref:`User Guide <lasso>`.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data. Pass directly as Fortran-contiguous data to avoid
unnecessary memory duplication. If ``y`` is mono-output then ``X``
can be sparse.
y : ndarray, shape (n_samples,), or (n_samples, n_outputs)
Target values
eps : float, optional
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``
n_alphas : int, optional
Number of alphas along the regularization path
alphas : ndarray, optional
List of alphas where to compute the models.
If ``None`` alphas are set automatically
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
Xy : array-like, optional
Xy = np.dot(X.T, y) that can be precomputed. It is useful
only when the Gram matrix is precomputed.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
coef_init : array, shape (n_features, ) | None
The initial values of the coefficients.
verbose : bool or integer
Amount of verbosity.
params : kwargs
keyword arguments passed to the coordinate descent solver.
positive : bool, default False
If set to True, forces coefficients to be positive.
return_n_iter : bool
whether to return the number of iterations or not.
Returns
-------
alphas : array, shape (n_alphas,)
The alphas along the path where models are computed.
coefs : array, shape (n_features, n_alphas) or \
(n_outputs, n_features, n_alphas)
Coefficients along the path.
dual_gaps : array, shape (n_alphas,)
The dual gaps at the end of the optimization for each alpha.
n_iters : array-like, shape (n_alphas,)
The number of iterations taken by the coordinate descent optimizer to
reach the specified tolerance for each alpha.
Notes
-----
See examples/linear_model/plot_lasso_coordinate_descent_path.py
for an example.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
Note that in certain cases, the Lars solver may be significantly
    faster at computing this path. In particular, linear
    interpolation can be used to retrieve model coefficients between the
    values output by lars_path.
Examples
---------
Comparing lasso_path and lars_path with interpolation:
>>> X = np.array([[1, 2, 3.1], [2.3, 5.4, 4.3]]).T
>>> y = np.array([1, 2, 3.1])
>>> # Use lasso_path to compute a coefficient path
>>> _, coef_path, _ = lasso_path(X, y, alphas=[5., 1., .5])
>>> print(coef_path)
[[ 0. 0. 0.46874778]
[ 0.2159048 0.4425765 0.23689075]]
>>> # Now use lars_path and 1D linear interpolation to compute the
>>> # same path
>>> from sklearn.linear_model import lars_path
>>> alphas, active, coef_path_lars = lars_path(X, y, method='lasso')
>>> from scipy import interpolate
>>> coef_path_continuous = interpolate.interp1d(alphas[::-1],
... coef_path_lars[:, ::-1])
>>> print(coef_path_continuous([5., 1., .5]))
[[ 0. 0. 0.46915237]
[ 0.2159048 0.4425765 0.23668876]]
See also
--------
lars_path
Lasso
LassoLars
LassoCV
LassoLarsCV
sklearn.decomposition.sparse_encode
"""
return enet_path(X, y, l1_ratio=1., eps=eps, n_alphas=n_alphas,
alphas=alphas, precompute=precompute, Xy=Xy,
copy_X=copy_X, coef_init=coef_init, verbose=verbose,
                     positive=positive, return_n_iter=return_n_iter, **params)
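# A minimal usage sketch for ``lasso_path`` with the ``positive`` constraint
# (toy data reused from the docstring example; the helper name is
# illustrative only):
def _example_lasso_path_positive():
    X = np.array([[1., 2., 3.1], [2.3, 5.4, 4.3]]).T
    y = np.array([1., 2., 3.1])
    # Every coefficient along the path is constrained to be non-negative.
    alphas, coefs, dual_gaps = lasso_path(X, y, n_alphas=5, positive=True)
    return alphas, coefs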
def enet_path(X, y, l1_ratio=0.5, eps=1e-3, n_alphas=100, alphas=None,
precompute='auto', Xy=None, copy_X=True, coef_init=None,
verbose=False, return_n_iter=False, positive=False, **params):
"""Compute elastic net path with coordinate descent
The elastic net optimization function varies for mono and multi-outputs.
For mono-output tasks it is::
1 / (2 * n_samples) * ||y - Xw||^2_2 +
+ alpha * l1_ratio * ||w||_1
+ 0.5 * alpha * (1 - l1_ratio) * ||w||^2_2
For multi-output tasks it is::
(1 / (2 * n_samples)) * ||Y - XW||^Fro_2
+ alpha * l1_ratio * ||W||_21
+ 0.5 * alpha * (1 - l1_ratio) * ||W||_Fro^2
Where::
||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}
i.e. the sum of norm of each row.
Read more in the :ref:`User Guide <elastic_net>`.
Parameters
----------
X : {array-like}, shape (n_samples, n_features)
Training data. Pass directly as Fortran-contiguous data to avoid
unnecessary memory duplication. If ``y`` is mono-output then ``X``
can be sparse.
y : ndarray, shape (n_samples,) or (n_samples, n_outputs)
Target values
l1_ratio : float, optional
float between 0 and 1 passed to elastic net (scaling between
l1 and l2 penalties). ``l1_ratio=1`` corresponds to the Lasso
eps : float
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``
n_alphas : int, optional
Number of alphas along the regularization path
alphas : ndarray, optional
List of alphas where to compute the models.
If None alphas are set automatically
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
Xy : array-like, optional
Xy = np.dot(X.T, y) that can be precomputed. It is useful
only when the Gram matrix is precomputed.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
coef_init : array, shape (n_features, ) | None
The initial values of the coefficients.
verbose : bool or integer
Amount of verbosity.
params : kwargs
keyword arguments passed to the coordinate descent solver.
return_n_iter : bool
whether to return the number of iterations or not.
positive : bool, default False
If set to True, forces coefficients to be positive.
Returns
-------
alphas : array, shape (n_alphas,)
The alphas along the path where models are computed.
coefs : array, shape (n_features, n_alphas) or \
(n_outputs, n_features, n_alphas)
Coefficients along the path.
dual_gaps : array, shape (n_alphas,)
The dual gaps at the end of the optimization for each alpha.
n_iters : array-like, shape (n_alphas,)
The number of iterations taken by the coordinate descent optimizer to
reach the specified tolerance for each alpha.
(Is returned when ``return_n_iter`` is set to True).
Notes
-----
See examples/plot_lasso_coordinate_descent_path.py for an example.
See also
--------
MultiTaskElasticNet
MultiTaskElasticNetCV
ElasticNet
ElasticNetCV
"""
# We expect X and y to be already float64 Fortran ordered when bypassing
# checks
check_input = 'check_input' not in params or params['check_input']
    pre_fit = 'pre_fit' not in params or params['pre_fit']
if check_input:
X = check_array(X, 'csc', dtype=np.float64, order='F', copy=copy_X)
y = check_array(y, 'csc', dtype=np.float64, order='F', copy=False,
ensure_2d=False)
if Xy is not None:
Xy = check_array(Xy, 'csc', dtype=np.float64, order='F',
copy=False,
ensure_2d=False)
n_samples, n_features = X.shape
multi_output = False
if y.ndim != 1:
multi_output = True
_, n_outputs = y.shape
# MultiTaskElasticNet does not support sparse matrices
if not multi_output and sparse.isspmatrix(X):
if 'X_mean' in params:
# As sparse matrices are not actually centered we need this
# to be passed to the CD solver.
X_sparse_scaling = params['X_mean'] / params['X_std']
else:
X_sparse_scaling = np.zeros(n_features)
# X should be normalized and fit already if function is called
# from ElasticNet.fit
if pre_fit:
X, y, X_mean, y_mean, X_std, precompute, Xy = \
_pre_fit(X, y, Xy, precompute, normalize=False,
fit_intercept=False,
copy=False, Xy_precompute_order='F')
if alphas is None:
            # No need to normalize or fit an intercept: it has been done
# above
alphas = _alpha_grid(X, y, Xy=Xy, l1_ratio=l1_ratio,
fit_intercept=False, eps=eps, n_alphas=n_alphas,
normalize=False, copy_X=False)
else:
alphas = np.sort(alphas)[::-1] # make sure alphas are properly ordered
n_alphas = len(alphas)
tol = params.get('tol', 1e-4)
max_iter = params.get('max_iter', 1000)
dual_gaps = np.empty(n_alphas)
n_iters = []
rng = check_random_state(params.get('random_state', None))
selection = params.get('selection', 'cyclic')
if selection not in ['random', 'cyclic']:
raise ValueError("selection should be either random or cyclic.")
random = (selection == 'random')
if not multi_output:
coefs = np.empty((n_features, n_alphas), dtype=np.float64)
else:
coefs = np.empty((n_outputs, n_features, n_alphas),
dtype=np.float64)
if coef_init is None:
coef_ = np.asfortranarray(np.zeros(coefs.shape[:-1]))
else:
coef_ = np.asfortranarray(coef_init)
for i, alpha in enumerate(alphas):
l1_reg = alpha * l1_ratio * n_samples
l2_reg = alpha * (1.0 - l1_ratio) * n_samples
if not multi_output and sparse.isspmatrix(X):
model = cd_fast.sparse_enet_coordinate_descent(
coef_, l1_reg, l2_reg, X.data, X.indices,
X.indptr, y, X_sparse_scaling,
max_iter, tol, rng, random, positive)
elif multi_output:
model = cd_fast.enet_coordinate_descent_multi_task(
coef_, l1_reg, l2_reg, X, y, max_iter, tol, rng, random)
elif isinstance(precompute, np.ndarray):
# We expect precompute to be already Fortran ordered when bypassing
# checks
if check_input:
precompute = check_array(precompute, 'csc', dtype=np.float64,
order='F')
model = cd_fast.enet_coordinate_descent_gram(
coef_, l1_reg, l2_reg, precompute, Xy, y, max_iter,
tol, rng, random, positive)
elif precompute is False:
model = cd_fast.enet_coordinate_descent(
coef_, l1_reg, l2_reg, X, y, max_iter, tol, rng, random,
positive)
else:
raise ValueError("Precompute should be one of True, False, "
"'auto' or array-like")
coef_, dual_gap_, eps_, n_iter_ = model
coefs[..., i] = coef_
dual_gaps[i] = dual_gap_
n_iters.append(n_iter_)
if dual_gap_ > eps_:
warnings.warn('Objective did not converge.' +
' You might want' +
' to increase the number of iterations',
ConvergenceWarning)
if verbose:
if verbose > 2:
print(model)
elif verbose > 1:
print('Path: %03i out of %03i' % (i, n_alphas))
else:
sys.stderr.write('.')
if return_n_iter:
return alphas, coefs, dual_gaps, n_iters
return alphas, coefs, dual_gaps
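# A minimal usage sketch for ``enet_path`` (toy data; the helper name is
# illustrative only):
def _example_enet_path():
    X = np.array([[1., 2., 3.1], [2.3, 5.4, 4.3]]).T
    y = np.array([1., 2., 3.1])
    # A mostly-L1 path with five alphas; ``coefs`` has shape (n_features, 5)
    # and the alphas are returned from largest to smallest.
    alphas, coefs, dual_gaps = enet_path(X, y, l1_ratio=0.9, n_alphas=5)
    return alphas, coefs, dual_gaps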
###############################################################################
# ElasticNet model
class ElasticNet(LinearModel, RegressorMixin):
"""Linear regression with combined L1 and L2 priors as regularizer.
Minimizes the objective function::
1 / (2 * n_samples) * ||y - Xw||^2_2 +
+ alpha * l1_ratio * ||w||_1
+ 0.5 * alpha * (1 - l1_ratio) * ||w||^2_2
If you are interested in controlling the L1 and L2 penalty
separately, keep in mind that this is equivalent to::
a * L1 + b * L2
where::
alpha = a + b and l1_ratio = a / (a + b)
The parameter l1_ratio corresponds to alpha in the glmnet R package while
alpha corresponds to the lambda parameter in glmnet. Specifically, l1_ratio
= 1 is the lasso penalty. Currently, l1_ratio <= 0.01 is not reliable,
unless you supply your own sequence of alpha.
Read more in the :ref:`User Guide <elastic_net>`.
Parameters
----------
alpha : float
Constant that multiplies the penalty terms. Defaults to 1.0
See the notes for the exact mathematical meaning of this
parameter.
``alpha = 0`` is equivalent to an ordinary least square, solved
by the :class:`LinearRegression` object. For numerical
reasons, using ``alpha = 0`` with the Lasso object is not advised
and you should prefer the LinearRegression object.
l1_ratio : float
The ElasticNet mixing parameter, with ``0 <= l1_ratio <= 1``. For
        ``l1_ratio = 0`` the penalty is an L2 penalty. For ``l1_ratio = 1`` it
is an L1 penalty. For ``0 < l1_ratio < 1``, the penalty is a
combination of L1 and L2.
fit_intercept : bool
Whether the intercept should be estimated or not. If ``False``, the
data is assumed to be already centered.
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument. For sparse input
this option is always ``True`` to preserve sparsity.
WARNING : The ``'auto'`` option is deprecated and will
be removed in 0.18.
max_iter : int, optional
The maximum number of iterations
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
warm_start : bool, optional
When set to ``True``, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
positive : bool, optional
When set to ``True``, forces the coefficients to be positive.
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
Attributes
----------
coef_ : array, shape (n_features,) | (n_targets, n_features)
parameter vector (w in the cost function formula)
sparse_coef_ : scipy.sparse matrix, shape (n_features, 1) | \
(n_targets, n_features)
``sparse_coef_`` is a readonly property derived from ``coef_``
intercept_ : float | array, shape (n_targets,)
independent term in decision function.
n_iter_ : array-like, shape (n_targets,)
number of iterations run by the coordinate descent solver to reach
the specified tolerance.
Notes
-----
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
See also
--------
SGDRegressor: implements elastic net regression with incremental training.
SGDClassifier: implements logistic regression with elastic net penalty
(``SGDClassifier(loss="log", penalty="elasticnet")``).
"""
path = staticmethod(enet_path)
def __init__(self, alpha=1.0, l1_ratio=0.5, fit_intercept=True,
normalize=False, precompute=False, max_iter=1000,
copy_X=True, tol=1e-4, warm_start=False, positive=False,
random_state=None, selection='cyclic'):
self.alpha = alpha
self.l1_ratio = l1_ratio
self.coef_ = None
self.fit_intercept = fit_intercept
self.normalize = normalize
self.precompute = precompute
self.max_iter = max_iter
self.copy_X = copy_X
self.tol = tol
self.warm_start = warm_start
self.positive = positive
self.intercept_ = 0.0
self.random_state = random_state
self.selection = selection
def fit(self, X, y, check_input=True):
"""Fit model with coordinate descent.
Parameters
-----------
X : ndarray or scipy.sparse matrix, (n_samples, n_features)
Data
y : ndarray, shape (n_samples,) or (n_samples, n_targets)
Target
Notes
-----
Coordinate descent is an algorithm that considers each column of
data at a time hence it will automatically convert the X input
as a Fortran-contiguous numpy array if necessary.
To avoid memory re-allocation it is advised to allocate the
initial data in memory directly using that format.
"""
if self.alpha == 0:
warnings.warn("With alpha=0, this algorithm does not converge "
"well. You are advised to use the LinearRegression "
"estimator", stacklevel=2)
if self.precompute == 'auto':
warnings.warn("Setting precompute to 'auto', was found to be "
"slower even when n_samples > n_features. Hence "
"it will be removed in 0.18.",
DeprecationWarning, stacklevel=2)
# We expect X and y to be already float64 Fortran ordered arrays
# when bypassing checks
if check_input:
X, y = check_X_y(X, y, accept_sparse='csc', dtype=np.float64,
order='F',
copy=self.copy_X and self.fit_intercept,
multi_output=True, y_numeric=True)
X, y, X_mean, y_mean, X_std, precompute, Xy = \
_pre_fit(X, y, None, self.precompute, self.normalize,
self.fit_intercept, copy=False, Xy_precompute_order='F')
if y.ndim == 1:
y = y[:, np.newaxis]
if Xy is not None and Xy.ndim == 1:
Xy = Xy[:, np.newaxis]
n_samples, n_features = X.shape
n_targets = y.shape[1]
if self.selection not in ['cyclic', 'random']:
raise ValueError("selection should be either random or cyclic.")
if not self.warm_start or self.coef_ is None:
coef_ = np.zeros((n_targets, n_features), dtype=np.float64,
order='F')
else:
coef_ = self.coef_
if coef_.ndim == 1:
coef_ = coef_[np.newaxis, :]
dual_gaps_ = np.zeros(n_targets, dtype=np.float64)
self.n_iter_ = []
for k in xrange(n_targets):
if Xy is not None:
this_Xy = Xy[:, k]
else:
this_Xy = None
_, this_coef, this_dual_gap, this_iter = \
self.path(X, y[:, k],
l1_ratio=self.l1_ratio, eps=None,
n_alphas=None, alphas=[self.alpha],
precompute=precompute, Xy=this_Xy,
fit_intercept=False, normalize=False, copy_X=True,
verbose=False, tol=self.tol, positive=self.positive,
X_mean=X_mean, X_std=X_std, return_n_iter=True,
coef_init=coef_[k], max_iter=self.max_iter,
random_state=self.random_state,
selection=self.selection,
check_input=False,
pre_fit=False)
coef_[k] = this_coef[:, 0]
dual_gaps_[k] = this_dual_gap[0]
self.n_iter_.append(this_iter[0])
if n_targets == 1:
self.n_iter_ = self.n_iter_[0]
self.coef_, self.dual_gap_ = map(np.squeeze, [coef_, dual_gaps_])
self._set_intercept(X_mean, y_mean, X_std)
# return self for chaining fit and predict calls
return self
@property
def sparse_coef_(self):
""" sparse representation of the fitted coef """
return sparse.csr_matrix(self.coef_)
@deprecated(" and will be removed in 0.19")
def decision_function(self, X):
"""Decision function of the linear model
Parameters
----------
X : numpy array or scipy.sparse matrix of shape (n_samples, n_features)
Returns
-------
T : array, shape (n_samples,)
The predicted decision function
"""
return self._decision_function(X)
def _decision_function(self, X):
"""Decision function of the linear model
Parameters
----------
X : numpy array or scipy.sparse matrix of shape (n_samples, n_features)
Returns
-------
T : array, shape (n_samples,)
The predicted decision function
"""
check_is_fitted(self, 'n_iter_')
if sparse.isspmatrix(X):
return np.ravel(safe_sparse_dot(self.coef_, X.T, dense_output=True)
+ self.intercept_)
else:
return super(ElasticNet, self)._decision_function(X)
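# A minimal usage sketch for ``ElasticNet`` (toy data; the helper name is
# illustrative only):
def _example_elastic_net():
    X = np.array([[0., 0.], [1., 1.], [2., 2.]])
    y = np.array([0., 1., 2.])
    model = ElasticNet(alpha=0.1, l1_ratio=0.7).fit(X, y)
    # After fitting, coef_, intercept_ and the derived sparse_coef_ are set.
    return model.predict(X), model.coef_, model.sparse_coef_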
###############################################################################
# Lasso model
class Lasso(ElasticNet):
"""Linear Model trained with L1 prior as regularizer (aka the Lasso)
The optimization objective for Lasso is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
Technically the Lasso model is optimizing the same objective function as
the Elastic Net with ``l1_ratio=1.0`` (no L2 penalty).
Read more in the :ref:`User Guide <lasso>`.
Parameters
----------
alpha : float, optional
Constant that multiplies the L1 term. Defaults to 1.0.
``alpha = 0`` is equivalent to an ordinary least square, solved
by the :class:`LinearRegression` object. For numerical
        reasons, using ``alpha = 0`` with the Lasso object is not advised
and you should prefer the LinearRegression object.
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument. For sparse input
this option is always ``True`` to preserve sparsity.
WARNING : The ``'auto'`` option is deprecated and will
be removed in 0.18.
max_iter : int, optional
The maximum number of iterations
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
warm_start : bool, optional
When set to True, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
positive : bool, optional
When set to ``True``, forces the coefficients to be positive.
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
Attributes
----------
coef_ : array, shape (n_features,) | (n_targets, n_features)
parameter vector (w in the cost function formula)
sparse_coef_ : scipy.sparse matrix, shape (n_features, 1) | \
(n_targets, n_features)
``sparse_coef_`` is a readonly property derived from ``coef_``
intercept_ : float | array, shape (n_targets,)
independent term in decision function.
n_iter_ : int | array-like, shape (n_targets,)
number of iterations run by the coordinate descent solver to reach
the specified tolerance.
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.Lasso(alpha=0.1)
>>> clf.fit([[0,0], [1, 1], [2, 2]], [0, 1, 2])
Lasso(alpha=0.1, copy_X=True, fit_intercept=True, max_iter=1000,
normalize=False, positive=False, precompute=False, random_state=None,
selection='cyclic', tol=0.0001, warm_start=False)
>>> print(clf.coef_)
[ 0.85 0. ]
>>> print(clf.intercept_)
0.15
See also
--------
lars_path
lasso_path
LassoLars
LassoCV
LassoLarsCV
sklearn.decomposition.sparse_encode
Notes
-----
The algorithm used to fit the model is coordinate descent.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
"""
path = staticmethod(enet_path)
def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
precompute=False, copy_X=True, max_iter=1000,
tol=1e-4, warm_start=False, positive=False,
random_state=None, selection='cyclic'):
super(Lasso, self).__init__(
alpha=alpha, l1_ratio=1.0, fit_intercept=fit_intercept,
normalize=normalize, precompute=precompute, copy_X=copy_X,
max_iter=max_iter, tol=tol, warm_start=warm_start,
positive=positive, random_state=random_state,
selection=selection)
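# A small numerical check of the equivalence stated in the ``Lasso``
# docstring: ``Lasso(alpha)`` and ``ElasticNet(alpha, l1_ratio=1.0)`` solve
# the same objective (toy data; the helper name is illustrative only):
def _example_lasso_equals_enet_l1():
    X = np.array([[0., 0.], [1., 1.], [2., 2.]])
    y = np.array([0., 1., 2.])
    lasso = Lasso(alpha=0.1).fit(X, y)
    enet = ElasticNet(alpha=0.1, l1_ratio=1.0).fit(X, y)
    # Both follow the same coordinate descent path, so coefficients agree.
    return np.allclose(lasso.coef_, enet.coef_)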
###############################################################################
# Functions for CV with paths functions
def _path_residuals(X, y, train, test, path, path_params, alphas=None,
l1_ratio=1, X_order=None, dtype=None):
"""Returns the MSE for the models computed by 'path'
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values
train : list of indices
The indices of the train set
test : list of indices
The indices of the test set
path : callable
function returning a list of models on the path. See
enet_path for an example of signature
path_params : dictionary
Parameters passed to the path function
alphas : array-like, optional
Array of float that is used for cross-validation. If not
provided, computed using 'path'
l1_ratio : float, optional
float between 0 and 1 passed to ElasticNet (scaling between
l1 and l2 penalties). For ``l1_ratio = 0`` the penalty is an
L2 penalty. For ``l1_ratio = 1`` it is an L1 penalty. For ``0
< l1_ratio < 1``, the penalty is a combination of L1 and L2
X_order : {'F', 'C', or None}, optional
The order of the arrays expected by the path function to
avoid memory copies
dtype : a numpy dtype or None
The dtype of the arrays expected by the path function to
avoid memory copies
"""
X_train = X[train]
y_train = y[train]
X_test = X[test]
y_test = y[test]
fit_intercept = path_params['fit_intercept']
normalize = path_params['normalize']
if y.ndim == 1:
precompute = path_params['precompute']
else:
# No Gram variant of multi-task exists right now.
# Fall back to default enet_multitask
precompute = False
X_train, y_train, X_mean, y_mean, X_std, precompute, Xy = \
_pre_fit(X_train, y_train, None, precompute, normalize, fit_intercept,
copy=False)
path_params = path_params.copy()
path_params['Xy'] = Xy
path_params['X_mean'] = X_mean
path_params['X_std'] = X_std
path_params['precompute'] = precompute
path_params['copy_X'] = False
path_params['alphas'] = alphas
if 'l1_ratio' in path_params:
path_params['l1_ratio'] = l1_ratio
# Do the ordering and type casting here, as if it is done in the path,
# X is copied and a reference is kept here
X_train = check_array(X_train, 'csc', dtype=dtype, order=X_order)
alphas, coefs, _ = path(X_train, y_train, **path_params)
del X_train, y_train
if y.ndim == 1:
# Doing this so that it becomes coherent with multioutput.
coefs = coefs[np.newaxis, :, :]
y_mean = np.atleast_1d(y_mean)
y_test = y_test[:, np.newaxis]
if normalize:
nonzeros = np.flatnonzero(X_std)
coefs[:, nonzeros] /= X_std[nonzeros][:, np.newaxis]
intercepts = y_mean[:, np.newaxis] - np.dot(X_mean, coefs)
if sparse.issparse(X_test):
n_order, n_features, n_alphas = coefs.shape
# Work around for sparse matices since coefs is a 3-D numpy array.
coefs_feature_major = np.rollaxis(coefs, 1)
feature_2d = np.reshape(coefs_feature_major, (n_features, -1))
X_test_coefs = safe_sparse_dot(X_test, feature_2d)
X_test_coefs = X_test_coefs.reshape(X_test.shape[0], n_order, -1)
else:
X_test_coefs = safe_sparse_dot(X_test, coefs)
residues = X_test_coefs - y_test[:, :, np.newaxis]
residues += intercepts
this_mses = ((residues ** 2).mean(axis=0)).mean(axis=0)
return this_mses
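# A sketch of the idea behind ``_path_residuals`` on a single train/test
# split, written out with ``lasso_path`` directly (toy data; the helper name
# is illustrative only; the path functions fit no intercept, so the toy data
# is kept roughly centered):
def _example_manual_path_mse():
    rng = np.random.RandomState(0)
    X = rng.randn(20, 3)
    y = X[:, 0] + 0.1 * rng.randn(20)
    train, test = np.arange(15), np.arange(15, 20)
    alphas, coefs, _ = lasso_path(X[train], y[train], n_alphas=5)
    # Prediction error of each of the five models on the held-out samples.
    residues = np.dot(X[test], coefs) - y[test][:, np.newaxis]
    return (residues ** 2).mean(axis=0)  # one MSE per alpha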
class LinearModelCV(six.with_metaclass(ABCMeta, LinearModel)):
"""Base class for iterative model fitting along a regularization path"""
@abstractmethod
def __init__(self, eps=1e-3, n_alphas=100, alphas=None, fit_intercept=True,
normalize=False, precompute='auto', max_iter=1000, tol=1e-4,
copy_X=True, cv=None, verbose=False, n_jobs=1,
positive=False, random_state=None, selection='cyclic'):
self.eps = eps
self.n_alphas = n_alphas
self.alphas = alphas
self.fit_intercept = fit_intercept
self.normalize = normalize
self.precompute = precompute
self.max_iter = max_iter
self.tol = tol
self.copy_X = copy_X
self.cv = cv
self.verbose = verbose
self.n_jobs = n_jobs
self.positive = positive
self.random_state = random_state
self.selection = selection
def fit(self, X, y):
"""Fit linear model with coordinate descent
Fit is on grid of alphas and best alpha estimated by cross-validation.
Parameters
----------
X : {array-like}, shape (n_samples, n_features)
Training data. Pass directly as float64, Fortran-contiguous data
to avoid unnecessary memory duplication. If y is mono-output,
X can be sparse.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values
"""
y = np.asarray(y, dtype=np.float64)
if y.shape[0] == 0:
raise ValueError("y has 0 samples: %r" % y)
if hasattr(self, 'l1_ratio'):
model_str = 'ElasticNet'
else:
model_str = 'Lasso'
if isinstance(self, ElasticNetCV) or isinstance(self, LassoCV):
if model_str == 'ElasticNet':
model = ElasticNet()
else:
model = Lasso()
if y.ndim > 1 and y.shape[1] > 1:
raise ValueError("For multi-task outputs, use "
"MultiTask%sCV" % (model_str))
y = column_or_1d(y, warn=True)
else:
if sparse.isspmatrix(X):
raise TypeError("X should be dense but a sparse matrix was"
"passed")
elif y.ndim == 1:
raise ValueError("For mono-task outputs, use "
"%sCV" % (model_str))
if model_str == 'ElasticNet':
model = MultiTaskElasticNet()
else:
model = MultiTaskLasso()
if self.selection not in ["random", "cyclic"]:
raise ValueError("selection should be either random or cyclic.")
# This makes sure that there is no duplication in memory.
# Dealing right with copy_X is important in the following:
# Multiple functions touch X and subsamples of X and can induce a
# lot of duplication of memory
copy_X = self.copy_X and self.fit_intercept
if isinstance(X, np.ndarray) or sparse.isspmatrix(X):
# Keep a reference to X
reference_to_old_X = X
# Let us not impose fortran ordering or float64 so far: it is
# not useful for the cross-validation loop and will be done
# by the model fitting itself
X = check_array(X, 'csc', copy=False)
if sparse.isspmatrix(X):
if (hasattr(reference_to_old_X, "data") and
not np.may_share_memory(reference_to_old_X.data, X.data)):
# X is a sparse matrix and has been copied
copy_X = False
elif not np.may_share_memory(reference_to_old_X, X):
# X has been copied
copy_X = False
del reference_to_old_X
else:
X = check_array(X, 'csc', dtype=np.float64, order='F', copy=copy_X)
copy_X = False
if X.shape[0] != y.shape[0]:
raise ValueError("X and y have inconsistent dimensions (%d != %d)"
% (X.shape[0], y.shape[0]))
# All LinearModelCV parameters except 'cv' are acceptable
path_params = self.get_params()
if 'l1_ratio' in path_params:
l1_ratios = np.atleast_1d(path_params['l1_ratio'])
# For the first path, we need to set l1_ratio
path_params['l1_ratio'] = l1_ratios[0]
else:
l1_ratios = [1, ]
path_params.pop('cv', None)
path_params.pop('n_jobs', None)
alphas = self.alphas
n_l1_ratio = len(l1_ratios)
if alphas is None:
alphas = []
for l1_ratio in l1_ratios:
alphas.append(_alpha_grid(
X, y, l1_ratio=l1_ratio,
fit_intercept=self.fit_intercept,
eps=self.eps, n_alphas=self.n_alphas,
normalize=self.normalize,
copy_X=self.copy_X))
else:
# Making sure alphas is properly ordered.
alphas = np.tile(np.sort(alphas)[::-1], (n_l1_ratio, 1))
# We want n_alphas to be the number of alphas used for each l1_ratio.
n_alphas = len(alphas[0])
path_params.update({'n_alphas': n_alphas})
path_params['copy_X'] = copy_X
# We are not computing in parallel, we can modify X
# inplace in the folds
if not (self.n_jobs == 1 or self.n_jobs is None):
path_params['copy_X'] = False
# init cross-validation generator
cv = check_cv(self.cv, X)
# Compute path for all folds and compute MSE to get the best alpha
folds = list(cv)
best_mse = np.inf
# We do a double for loop folded in one, in order to be able to
# iterate in parallel on l1_ratio and folds
jobs = (delayed(_path_residuals)(X, y, train, test, self.path,
path_params, alphas=this_alphas,
l1_ratio=this_l1_ratio, X_order='F',
dtype=np.float64)
for this_l1_ratio, this_alphas in zip(l1_ratios, alphas)
for train, test in folds)
mse_paths = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,
backend="threading")(jobs)
mse_paths = np.reshape(mse_paths, (n_l1_ratio, len(folds), -1))
mean_mse = np.mean(mse_paths, axis=1)
self.mse_path_ = np.squeeze(np.rollaxis(mse_paths, 2, 1))
for l1_ratio, l1_alphas, mse_alphas in zip(l1_ratios, alphas,
mean_mse):
i_best_alpha = np.argmin(mse_alphas)
this_best_mse = mse_alphas[i_best_alpha]
if this_best_mse < best_mse:
best_alpha = l1_alphas[i_best_alpha]
best_l1_ratio = l1_ratio
best_mse = this_best_mse
self.l1_ratio_ = best_l1_ratio
self.alpha_ = best_alpha
if self.alphas is None:
self.alphas_ = np.asarray(alphas)
if n_l1_ratio == 1:
self.alphas_ = self.alphas_[0]
# Remove duplicate alphas in case alphas is provided.
else:
self.alphas_ = np.asarray(alphas[0])
# Refit the model with the parameters selected
common_params = dict((name, value)
for name, value in self.get_params().items()
if name in model.get_params())
model.set_params(**common_params)
model.alpha = best_alpha
model.l1_ratio = best_l1_ratio
model.copy_X = copy_X
model.precompute = False
model.fit(X, y)
if not hasattr(self, 'l1_ratio'):
del self.l1_ratio_
self.coef_ = model.coef_
self.intercept_ = model.intercept_
self.dual_gap_ = model.dual_gap_
self.n_iter_ = model.n_iter_
return self
class LassoCV(LinearModelCV, RegressorMixin):
"""Lasso linear model with iterative fitting along a regularization path
The best model is selected by cross-validation.
The optimization objective for Lasso is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
Read more in the :ref:`User Guide <lasso>`.
Parameters
----------
eps : float, optional
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``.
n_alphas : int, optional
Number of alphas along the regularization path
alphas : numpy array, optional
List of alphas where to compute the models.
If ``None`` alphas are set automatically
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
max_iter : int, optional
The maximum number of iterations
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
verbose : bool or integer
Amount of verbosity.
n_jobs : integer, optional
Number of CPUs to use during the cross validation. If ``-1``, use
all the CPUs.
positive : bool, optional
If positive, restrict regression coefficients to be positive
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
fit_intercept : boolean, default True
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
Attributes
----------
alpha_ : float
The amount of penalization chosen by cross validation
coef_ : array, shape (n_features,) | (n_targets, n_features)
parameter vector (w in the cost function formula)
intercept_ : float | array, shape (n_targets,)
independent term in decision function.
mse_path_ : array, shape (n_alphas, n_folds)
mean square error for the test set on each fold, varying alpha
alphas_ : numpy array, shape (n_alphas,)
The grid of alphas used for fitting
dual_gap_ : ndarray, shape ()
The dual gap at the end of the optimization for the optimal alpha
(``alpha_``).
n_iter_ : int
number of iterations run by the coordinate descent solver to reach
the specified tolerance for the optimal alpha.
Notes
-----
See examples/linear_model/lasso_path_with_crossvalidation.py
for an example.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
See also
--------
lars_path
lasso_path
LassoLars
Lasso
LassoLarsCV
"""
path = staticmethod(lasso_path)
def __init__(self, eps=1e-3, n_alphas=100, alphas=None, fit_intercept=True,
normalize=False, precompute='auto', max_iter=1000, tol=1e-4,
copy_X=True, cv=None, verbose=False, n_jobs=1,
positive=False, random_state=None, selection='cyclic'):
super(LassoCV, self).__init__(
eps=eps, n_alphas=n_alphas, alphas=alphas,
fit_intercept=fit_intercept, normalize=normalize,
precompute=precompute, max_iter=max_iter, tol=tol, copy_X=copy_X,
cv=cv, verbose=verbose, n_jobs=n_jobs, positive=positive,
random_state=random_state, selection=selection)
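# A minimal usage sketch for ``LassoCV`` (toy data; the helper name is
# illustrative only):
def _example_lasso_cv():
    rng = np.random.RandomState(0)
    X = rng.randn(30, 4)
    y = X[:, 0] + 0.1 * rng.randn(30)
    model = LassoCV(n_alphas=10, cv=3).fit(X, y)
    # alpha_ is the cross-validated choice; mse_path_ has shape
    # (n_alphas, n_folds).
    return model.alpha_, model.mse_path_.shape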
class ElasticNetCV(LinearModelCV, RegressorMixin):
"""Elastic Net model with iterative fitting along a regularization path
The best model is selected by cross-validation.
Read more in the :ref:`User Guide <elastic_net>`.
Parameters
----------
l1_ratio : float, optional
float between 0 and 1 passed to ElasticNet (scaling between
l1 and l2 penalties). For ``l1_ratio = 0``
the penalty is an L2 penalty. For ``l1_ratio = 1`` it is an L1 penalty.
For ``0 < l1_ratio < 1``, the penalty is a combination of L1 and L2
This parameter can be a list, in which case the different
values are tested by cross-validation and the one giving the best
prediction score is used. Note that a good choice of list of
values for l1_ratio is often to put more values close to 1
(i.e. Lasso) and less close to 0 (i.e. Ridge), as in ``[.1, .5, .7,
.9, .95, .99, 1]``
eps : float, optional
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``.
n_alphas : int, optional
Number of alphas along the regularization path, used for each l1_ratio.
alphas : numpy array, optional
List of alphas where to compute the models.
If None alphas are set automatically
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
max_iter : int, optional
The maximum number of iterations
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
verbose : bool or integer
Amount of verbosity.
n_jobs : integer, optional
Number of CPUs to use during the cross validation. If ``-1``, use
all the CPUs.
positive : bool, optional
When set to ``True``, forces the coefficients to be positive.
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
Attributes
----------
alpha_ : float
The amount of penalization chosen by cross validation
l1_ratio_ : float
The compromise between l1 and l2 penalization chosen by
cross validation
coef_ : array, shape (n_features,) | (n_targets, n_features)
Parameter vector (w in the cost function formula),
    intercept_ : float | array, shape (n_targets,)
Independent term in the decision function.
mse_path_ : array, shape (n_l1_ratio, n_alpha, n_folds)
Mean square error for the test set on each fold, varying l1_ratio and
alpha.
alphas_ : numpy array, shape (n_alphas,) or (n_l1_ratio, n_alphas)
The grid of alphas used for fitting, for each l1_ratio.
n_iter_ : int
number of iterations run by the coordinate descent solver to reach
the specified tolerance for the optimal alpha.
Notes
-----
See examples/linear_model/lasso_path_with_crossvalidation.py
for an example.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
The parameter l1_ratio corresponds to alpha in the glmnet R package
while alpha corresponds to the lambda parameter in glmnet.
More specifically, the optimization objective is::
1 / (2 * n_samples) * ||y - Xw||^2_2 +
+ alpha * l1_ratio * ||w||_1
+ 0.5 * alpha * (1 - l1_ratio) * ||w||^2_2
If you are interested in controlling the L1 and L2 penalty
separately, keep in mind that this is equivalent to::
a * L1 + b * L2
for::
alpha = a + b and l1_ratio = a / (a + b).
See also
--------
enet_path
ElasticNet
"""
path = staticmethod(enet_path)
def __init__(self, l1_ratio=0.5, eps=1e-3, n_alphas=100, alphas=None,
fit_intercept=True, normalize=False, precompute='auto',
max_iter=1000, tol=1e-4, cv=None, copy_X=True,
verbose=0, n_jobs=1, positive=False, random_state=None,
selection='cyclic'):
self.l1_ratio = l1_ratio
self.eps = eps
self.n_alphas = n_alphas
self.alphas = alphas
self.fit_intercept = fit_intercept
self.normalize = normalize
self.precompute = precompute
self.max_iter = max_iter
self.tol = tol
self.cv = cv
self.copy_X = copy_X
self.verbose = verbose
self.n_jobs = n_jobs
self.positive = positive
self.random_state = random_state
self.selection = selection
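# A minimal usage sketch for ``ElasticNetCV`` with several candidate
# l1_ratio values (toy data; the helper name is illustrative only):
def _example_elastic_net_cv():
    rng = np.random.RandomState(0)
    X = rng.randn(30, 4)
    y = X[:, 0] - 2 * X[:, 1] + 0.1 * rng.randn(30)
    # Both the mixing parameter and the regularization strength are chosen
    # by cross-validation.
    model = ElasticNetCV(l1_ratio=[.1, .5, .9, 1.], n_alphas=10,
                         cv=3).fit(X, y)
    return model.l1_ratio_, model.alpha_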
###############################################################################
# Multi Task ElasticNet and Lasso models (with joint feature selection)
class MultiTaskElasticNet(Lasso):
"""Multi-task ElasticNet model trained with L1/L2 mixed-norm as regularizer
The optimization objective for MultiTaskElasticNet is::
(1 / (2 * n_samples)) * ||Y - XW||^Fro_2
+ alpha * l1_ratio * ||W||_21
+ 0.5 * alpha * (1 - l1_ratio) * ||W||_Fro^2
Where::
||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}
i.e. the sum of norm of each row.
Read more in the :ref:`User Guide <multi_task_lasso>`.
Parameters
----------
alpha : float, optional
Constant that multiplies the L1/L2 term. Defaults to 1.0
l1_ratio : float
The ElasticNet mixing parameter, with 0 < l1_ratio <= 1.
        For l1_ratio = 1 the penalty is an L1/L2 penalty. For l1_ratio = 0 it
        is an L2 penalty.
For ``0 < l1_ratio < 1``, the penalty is a combination of L1/L2 and L2.
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
max_iter : int, optional
The maximum number of iterations
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
warm_start : bool, optional
When set to ``True``, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
Attributes
----------
intercept_ : array, shape (n_tasks,)
Independent term in decision function.
coef_ : array, shape (n_tasks, n_features)
Parameter vector (W in the cost function formula). If a 1D y is \
passed in at fit (non multi-task usage), ``coef_`` is then a 1D array
n_iter_ : int
number of iterations run by the coordinate descent solver to reach
the specified tolerance.
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.MultiTaskElasticNet(alpha=0.1)
>>> clf.fit([[0,0], [1, 1], [2, 2]], [[0, 0], [1, 1], [2, 2]])
... #doctest: +NORMALIZE_WHITESPACE
MultiTaskElasticNet(alpha=0.1, copy_X=True, fit_intercept=True,
l1_ratio=0.5, max_iter=1000, normalize=False, random_state=None,
selection='cyclic', tol=0.0001, warm_start=False)
>>> print(clf.coef_)
[[ 0.45663524 0.45612256]
[ 0.45663524 0.45612256]]
>>> print(clf.intercept_)
[ 0.0872422 0.0872422]
See also
--------
ElasticNet, MultiTaskLasso
Notes
-----
The algorithm used to fit the model is coordinate descent.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
"""
def __init__(self, alpha=1.0, l1_ratio=0.5, fit_intercept=True,
normalize=False, copy_X=True, max_iter=1000, tol=1e-4,
warm_start=False, random_state=None, selection='cyclic'):
self.l1_ratio = l1_ratio
self.alpha = alpha
self.coef_ = None
self.fit_intercept = fit_intercept
self.normalize = normalize
self.max_iter = max_iter
self.copy_X = copy_X
self.tol = tol
self.warm_start = warm_start
self.random_state = random_state
self.selection = selection
def fit(self, X, y):
"""Fit MultiTaskLasso model with coordinate descent
Parameters
-----------
X : ndarray, shape (n_samples, n_features)
Data
y : ndarray, shape (n_samples, n_tasks)
Target
Notes
-----
Coordinate descent is an algorithm that considers each column of
data at a time hence it will automatically convert the X input
as a Fortran-contiguous numpy array if necessary.
To avoid memory re-allocation it is advised to allocate the
initial data in memory directly using that format.
"""
# X and y must be of type float64
X = check_array(X, dtype=np.float64, order='F',
copy=self.copy_X and self.fit_intercept)
y = np.asarray(y, dtype=np.float64)
if hasattr(self, 'l1_ratio'):
model_str = 'ElasticNet'
else:
model_str = 'Lasso'
if y.ndim == 1:
raise ValueError("For mono-task outputs, use %s" % model_str)
n_samples, n_features = X.shape
_, n_tasks = y.shape
if n_samples != y.shape[0]:
raise ValueError("X and y have inconsistent dimensions (%d != %d)"
% (n_samples, y.shape[0]))
X, y, X_mean, y_mean, X_std = center_data(
X, y, self.fit_intercept, self.normalize, copy=False)
if not self.warm_start or self.coef_ is None:
self.coef_ = np.zeros((n_tasks, n_features), dtype=np.float64,
order='F')
l1_reg = self.alpha * self.l1_ratio * n_samples
l2_reg = self.alpha * (1.0 - self.l1_ratio) * n_samples
self.coef_ = np.asfortranarray(self.coef_) # coef contiguous in memory
if self.selection not in ['random', 'cyclic']:
raise ValueError("selection should be either random or cyclic.")
random = (self.selection == 'random')
self.coef_, self.dual_gap_, self.eps_, self.n_iter_ = \
cd_fast.enet_coordinate_descent_multi_task(
self.coef_, l1_reg, l2_reg, X, y, self.max_iter, self.tol,
check_random_state(self.random_state), random)
self._set_intercept(X_mean, y_mean, X_std)
if self.dual_gap_ > self.eps_:
warnings.warn('Objective did not converge, you might want'
' to increase the number of iterations')
# return self for chaining fit and predict calls
return self
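# A worked example of the mixed ell_21 norm used in the multi-task
# objectives above, computed explicitly for a small matrix (the helper name
# and values are illustrative only):
def _example_l21_norm():
    W = np.array([[3., 4.],
                  [0., 0.],
                  [1., 2.]])
    # ||W||_21 = sum over rows of the row-wise euclidean norm:
    # 5.0 + 0.0 + sqrt(5)
    return np.sqrt((W ** 2).sum(axis=1)).sum()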
class MultiTaskLasso(MultiTaskElasticNet):
"""Multi-task Lasso model trained with L1/L2 mixed-norm as regularizer
The optimization objective for Lasso is::
(1 / (2 * n_samples)) * ||Y - XW||^2_Fro + alpha * ||W||_21
Where::
||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}
    i.e. the sum of norm of each row.
Read more in the :ref:`User Guide <multi_task_lasso>`.
Parameters
----------
alpha : float, optional
Constant that multiplies the L1/L2 term. Defaults to 1.0
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
max_iter : int, optional
The maximum number of iterations
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
warm_start : bool, optional
When set to ``True``, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
Attributes
----------
coef_ : array, shape (n_tasks, n_features)
parameter vector (W in the cost function formula)
intercept_ : array, shape (n_tasks,)
independent term in decision function.
n_iter_ : int
number of iterations run by the coordinate descent solver to reach
the specified tolerance.
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.MultiTaskLasso(alpha=0.1)
>>> clf.fit([[0,0], [1, 1], [2, 2]], [[0, 0], [1, 1], [2, 2]])
MultiTaskLasso(alpha=0.1, copy_X=True, fit_intercept=True, max_iter=1000,
normalize=False, random_state=None, selection='cyclic', tol=0.0001,
warm_start=False)
>>> print(clf.coef_)
[[ 0.89393398 0. ]
[ 0.89393398 0. ]]
>>> print(clf.intercept_)
[ 0.10606602 0.10606602]
See also
--------
Lasso, MultiTaskElasticNet
Notes
-----
The algorithm used to fit the model is coordinate descent.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
"""
def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
copy_X=True, max_iter=1000, tol=1e-4, warm_start=False,
random_state=None, selection='cyclic'):
self.alpha = alpha
self.coef_ = None
self.fit_intercept = fit_intercept
self.normalize = normalize
self.max_iter = max_iter
self.copy_X = copy_X
self.tol = tol
self.warm_start = warm_start
self.l1_ratio = 1.0
self.random_state = random_state
self.selection = selection
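# A minimal sketch of the joint feature selection performed by
# ``MultiTaskLasso`` (toy data; the helper name is illustrative only):
def _example_multi_task_sparsity():
    rng = np.random.RandomState(0)
    X = rng.randn(20, 5)
    W_true = np.zeros((5, 2))
    W_true[:2] = rng.randn(2, 2)
    Y = np.dot(X, W_true) + 0.01 * rng.randn(20, 2)
    model = MultiTaskLasso(alpha=0.1).fit(X, Y)
    # Features are selected jointly: each column of coef_ (one feature across
    # both tasks) is either entirely zero or entirely non-zero.
    return model.coef_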
class MultiTaskElasticNetCV(LinearModelCV, RegressorMixin):
"""Multi-task L1/L2 ElasticNet with built-in cross-validation.
The optimization objective for MultiTaskElasticNet is::
(1 / (2 * n_samples)) * ||Y - XW||^Fro_2
+ alpha * l1_ratio * ||W||_21
+ 0.5 * alpha * (1 - l1_ratio) * ||W||_Fro^2
Where::
||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}
i.e. the sum of norm of each row.
Read more in the :ref:`User Guide <multi_task_lasso>`.
Parameters
----------
eps : float, optional
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``.
alphas : array-like, optional
List of alphas where to compute the models.
If not provided, set automatically.
n_alphas : int, optional
Number of alphas along the regularization path
l1_ratio : float or array of floats
The ElasticNet mixing parameter, with 0 < l1_ratio <= 1.
        For l1_ratio = 1 the penalty is an L1/L2 penalty. For l1_ratio = 0 it
        is an L2 penalty.
For ``0 < l1_ratio < 1``, the penalty is a combination of L1/L2 and L2.
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
max_iter : int, optional
The maximum number of iterations
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
verbose : bool or integer
Amount of verbosity.
n_jobs : integer, optional
Number of CPUs to use during the cross validation. If ``-1``, use
all the CPUs. Note that this is used only if multiple values for
l1_ratio are given.
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
Attributes
----------
intercept_ : array, shape (n_tasks,)
Independent term in decision function.
coef_ : array, shape (n_tasks, n_features)
Parameter vector (W in the cost function formula).
alpha_ : float
The amount of penalization chosen by cross validation
mse_path_ : array, shape (n_alphas, n_folds) or \
(n_l1_ratio, n_alphas, n_folds)
mean square error for the test set on each fold, varying alpha
alphas_ : numpy array, shape (n_alphas,) or (n_l1_ratio, n_alphas)
The grid of alphas used for fitting, for each l1_ratio
l1_ratio_ : float
best l1_ratio obtained by cross-validation.
n_iter_ : int
number of iterations run by the coordinate descent solver to reach
the specified tolerance for the optimal alpha.
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.MultiTaskElasticNetCV()
>>> clf.fit([[0,0], [1, 1], [2, 2]],
... [[0, 0], [1, 1], [2, 2]])
... #doctest: +NORMALIZE_WHITESPACE
MultiTaskElasticNetCV(alphas=None, copy_X=True, cv=None, eps=0.001,
fit_intercept=True, l1_ratio=0.5, max_iter=1000, n_alphas=100,
n_jobs=1, normalize=False, random_state=None, selection='cyclic',
tol=0.0001, verbose=0)
>>> print(clf.coef_)
[[ 0.52875032 0.46958558]
[ 0.52875032 0.46958558]]
>>> print(clf.intercept_)
[ 0.00166409 0.00166409]
See also
--------
MultiTaskElasticNet
ElasticNetCV
MultiTaskLassoCV
Notes
-----
The algorithm used to fit the model is coordinate descent.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
"""
path = staticmethod(enet_path)
def __init__(self, l1_ratio=0.5, eps=1e-3, n_alphas=100, alphas=None,
fit_intercept=True, normalize=False,
max_iter=1000, tol=1e-4, cv=None, copy_X=True,
verbose=0, n_jobs=1, random_state=None, selection='cyclic'):
self.l1_ratio = l1_ratio
self.eps = eps
self.n_alphas = n_alphas
self.alphas = alphas
self.fit_intercept = fit_intercept
self.normalize = normalize
self.max_iter = max_iter
self.tol = tol
self.cv = cv
self.copy_X = copy_X
self.verbose = verbose
self.n_jobs = n_jobs
self.random_state = random_state
self.selection = selection
class MultiTaskLassoCV(LinearModelCV, RegressorMixin):
"""Multi-task L1/L2 Lasso with built-in cross-validation.
The optimization objective for MultiTaskLasso is::
(1 / (2 * n_samples)) * ||Y - XW||^Fro_2 + alpha * ||W||_21
Where::
||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}
i.e. the sum of norm of each row.
Read more in the :ref:`User Guide <multi_task_lasso>`.
Parameters
----------
eps : float, optional
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``.
alphas : array-like, optional
List of alphas where to compute the models.
If not provided, set automatically.
n_alphas : int, optional
Number of alphas along the regularization path
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
max_iter : int, optional
The maximum number of iterations.
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, :class:`KFold` is used.
Refer to the :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
verbose : bool or integer
Amount of verbosity.
n_jobs : integer, optional
Number of CPUs to use during the cross validation. If ``-1``, use
all the CPUs. Note that this is used only if multiple values for
l1_ratio are given.
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
Attributes
----------
intercept_ : array, shape (n_tasks,)
Independent term in decision function.
coef_ : array, shape (n_tasks, n_features)
Parameter vector (W in the cost function formula).
alpha_ : float
The amount of penalization chosen by cross validation
mse_path_ : array, shape (n_alphas, n_folds)
mean square error for the test set on each fold, varying alpha
alphas_ : numpy array, shape (n_alphas,)
The grid of alphas used for fitting.
n_iter_ : int
number of iterations run by the coordinate descent solver to reach
the specified tolerance for the optimal alpha.
See also
--------
MultiTaskElasticNet
ElasticNetCV
MultiTaskElasticNetCV
Notes
-----
The algorithm used to fit the model is coordinate descent.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
"""
path = staticmethod(lasso_path)
def __init__(self, eps=1e-3, n_alphas=100, alphas=None, fit_intercept=True,
normalize=False, max_iter=1000, tol=1e-4, copy_X=True,
cv=None, verbose=False, n_jobs=1, random_state=None,
selection='cyclic'):
super(MultiTaskLassoCV, self).__init__(
eps=eps, n_alphas=n_alphas, alphas=alphas,
fit_intercept=fit_intercept, normalize=normalize,
max_iter=max_iter, tol=tol, copy_X=copy_X,
cv=cv, verbose=verbose, n_jobs=n_jobs, random_state=random_state,
selection=selection)
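# A minimal usage sketch for MultiTaskLassoCV, mirroring the doctest style of
# MultiTaskElasticNetCV above (the toy data is an arbitrary choice, and this
# sketch is illustrative rather than part of the module itself):
#
# >>> from sklearn import linear_model
# >>> clf = linear_model.MultiTaskLassoCV()
# >>> clf.fit([[0, 0], [1, 1], [2, 2]], [[0, 0], [1, 1], [2, 2]])
# >>> clf.alpha_       # penalty strength chosen by cross-validation
# >>> clf.coef_.shape  # (n_tasks, n_features) == (2, 2) for this toy data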
|
bsd-3-clause
|
cbertinato/pandas
|
pandas/tests/indexes/multi/test_duplicates.py
|
1
|
9588
|
from itertools import product
import numpy as np
import pytest
from pandas._libs import hashtable
from pandas import DatetimeIndex, MultiIndex
import pandas.util.testing as tm
@pytest.mark.parametrize('names', [None, ['first', 'second']])
def test_unique(names):
mi = MultiIndex.from_arrays([[1, 2, 1, 2], [1, 1, 1, 2]], names=names)
res = mi.unique()
exp = MultiIndex.from_arrays([[1, 2, 2], [1, 1, 2]], names=mi.names)
tm.assert_index_equal(res, exp)
mi = MultiIndex.from_arrays([list('aaaa'), list('abab')],
names=names)
res = mi.unique()
exp = MultiIndex.from_arrays([list('aa'), list('ab')], names=mi.names)
tm.assert_index_equal(res, exp)
mi = MultiIndex.from_arrays([list('aaaa'), list('aaaa')], names=names)
res = mi.unique()
exp = MultiIndex.from_arrays([['a'], ['a']], names=mi.names)
tm.assert_index_equal(res, exp)
# GH #20568 - empty MI
mi = MultiIndex.from_arrays([[], []], names=names)
res = mi.unique()
tm.assert_index_equal(mi, res)
def test_unique_datetimelike():
idx1 = DatetimeIndex(['2015-01-01', '2015-01-01', '2015-01-01',
'2015-01-01', 'NaT', 'NaT'])
idx2 = DatetimeIndex(['2015-01-01', '2015-01-01', '2015-01-02',
'2015-01-02', 'NaT', '2015-01-01'],
tz='Asia/Tokyo')
result = MultiIndex.from_arrays([idx1, idx2]).unique()
eidx1 = DatetimeIndex(['2015-01-01', '2015-01-01', 'NaT', 'NaT'])
eidx2 = DatetimeIndex(['2015-01-01', '2015-01-02',
'NaT', '2015-01-01'],
tz='Asia/Tokyo')
exp = MultiIndex.from_arrays([eidx1, eidx2])
tm.assert_index_equal(result, exp)
@pytest.mark.parametrize('level', [0, 'first', 1, 'second'])
def test_unique_level(idx, level):
# GH #17896 - with level= argument
result = idx.unique(level=level)
expected = idx.get_level_values(level).unique()
tm.assert_index_equal(result, expected)
# With already unique level
mi = MultiIndex.from_arrays([[1, 3, 2, 4], [1, 3, 2, 5]],
names=['first', 'second'])
result = mi.unique(level=level)
expected = mi.get_level_values(level)
tm.assert_index_equal(result, expected)
# With empty MI
mi = MultiIndex.from_arrays([[], []], names=['first', 'second'])
result = mi.unique(level=level)
expected = mi.get_level_values(level)
@pytest.mark.parametrize('dropna', [True, False])
def test_get_unique_index(idx, dropna):
mi = idx[[0, 1, 0, 1, 1, 0, 0]]
expected = mi._shallow_copy(mi[[0, 1]])
result = mi._get_unique_index(dropna=dropna)
assert result.unique
tm.assert_index_equal(result, expected)
def test_duplicate_multiindex_codes():
# GH 17464
# Make sure that a MultiIndex with duplicate levels throws a ValueError
with pytest.raises(ValueError):
mi = MultiIndex([['A'] * 10, range(10)], [[0] * 10, range(10)])
# And that using set_levels with duplicate levels fails
mi = MultiIndex.from_arrays([['A', 'A', 'B', 'B', 'B'],
[1, 2, 1, 2, 3]])
with pytest.raises(ValueError):
mi.set_levels([['A', 'B', 'A', 'A', 'B'], [2, 1, 3, -2, 5]],
inplace=True)
@pytest.mark.parametrize('names', [['a', 'b', 'a'], [1, 1, 2],
[1, 'a', 1]])
def test_duplicate_level_names(names):
# GH18872, GH19029
mi = MultiIndex.from_product([[0, 1]] * 3, names=names)
assert mi.names == names
# With .rename()
mi = MultiIndex.from_product([[0, 1]] * 3)
mi = mi.rename(names)
assert mi.names == names
# With .rename(., level=)
mi.rename(names[1], level=1, inplace=True)
mi = mi.rename([names[0], names[2]], level=[0, 2])
assert mi.names == names
def test_duplicate_meta_data():
# GH 10115
mi = MultiIndex(
levels=[[0, 1], [0, 1, 2]],
codes=[[0, 0, 0, 0, 1, 1, 1],
[0, 1, 2, 0, 0, 1, 2]])
for idx in [mi,
mi.set_names([None, None]),
mi.set_names([None, 'Num']),
mi.set_names(['Upper', 'Num']), ]:
assert idx.has_duplicates
assert idx.drop_duplicates().names == idx.names
def test_has_duplicates(idx, idx_dup):
# see fixtures
assert idx.is_unique is True
assert idx.has_duplicates is False
assert idx_dup.is_unique is False
assert idx_dup.has_duplicates is True
mi = MultiIndex(levels=[[0, 1], [0, 1, 2]],
codes=[[0, 0, 0, 0, 1, 1, 1],
[0, 1, 2, 0, 0, 1, 2]])
assert mi.is_unique is False
assert mi.has_duplicates is True
# single instance of NaN
mi_nan = MultiIndex(levels=[['a', 'b'], [0, 1]],
codes=[[-1, 0, 0, 1, 1], [-1, 0, 1, 0, 1]])
assert mi_nan.is_unique is True
assert mi_nan.has_duplicates is False
# multiple instances of NaN
mi_nan_dup = MultiIndex(levels=[['a', 'b'], [0, 1]],
codes=[[-1, -1, 0, 0, 1, 1], [-1, -1, 0, 1, 0, 1]])
assert mi_nan_dup.is_unique is False
assert mi_nan_dup.has_duplicates is True
def test_has_duplicates_from_tuples():
# GH 9075
t = [('x', 'out', 'z', 5, 'y', 'in', 'z', 169),
('x', 'out', 'z', 7, 'y', 'in', 'z', 119),
('x', 'out', 'z', 9, 'y', 'in', 'z', 135),
('x', 'out', 'z', 13, 'y', 'in', 'z', 145),
('x', 'out', 'z', 14, 'y', 'in', 'z', 158),
('x', 'out', 'z', 16, 'y', 'in', 'z', 122),
('x', 'out', 'z', 17, 'y', 'in', 'z', 160),
('x', 'out', 'z', 18, 'y', 'in', 'z', 180),
('x', 'out', 'z', 20, 'y', 'in', 'z', 143),
('x', 'out', 'z', 21, 'y', 'in', 'z', 128),
('x', 'out', 'z', 22, 'y', 'in', 'z', 129),
('x', 'out', 'z', 25, 'y', 'in', 'z', 111),
('x', 'out', 'z', 28, 'y', 'in', 'z', 114),
('x', 'out', 'z', 29, 'y', 'in', 'z', 121),
('x', 'out', 'z', 31, 'y', 'in', 'z', 126),
('x', 'out', 'z', 32, 'y', 'in', 'z', 155),
('x', 'out', 'z', 33, 'y', 'in', 'z', 123),
('x', 'out', 'z', 12, 'y', 'in', 'z', 144)]
mi = MultiIndex.from_tuples(t)
assert not mi.has_duplicates
def test_has_duplicates_overflow():
# handle int64 overflow if possible
def check(nlevels, with_nulls):
codes = np.tile(np.arange(500), 2)
level = np.arange(500)
if with_nulls: # inject some null values
codes[500] = -1 # common nan value
codes = [codes.copy() for i in range(nlevels)]
for i in range(nlevels):
codes[i][500 + i - nlevels // 2] = -1
codes += [np.array([-1, 1]).repeat(500)]
else:
codes = [codes] * nlevels + [np.arange(2).repeat(500)]
levels = [level] * nlevels + [[0, 1]]
# no dups
mi = MultiIndex(levels=levels, codes=codes)
assert not mi.has_duplicates
# with a dup
if with_nulls:
def f(a):
return np.insert(a, 1000, a[0])
codes = list(map(f, codes))
mi = MultiIndex(levels=levels, codes=codes)
else:
values = mi.values.tolist()
mi = MultiIndex.from_tuples(values + [values[0]])
assert mi.has_duplicates
# no overflow
check(4, False)
check(4, True)
# overflow possible
check(8, False)
check(8, True)
@pytest.mark.parametrize('keep, expected', [
('first', np.array([False, False, False, True, True, False])),
('last', np.array([False, True, True, False, False, False])),
(False, np.array([False, True, True, True, True, False]))
])
def test_duplicated(idx_dup, keep, expected):
result = idx_dup.duplicated(keep=keep)
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize('keep', ['first', 'last', False])
def test_duplicated_large(keep):
# GH 9125
n, k = 200, 5000
levels = [np.arange(n), tm.makeStringIndex(n), 1000 + np.arange(n)]
codes = [np.random.choice(n, k * n) for lev in levels]
mi = MultiIndex(levels=levels, codes=codes)
result = mi.duplicated(keep=keep)
expected = hashtable.duplicated_object(mi.values, keep=keep)
tm.assert_numpy_array_equal(result, expected)
def test_get_duplicates():
# GH5873
for a in [101, 102]:
mi = MultiIndex.from_arrays([[101, a], [3.5, np.nan]])
assert not mi.has_duplicates
with tm.assert_produces_warning(FutureWarning):
# Deprecated - see GH20239
assert mi.get_duplicates().equals(MultiIndex.from_arrays([[], []]))
tm.assert_numpy_array_equal(mi.duplicated(),
np.zeros(2, dtype='bool'))
for n in range(1, 6): # 1st level shape
for m in range(1, 5): # 2nd level shape
# all possible unique combinations, including nan
codes = product(range(-1, n), range(-1, m))
mi = MultiIndex(levels=[list('abcde')[:n], list('WXYZ')[:m]],
codes=np.random.permutation(list(codes)).T)
assert len(mi) == (n + 1) * (m + 1)
assert not mi.has_duplicates
with tm.assert_produces_warning(FutureWarning):
# Deprecated - see GH20239
assert mi.get_duplicates().equals(MultiIndex.from_arrays(
[[], []]))
tm.assert_numpy_array_equal(mi.duplicated(),
np.zeros(len(mi), dtype='bool'))
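# A minimal sketch of the semantics exercised by test_duplicated above, kept as
# a non-test helper; the values are illustrative and not taken from the fixtures.
def _demo_duplicated():
    mi = MultiIndex.from_arrays([['a', 'a', 'b'], [1, 1, 2]])
    # keep='first' leaves the first occurrence unmarked, keep='last' the last,
    # and keep=False marks every member of a duplicated group.
    return (mi.duplicated(keep='first'),   # [False,  True, False]
            mi.duplicated(keep='last'),    # [ True, False, False]
            mi.duplicated(keep=False))     # [ True,  True, False]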
|
bsd-3-clause
|
nysbc/Anisotropy
|
ThreeDFSC/programs/Chimera/lineplot_template.py
|
1
|
3836
|
# -----------------------------------------------------------------------------
# Make a matplotlib plot of density values along a ray from center of a map.
# Use the current view direction and update plot as models are rotated.
# For Yong Zi Tan for 3D FSC plotting.
#
# This script registers command "fscplot" which takes one argument, the density
# map for which the plot is made. For example,
#
# fscplot #3D FSC Map #Actual Density Map
#
# Created by Tom Goddard (Thanks!)
# Modified by Yong Zi Tan
#
def ray_values(v, direction):
d = v.data
center = [0.5*(s+1) for s in d.size]
radius = 0.5*min([s*t for s,t in zip(d.size, d.step)])
steps = max(d.size)
from Matrix import norm
dn = norm(direction)
from numpy import array, arange, float32, outer
dir = array(direction)/dn
spacing = radius/dn
radii = arange(0, steps, dtype = float32)*(radius/steps)
ray_points = outer(radii, dir)
values = v.interpolated_values(ray_points)
return radii, values, radius
# -----------------------------------------------------------------------------
#
def plot(x, y, xlabel, ylabel, title, fig = None):
import matplotlib.pyplot as plt
global_x = #==global_x==#
global_y = #==global_y==#
if fig is None:
fig = plt.figure()
fig.plot = ax = fig.add_subplot(1,1,1)
else:
ax = fig.plot
ax.clear()
plt.subplots_adjust(top=0.85)
ax.plot(x, y, linewidth=2.0)
ax.plot(global_x, global_y, 'r', linewidth=1.0) # Plot global FSC
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
ax.set_ylim(ymin = -0.2, ymax = 1.01)
ax.set_title(title)
ax.grid(True)
fig.canvas.manager.show()
return fig
# -----------------------------------------------------------------------------
#
def update_plot(fsc_map, fig = None):
xf = fsc_map.openState.xform
from chimera import Vector
direction = xf.inverse().apply(Vector(0,0,-1)).data()
preradii, values, radius = ray_values(fsc_map, direction)
radii = []
apix = #==apix==#
resolution_list = []
for i in range(len(preradii)):
radii.append(preradii[i]/(radius*2*apix))
for i in range(len(values)):
if values[i] < 0.143:
resolution_list.append(1/radii[i-1])
break
resolution = resolution_list[0]
#title = '3D FSC plotted on axis %.3g,%.3g,%.3g.' % direction
title = '3D FSC Plot.\nZ directional resolution (out-of-plane in blue) is %.2f.\nGlobal resolution (in red) is %.2f.' % (resolution, #==global_res==#)
fig = plot(radii, values, xlabel = 'Spatial Resolution', ylabel = 'Correlation', title = title, fig = fig)
color_map(resolution)
return fig
# -----------------------------------------------------------------------------
#
def color_map(resolution):
import chimera
from chimera import runCommand
maxres = #==maxres==#
minres = #==minres==#
a = (resolution-maxres)/(minres-maxres)
r, g, b = 1-a, 0.0, a
runCommand('color %0.2f,%0.2f,%0.2f,1.0 #1' % (r, g, b))
# -----------------------------------------------------------------------------
#
def fsc_plot(fscMap):
fig = update_plot(fscMap)
from chimera import triggers
h = triggers.addHandler('OpenState', motion_cb, (fscMap, fig))
# -----------------------------------------------------------------------------
#
def motion_cb(trigger_name, mf, trigger_data):
if 'transformation change' in trigger_data.reasons:
fsc_map, fig = mf
update_plot(fsc_map, fig)
# -----------------------------------------------------------------------------
#
def fscplot_cmd(cmdname, args):
from Commands import volume_arg, parse_arguments
req_args = [('fscMap', volume_arg)]
kw = parse_arguments(cmdname, args, req_args)
fsc_plot(**kw)
# -----------------------------------------------------------------------------
#
from Midas.midas_text import addCommand
addCommand('fscplot', fscplot_cmd)
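# The #==...==# tokens above are placeholders that a driver script is expected
# to substitute before this file is loaded into Chimera. The helper below is a
# hypothetical sketch of such a substitution step; its name and signature are
# not part of the original 3DFSC pipeline.
def fill_template(template_path, out_path, values):
    # values: dict mapping placeholder names ('apix', 'global_res', ...) to values
    with open(template_path) as f:
        text = f.read()
    for key, val in values.items():
        text = text.replace('#==%s==#' % key, str(val))
    with open(out_path, 'w') as f:
        f.write(text)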
|
mit
|
meduz/scikit-learn
|
sklearn/linear_model/tests/test_logistic.py
|
18
|
41552
|
import numpy as np
import scipy.sparse as sp
from scipy import linalg, optimize, sparse
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import raises
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_raise_message
from sklearn.exceptions import ConvergenceWarning
from sklearn.utils import compute_class_weight
from sklearn.utils.fixes import sp_version
from sklearn.linear_model.logistic import (
LogisticRegression,
logistic_regression_path, LogisticRegressionCV,
_logistic_loss_and_grad, _logistic_grad_hess,
_multinomial_grad_hess, _logistic_loss,
)
from sklearn.model_selection import StratifiedKFold
from sklearn.datasets import load_iris, make_classification
from sklearn.metrics import log_loss
from sklearn.preprocessing import LabelEncoder
X = [[-1, 0], [0, 1], [1, 1]]
X_sp = sp.csr_matrix(X)
Y1 = [0, 1, 1]
Y2 = [2, 1, 0]
iris = load_iris()
def check_predictions(clf, X, y):
"""Check that the model is able to fit the classification data"""
n_samples = len(y)
classes = np.unique(y)
n_classes = classes.shape[0]
predicted = clf.fit(X, y).predict(X)
assert_array_equal(clf.classes_, classes)
assert_equal(predicted.shape, (n_samples,))
assert_array_equal(predicted, y)
probabilities = clf.predict_proba(X)
assert_equal(probabilities.shape, (n_samples, n_classes))
assert_array_almost_equal(probabilities.sum(axis=1), np.ones(n_samples))
assert_array_equal(probabilities.argmax(axis=1), y)
def test_predict_2_classes():
# Simple sanity check on a 2 classes dataset
# Make sure it predicts the correct result on simple datasets.
check_predictions(LogisticRegression(random_state=0), X, Y1)
check_predictions(LogisticRegression(random_state=0), X_sp, Y1)
check_predictions(LogisticRegression(C=100, random_state=0), X, Y1)
check_predictions(LogisticRegression(C=100, random_state=0), X_sp, Y1)
check_predictions(LogisticRegression(fit_intercept=False,
random_state=0), X, Y1)
check_predictions(LogisticRegression(fit_intercept=False,
random_state=0), X_sp, Y1)
def test_error():
# Test for appropriate exception on errors
msg = "Penalty term must be positive"
assert_raise_message(ValueError, msg,
LogisticRegression(C=-1).fit, X, Y1)
assert_raise_message(ValueError, msg,
LogisticRegression(C="test").fit, X, Y1)
for LR in [LogisticRegression, LogisticRegressionCV]:
msg = "Tolerance for stopping criteria must be positive"
assert_raise_message(ValueError, msg, LR(tol=-1).fit, X, Y1)
assert_raise_message(ValueError, msg, LR(tol="test").fit, X, Y1)
msg = "Maximum number of iteration must be positive"
assert_raise_message(ValueError, msg, LR(max_iter=-1).fit, X, Y1)
assert_raise_message(ValueError, msg, LR(max_iter="test").fit, X, Y1)
def test_predict_3_classes():
check_predictions(LogisticRegression(C=10), X, Y2)
check_predictions(LogisticRegression(C=10), X_sp, Y2)
def test_predict_iris():
# Test logistic regression with the iris dataset
n_samples, n_features = iris.data.shape
target = iris.target_names[iris.target]
# Test that both multinomial and OvR solvers handle
# multiclass data correctly and give good accuracy
# score (>0.95) for the training data.
for clf in [LogisticRegression(C=len(iris.data)),
LogisticRegression(C=len(iris.data), solver='lbfgs',
multi_class='multinomial'),
LogisticRegression(C=len(iris.data), solver='newton-cg',
multi_class='multinomial'),
LogisticRegression(C=len(iris.data), solver='sag', tol=1e-2,
multi_class='ovr', random_state=42)]:
clf.fit(iris.data, target)
assert_array_equal(np.unique(target), clf.classes_)
pred = clf.predict(iris.data)
assert_greater(np.mean(pred == target), .95)
probabilities = clf.predict_proba(iris.data)
assert_array_almost_equal(probabilities.sum(axis=1),
np.ones(n_samples))
pred = iris.target_names[probabilities.argmax(axis=1)]
assert_greater(np.mean(pred == target), .95)
def test_multinomial_validation():
for solver in ['lbfgs', 'newton-cg', 'sag']:
lr = LogisticRegression(C=-1, solver=solver, multi_class='multinomial')
assert_raises(ValueError, lr.fit, [[0, 1], [1, 0]], [0, 1])
def test_check_solver_option():
X, y = iris.data, iris.target
for LR in [LogisticRegression, LogisticRegressionCV]:
msg = ("Logistic Regression supports only liblinear, newton-cg, lbfgs"
" and sag solvers, got wrong_name")
lr = LR(solver="wrong_name")
assert_raise_message(ValueError, msg, lr.fit, X, y)
msg = "multi_class should be either multinomial or ovr, got wrong_name"
lr = LR(solver='newton-cg', multi_class="wrong_name")
assert_raise_message(ValueError, msg, lr.fit, X, y)
# only 'liblinear' solver
msg = "Solver liblinear does not support a multinomial backend."
lr = LR(solver='liblinear', multi_class='multinomial')
assert_raise_message(ValueError, msg, lr.fit, X, y)
# all solvers except 'liblinear'
for solver in ['newton-cg', 'lbfgs', 'sag']:
msg = ("Solver %s supports only l2 penalties, got l1 penalty." %
solver)
lr = LR(solver=solver, penalty='l1')
assert_raise_message(ValueError, msg, lr.fit, X, y)
msg = ("Solver %s supports only dual=False, got dual=True" %
solver)
lr = LR(solver=solver, dual=True)
assert_raise_message(ValueError, msg, lr.fit, X, y)
def test_multinomial_binary():
# Test multinomial LR on a binary problem.
target = (iris.target > 0).astype(np.intp)
target = np.array(["setosa", "not-setosa"])[target]
for solver in ['lbfgs', 'newton-cg', 'sag']:
clf = LogisticRegression(solver=solver, multi_class='multinomial',
random_state=42, max_iter=2000)
clf.fit(iris.data, target)
assert_equal(clf.coef_.shape, (1, iris.data.shape[1]))
assert_equal(clf.intercept_.shape, (1,))
assert_array_equal(clf.predict(iris.data), target)
mlr = LogisticRegression(solver=solver, multi_class='multinomial',
random_state=42, fit_intercept=False)
mlr.fit(iris.data, target)
pred = clf.classes_[np.argmax(clf.predict_log_proba(iris.data),
axis=1)]
assert_greater(np.mean(pred == target), .9)
def test_sparsify():
# Test sparsify and densify members.
n_samples, n_features = iris.data.shape
target = iris.target_names[iris.target]
clf = LogisticRegression(random_state=0).fit(iris.data, target)
pred_d_d = clf.decision_function(iris.data)
clf.sparsify()
assert_true(sp.issparse(clf.coef_))
pred_s_d = clf.decision_function(iris.data)
sp_data = sp.coo_matrix(iris.data)
pred_s_s = clf.decision_function(sp_data)
clf.densify()
pred_d_s = clf.decision_function(sp_data)
assert_array_almost_equal(pred_d_d, pred_s_d)
assert_array_almost_equal(pred_d_d, pred_s_s)
assert_array_almost_equal(pred_d_d, pred_d_s)
def test_inconsistent_input():
# Test that an exception is raised on inconsistent input
rng = np.random.RandomState(0)
X_ = rng.random_sample((5, 10))
y_ = np.ones(X_.shape[0])
y_[0] = 0
clf = LogisticRegression(random_state=0)
# Wrong dimensions for training data
y_wrong = y_[:-1]
assert_raises(ValueError, clf.fit, X, y_wrong)
# Wrong dimensions for test data
assert_raises(ValueError, clf.fit(X_, y_).predict,
rng.random_sample((3, 12)))
def test_write_parameters():
# Test that we can write to coef_ and intercept_
clf = LogisticRegression(random_state=0)
clf.fit(X, Y1)
clf.coef_[:] = 0
clf.intercept_[:] = 0
assert_array_almost_equal(clf.decision_function(X), 0)
@raises(ValueError)
def test_nan():
# Test proper NaN handling.
# Regression test for Issue #252: fit used to go into an infinite loop.
Xnan = np.array(X, dtype=np.float64)
Xnan[0, 1] = np.nan
LogisticRegression(random_state=0).fit(Xnan, Y1)
def test_consistency_path():
# Test that the path algorithm is consistent
rng = np.random.RandomState(0)
X = np.concatenate((rng.randn(100, 2) + [1, 1], rng.randn(100, 2)))
y = [1] * 100 + [-1] * 100
Cs = np.logspace(0, 4, 10)
f = ignore_warnings
# can't test with fit_intercept=True since LIBLINEAR
# penalizes the intercept
for solver in ('lbfgs', 'newton-cg', 'liblinear', 'sag'):
coefs, Cs, _ = f(logistic_regression_path)(
X, y, Cs=Cs, fit_intercept=False, tol=1e-5, solver=solver,
random_state=0)
for i, C in enumerate(Cs):
lr = LogisticRegression(C=C, fit_intercept=False, tol=1e-5,
random_state=0)
lr.fit(X, y)
lr_coef = lr.coef_.ravel()
assert_array_almost_equal(lr_coef, coefs[i], decimal=4,
err_msg="with solver = %s" % solver)
# test for fit_intercept=True
for solver in ('lbfgs', 'newton-cg', 'liblinear', 'sag'):
Cs = [1e3]
coefs, Cs, _ = f(logistic_regression_path)(
X, y, Cs=Cs, fit_intercept=True, tol=1e-6, solver=solver,
intercept_scaling=10000., random_state=0)
lr = LogisticRegression(C=Cs[0], fit_intercept=True, tol=1e-4,
intercept_scaling=10000., random_state=0)
lr.fit(X, y)
lr_coef = np.concatenate([lr.coef_.ravel(), lr.intercept_])
assert_array_almost_equal(lr_coef, coefs[0], decimal=4,
err_msg="with solver = %s" % solver)
def test_liblinear_dual_random_state():
# random_state is relevant for liblinear solver only if dual=True
X, y = make_classification(n_samples=20, random_state=0)
lr1 = LogisticRegression(random_state=0, dual=True, max_iter=1, tol=1e-15)
lr1.fit(X, y)
lr2 = LogisticRegression(random_state=0, dual=True, max_iter=1, tol=1e-15)
lr2.fit(X, y)
lr3 = LogisticRegression(random_state=8, dual=True, max_iter=1, tol=1e-15)
lr3.fit(X, y)
# same result for same random state
assert_array_almost_equal(lr1.coef_, lr2.coef_)
# different results for different random states
msg = "Arrays are not almost equal to 6 decimals"
assert_raise_message(AssertionError, msg,
assert_array_almost_equal, lr1.coef_, lr3.coef_)
def test_logistic_loss_and_grad():
X_ref, y = make_classification(n_samples=20, random_state=0)
n_features = X_ref.shape[1]
X_sp = X_ref.copy()
X_sp[X_sp < .1] = 0
X_sp = sp.csr_matrix(X_sp)
for X in (X_ref, X_sp):
w = np.zeros(n_features)
# First check that our derivation of the grad is correct
loss, grad = _logistic_loss_and_grad(w, X, y, alpha=1.)
approx_grad = optimize.approx_fprime(
w, lambda w: _logistic_loss_and_grad(w, X, y, alpha=1.)[0], 1e-3
)
assert_array_almost_equal(grad, approx_grad, decimal=2)
# Second check that our intercept implementation is good
w = np.zeros(n_features + 1)
loss_interp, grad_interp = _logistic_loss_and_grad(
w, X, y, alpha=1.
)
assert_array_almost_equal(loss, loss_interp)
approx_grad = optimize.approx_fprime(
w, lambda w: _logistic_loss_and_grad(w, X, y, alpha=1.)[0], 1e-3
)
assert_array_almost_equal(grad_interp, approx_grad, decimal=2)
def test_logistic_grad_hess():
rng = np.random.RandomState(0)
n_samples, n_features = 50, 5
X_ref = rng.randn(n_samples, n_features)
y = np.sign(X_ref.dot(5 * rng.randn(n_features)))
X_ref -= X_ref.mean()
X_ref /= X_ref.std()
X_sp = X_ref.copy()
X_sp[X_sp < .1] = 0
X_sp = sp.csr_matrix(X_sp)
for X in (X_ref, X_sp):
w = .1 * np.ones(n_features)
# First check that _logistic_grad_hess is consistent
# with _logistic_loss_and_grad
loss, grad = _logistic_loss_and_grad(w, X, y, alpha=1.)
grad_2, hess = _logistic_grad_hess(w, X, y, alpha=1.)
assert_array_almost_equal(grad, grad_2)
# Now check our hessian along the second direction of the grad
vector = np.zeros_like(grad)
vector[1] = 1
hess_col = hess(vector)
# Computation of the Hessian is particularly fragile to numerical
# errors when doing simple finite differences. Here we compute the
# grad along a path in the direction of the vector and then use a
# least-square regression to estimate the slope
e = 1e-3
d_x = np.linspace(-e, e, 30)
d_grad = np.array([
_logistic_loss_and_grad(w + t * vector, X, y, alpha=1.)[1]
for t in d_x
])
d_grad -= d_grad.mean(axis=0)
approx_hess_col = linalg.lstsq(d_x[:, np.newaxis], d_grad)[0].ravel()
assert_array_almost_equal(approx_hess_col, hess_col, decimal=3)
# Second check that our intercept implementation is good
w = np.zeros(n_features + 1)
loss_interp, grad_interp = _logistic_loss_and_grad(w, X, y, alpha=1.)
loss_interp_2 = _logistic_loss(w, X, y, alpha=1.)
grad_interp_2, hess = _logistic_grad_hess(w, X, y, alpha=1.)
assert_array_almost_equal(loss_interp, loss_interp_2)
assert_array_almost_equal(grad_interp, grad_interp_2)
def test_logistic_cv():
# test for LogisticRegressionCV object
n_samples, n_features = 50, 5
rng = np.random.RandomState(0)
X_ref = rng.randn(n_samples, n_features)
y = np.sign(X_ref.dot(5 * rng.randn(n_features)))
X_ref -= X_ref.mean()
X_ref /= X_ref.std()
lr_cv = LogisticRegressionCV(Cs=[1.], fit_intercept=False,
solver='liblinear')
lr_cv.fit(X_ref, y)
lr = LogisticRegression(C=1., fit_intercept=False)
lr.fit(X_ref, y)
assert_array_almost_equal(lr.coef_, lr_cv.coef_)
assert_array_equal(lr_cv.coef_.shape, (1, n_features))
assert_array_equal(lr_cv.classes_, [-1, 1])
assert_equal(len(lr_cv.classes_), 2)
coefs_paths = np.asarray(list(lr_cv.coefs_paths_.values()))
assert_array_equal(coefs_paths.shape, (1, 3, 1, n_features))
assert_array_equal(lr_cv.Cs_.shape, (1, ))
scores = np.asarray(list(lr_cv.scores_.values()))
assert_array_equal(scores.shape, (1, 3, 1))
def test_multinomial_logistic_regression_string_inputs():
# Test with string labels for LogisticRegression(CV)
n_samples, n_features, n_classes = 50, 5, 3
X_ref, y = make_classification(n_samples=n_samples, n_features=n_features,
n_classes=n_classes, n_informative=3,
random_state=0)
y_str = LabelEncoder().fit(['bar', 'baz', 'foo']).inverse_transform(y)
# For numerical labels, let y values be taken from set (-1, 0, 1)
y = np.array(y) - 1
# Test for string labels
lr = LogisticRegression(solver='lbfgs', multi_class='multinomial')
lr_cv = LogisticRegressionCV(solver='lbfgs', multi_class='multinomial')
lr_str = LogisticRegression(solver='lbfgs', multi_class='multinomial')
lr_cv_str = LogisticRegressionCV(solver='lbfgs', multi_class='multinomial')
lr.fit(X_ref, y)
lr_cv.fit(X_ref, y)
lr_str.fit(X_ref, y_str)
lr_cv_str.fit(X_ref, y_str)
assert_array_almost_equal(lr.coef_, lr_str.coef_)
assert_equal(sorted(lr_str.classes_), ['bar', 'baz', 'foo'])
assert_array_almost_equal(lr_cv.coef_, lr_cv_str.coef_)
assert_equal(sorted(lr_str.classes_), ['bar', 'baz', 'foo'])
assert_equal(sorted(lr_cv_str.classes_), ['bar', 'baz', 'foo'])
# The predictions should be in original labels
assert_equal(sorted(np.unique(lr_str.predict(X_ref))),
['bar', 'baz', 'foo'])
assert_equal(sorted(np.unique(lr_cv_str.predict(X_ref))),
['bar', 'baz', 'foo'])
# Make sure class weights can be given with string labels
lr_cv_str = LogisticRegression(
solver='lbfgs', class_weight={'bar': 1, 'baz': 2, 'foo': 0},
multi_class='multinomial').fit(X_ref, y_str)
assert_equal(sorted(np.unique(lr_cv_str.predict(X_ref))), ['bar', 'baz'])
def test_logistic_cv_sparse():
X, y = make_classification(n_samples=50, n_features=5,
random_state=0)
X[X < 1.0] = 0.0
csr = sp.csr_matrix(X)
clf = LogisticRegressionCV(fit_intercept=True)
clf.fit(X, y)
clfs = LogisticRegressionCV(fit_intercept=True)
clfs.fit(csr, y)
assert_array_almost_equal(clfs.coef_, clf.coef_)
assert_array_almost_equal(clfs.intercept_, clf.intercept_)
assert_equal(clfs.C_, clf.C_)
def test_intercept_logistic_helper():
n_samples, n_features = 10, 5
X, y = make_classification(n_samples=n_samples, n_features=n_features,
random_state=0)
# Fit intercept case.
alpha = 1.
w = np.ones(n_features + 1)
grad_interp, hess_interp = _logistic_grad_hess(w, X, y, alpha)
loss_interp = _logistic_loss(w, X, y, alpha)
# Do not fit intercept. This can be considered equivalent to adding
# a feature vector of ones, i.e., a column of ones.
X_ = np.hstack((X, np.ones(10)[:, np.newaxis]))
grad, hess = _logistic_grad_hess(w, X_, y, alpha)
loss = _logistic_loss(w, X_, y, alpha)
# In the fit_intercept=False case, the feature vector of ones is
# penalized. This should be taken care of.
assert_almost_equal(loss_interp + 0.5 * (w[-1] ** 2), loss)
# Check gradient.
assert_array_almost_equal(grad_interp[:n_features], grad[:n_features])
assert_almost_equal(grad_interp[-1] + alpha * w[-1], grad[-1])
rng = np.random.RandomState(0)
grad = rng.rand(n_features + 1)
hess_interp = hess_interp(grad)
hess = hess(grad)
assert_array_almost_equal(hess_interp[:n_features], hess[:n_features])
assert_almost_equal(hess_interp[-1] + alpha * grad[-1], hess[-1])
def test_ovr_multinomial_iris():
# Test that OvR and multinomial are correct using the iris dataset.
train, target = iris.data, iris.target
n_samples, n_features = train.shape
# The cv indices from stratified kfold (where stratification is done based
# on the fine-grained iris classes, i.e., before the classes 0 and 1 are
# conflated) are used for both clf and clf1
n_cv = 2
cv = StratifiedKFold(n_cv)
precomputed_folds = list(cv.split(train, target))
# Train clf on the original dataset where classes 0 and 1 are separated
clf = LogisticRegressionCV(cv=precomputed_folds)
clf.fit(train, target)
# Conflate classes 0 and 1 and train clf1 on this modified dataset
clf1 = LogisticRegressionCV(cv=precomputed_folds)
target_copy = target.copy()
target_copy[target_copy == 0] = 1
clf1.fit(train, target_copy)
# Ensure that what OvR learns for class2 is the same regardless of whether
# classes 0 and 1 are separated or not
assert_array_almost_equal(clf.scores_[2], clf1.scores_[2])
assert_array_almost_equal(clf.intercept_[2:], clf1.intercept_)
assert_array_almost_equal(clf.coef_[2][np.newaxis, :], clf1.coef_)
# Test the shape of various attributes.
assert_equal(clf.coef_.shape, (3, n_features))
assert_array_equal(clf.classes_, [0, 1, 2])
coefs_paths = np.asarray(list(clf.coefs_paths_.values()))
assert_array_almost_equal(coefs_paths.shape, (3, n_cv, 10, n_features + 1))
assert_equal(clf.Cs_.shape, (10, ))
scores = np.asarray(list(clf.scores_.values()))
assert_equal(scores.shape, (3, n_cv, 10))
# Test that for the iris data multinomial gives a better accuracy than OvR
for solver in ['lbfgs', 'newton-cg', 'sag']:
max_iter = 100 if solver == 'sag' else 15
clf_multi = LogisticRegressionCV(
solver=solver, multi_class='multinomial', max_iter=max_iter,
random_state=42, tol=1e-2, cv=2)
clf_multi.fit(train, target)
multi_score = clf_multi.score(train, target)
ovr_score = clf.score(train, target)
assert_greater(multi_score, ovr_score)
# Test attributes of LogisticRegressionCV
assert_equal(clf.coef_.shape, clf_multi.coef_.shape)
assert_array_equal(clf_multi.classes_, [0, 1, 2])
coefs_paths = np.asarray(list(clf_multi.coefs_paths_.values()))
assert_array_almost_equal(coefs_paths.shape, (3, n_cv, 10,
n_features + 1))
assert_equal(clf_multi.Cs_.shape, (10, ))
scores = np.asarray(list(clf_multi.scores_.values()))
assert_equal(scores.shape, (3, n_cv, 10))
def test_logistic_regression_solvers():
X, y = make_classification(n_features=10, n_informative=5, random_state=0)
ncg = LogisticRegression(solver='newton-cg', fit_intercept=False)
lbf = LogisticRegression(solver='lbfgs', fit_intercept=False)
lib = LogisticRegression(fit_intercept=False)
sag = LogisticRegression(solver='sag', fit_intercept=False,
random_state=42)
ncg.fit(X, y)
lbf.fit(X, y)
sag.fit(X, y)
lib.fit(X, y)
assert_array_almost_equal(ncg.coef_, lib.coef_, decimal=3)
assert_array_almost_equal(lib.coef_, lbf.coef_, decimal=3)
assert_array_almost_equal(ncg.coef_, lbf.coef_, decimal=3)
assert_array_almost_equal(sag.coef_, lib.coef_, decimal=3)
assert_array_almost_equal(sag.coef_, ncg.coef_, decimal=3)
assert_array_almost_equal(sag.coef_, lbf.coef_, decimal=3)
def test_logistic_regression_solvers_multiclass():
X, y = make_classification(n_samples=20, n_features=20, n_informative=10,
n_classes=3, random_state=0)
tol = 1e-6
ncg = LogisticRegression(solver='newton-cg', fit_intercept=False, tol=tol)
lbf = LogisticRegression(solver='lbfgs', fit_intercept=False, tol=tol)
lib = LogisticRegression(fit_intercept=False, tol=tol)
sag = LogisticRegression(solver='sag', fit_intercept=False, tol=tol,
max_iter=1000, random_state=42)
ncg.fit(X, y)
lbf.fit(X, y)
sag.fit(X, y)
lib.fit(X, y)
assert_array_almost_equal(ncg.coef_, lib.coef_, decimal=4)
assert_array_almost_equal(lib.coef_, lbf.coef_, decimal=4)
assert_array_almost_equal(ncg.coef_, lbf.coef_, decimal=4)
assert_array_almost_equal(sag.coef_, lib.coef_, decimal=4)
assert_array_almost_equal(sag.coef_, ncg.coef_, decimal=4)
assert_array_almost_equal(sag.coef_, lbf.coef_, decimal=4)
def test_logistic_regressioncv_class_weights():
for weight in [{0: 0.1, 1: 0.2}, {0: 0.1, 1: 0.2, 2: 0.5}]:
n_classes = len(weight)
for class_weight in (weight, 'balanced'):
X, y = make_classification(n_samples=30, n_features=3,
n_repeated=0,
n_informative=3, n_redundant=0,
n_classes=n_classes, random_state=0)
clf_lbf = LogisticRegressionCV(solver='lbfgs', Cs=1,
fit_intercept=False,
class_weight=class_weight)
clf_ncg = LogisticRegressionCV(solver='newton-cg', Cs=1,
fit_intercept=False,
class_weight=class_weight)
clf_lib = LogisticRegressionCV(solver='liblinear', Cs=1,
fit_intercept=False,
class_weight=class_weight)
clf_sag = LogisticRegressionCV(solver='sag', Cs=1,
fit_intercept=False,
class_weight=class_weight,
tol=1e-5, max_iter=10000,
random_state=0)
clf_lbf.fit(X, y)
clf_ncg.fit(X, y)
clf_lib.fit(X, y)
clf_sag.fit(X, y)
assert_array_almost_equal(clf_lib.coef_, clf_lbf.coef_, decimal=4)
assert_array_almost_equal(clf_ncg.coef_, clf_lbf.coef_, decimal=4)
assert_array_almost_equal(clf_sag.coef_, clf_lbf.coef_, decimal=4)
def test_logistic_regression_sample_weights():
X, y = make_classification(n_samples=20, n_features=5, n_informative=3,
n_classes=2, random_state=0)
sample_weight = y + 1
for LR in [LogisticRegression, LogisticRegressionCV]:
# Test that passing sample_weight as ones is the same as
# not passing them at all (default None)
for solver in ['lbfgs', 'liblinear']:
clf_sw_none = LR(solver=solver, fit_intercept=False,
random_state=42)
clf_sw_none.fit(X, y)
clf_sw_ones = LR(solver=solver, fit_intercept=False,
random_state=42)
clf_sw_ones.fit(X, y, sample_weight=np.ones(y.shape[0]))
assert_array_almost_equal(
clf_sw_none.coef_, clf_sw_ones.coef_, decimal=4)
# Test that sample weights work the same with the lbfgs,
# newton-cg, and 'sag' solvers
clf_sw_lbfgs = LR(solver='lbfgs', fit_intercept=False, random_state=42)
clf_sw_lbfgs.fit(X, y, sample_weight=sample_weight)
clf_sw_n = LR(solver='newton-cg', fit_intercept=False, random_state=42)
clf_sw_n.fit(X, y, sample_weight=sample_weight)
clf_sw_sag = LR(solver='sag', fit_intercept=False, tol=1e-10,
random_state=42)
# ignore convergence warning due to small dataset
with ignore_warnings():
clf_sw_sag.fit(X, y, sample_weight=sample_weight)
clf_sw_liblinear = LR(solver='liblinear', fit_intercept=False,
random_state=42)
clf_sw_liblinear.fit(X, y, sample_weight=sample_weight)
assert_array_almost_equal(
clf_sw_lbfgs.coef_, clf_sw_n.coef_, decimal=4)
assert_array_almost_equal(
clf_sw_lbfgs.coef_, clf_sw_sag.coef_, decimal=4)
assert_array_almost_equal(
clf_sw_lbfgs.coef_, clf_sw_liblinear.coef_, decimal=4)
# Test that passing class_weight as [1,2] is the same as
# passing class weight = [1,1] but adjusting sample weights
# to be 2 for all instances of class 2
for solver in ['lbfgs', 'liblinear']:
clf_cw_12 = LR(solver=solver, fit_intercept=False,
class_weight={0: 1, 1: 2}, random_state=42)
clf_cw_12.fit(X, y)
clf_sw_12 = LR(solver=solver, fit_intercept=False, random_state=42)
clf_sw_12.fit(X, y, sample_weight=sample_weight)
assert_array_almost_equal(
clf_cw_12.coef_, clf_sw_12.coef_, decimal=4)
# Test the above for l1 penalty and l2 penalty with dual=True,
# since the patched liblinear code is different.
clf_cw = LogisticRegression(
solver="liblinear", fit_intercept=False, class_weight={0: 1, 1: 2},
penalty="l1", tol=1e-5, random_state=42)
clf_cw.fit(X, y)
clf_sw = LogisticRegression(
solver="liblinear", fit_intercept=False, penalty="l1", tol=1e-5,
random_state=42)
clf_sw.fit(X, y, sample_weight)
assert_array_almost_equal(clf_cw.coef_, clf_sw.coef_, decimal=4)
clf_cw = LogisticRegression(
solver="liblinear", fit_intercept=False, class_weight={0: 1, 1: 2},
penalty="l2", dual=True, random_state=42)
clf_cw.fit(X, y)
clf_sw = LogisticRegression(
solver="liblinear", fit_intercept=False, penalty="l2", dual=True,
random_state=42)
clf_sw.fit(X, y, sample_weight)
assert_array_almost_equal(clf_cw.coef_, clf_sw.coef_, decimal=4)
def _compute_class_weight_dictionary(y):
# helper for returning a dictionary instead of an array
classes = np.unique(y)
class_weight = compute_class_weight("balanced", classes, y)
class_weight_dict = dict(zip(classes, class_weight))
return class_weight_dict
def test_logistic_regression_class_weights():
# Multinomial case: remove 90% of class 0
X = iris.data[45:, :]
y = iris.target[45:]
solvers = ("lbfgs", "newton-cg")
class_weight_dict = _compute_class_weight_dictionary(y)
for solver in solvers:
clf1 = LogisticRegression(solver=solver, multi_class="multinomial",
class_weight="balanced")
clf2 = LogisticRegression(solver=solver, multi_class="multinomial",
class_weight=class_weight_dict)
clf1.fit(X, y)
clf2.fit(X, y)
assert_array_almost_equal(clf1.coef_, clf2.coef_, decimal=4)
# Binary case: remove 90% of class 0 and 100% of class 2
X = iris.data[45:100, :]
y = iris.target[45:100]
solvers = ("lbfgs", "newton-cg", "liblinear")
class_weight_dict = _compute_class_weight_dictionary(y)
for solver in solvers:
clf1 = LogisticRegression(solver=solver, multi_class="ovr",
class_weight="balanced")
clf2 = LogisticRegression(solver=solver, multi_class="ovr",
class_weight=class_weight_dict)
clf1.fit(X, y)
clf2.fit(X, y)
assert_array_almost_equal(clf1.coef_, clf2.coef_, decimal=6)
def test_logistic_regression_convergence_warnings():
# Test that warnings are raised if model does not converge
X, y = make_classification(n_samples=20, n_features=20, random_state=0)
clf_lib = LogisticRegression(solver='liblinear', max_iter=2, verbose=1)
assert_warns(ConvergenceWarning, clf_lib.fit, X, y)
assert_equal(clf_lib.n_iter_, 2)
def test_logistic_regression_multinomial():
# Tests for the multinomial option in logistic regression
# Some basic attributes of Logistic Regression
n_samples, n_features, n_classes = 50, 20, 3
X, y = make_classification(n_samples=n_samples,
n_features=n_features,
n_informative=10,
n_classes=n_classes, random_state=0)
# 'lbfgs' is used as a reference
solver = 'lbfgs'
ref_i = LogisticRegression(solver=solver, multi_class='multinomial')
ref_w = LogisticRegression(solver=solver, multi_class='multinomial',
fit_intercept=False)
ref_i.fit(X, y)
ref_w.fit(X, y)
assert_array_equal(ref_i.coef_.shape, (n_classes, n_features))
assert_array_equal(ref_w.coef_.shape, (n_classes, n_features))
for solver in ['sag', 'newton-cg']:
clf_i = LogisticRegression(solver=solver, multi_class='multinomial',
random_state=42, max_iter=1000, tol=1e-6)
clf_w = LogisticRegression(solver=solver, multi_class='multinomial',
random_state=42, max_iter=1000, tol=1e-6,
fit_intercept=False)
clf_i.fit(X, y)
clf_w.fit(X, y)
assert_array_equal(clf_i.coef_.shape, (n_classes, n_features))
assert_array_equal(clf_w.coef_.shape, (n_classes, n_features))
# Compare solutions between lbfgs and the other solvers
assert_almost_equal(ref_i.coef_, clf_i.coef_, decimal=3)
assert_almost_equal(ref_w.coef_, clf_w.coef_, decimal=3)
assert_almost_equal(ref_i.intercept_, clf_i.intercept_, decimal=3)
# Test that the path gives almost the same results. However, since in this
# case we take the average of the coefs after fitting across all the
# folds, it need not be exactly the same.
for solver in ['lbfgs', 'newton-cg', 'sag']:
clf_path = LogisticRegressionCV(solver=solver, max_iter=2000, tol=1e-6,
multi_class='multinomial', Cs=[1.])
clf_path.fit(X, y)
assert_array_almost_equal(clf_path.coef_, ref_i.coef_, decimal=3)
assert_almost_equal(clf_path.intercept_, ref_i.intercept_, decimal=3)
def test_multinomial_grad_hess():
rng = np.random.RandomState(0)
n_samples, n_features, n_classes = 100, 5, 3
X = rng.randn(n_samples, n_features)
w = rng.rand(n_classes, n_features)
Y = np.zeros((n_samples, n_classes))
ind = np.argmax(np.dot(X, w.T), axis=1)
Y[range(0, n_samples), ind] = 1
w = w.ravel()
sample_weights = np.ones(X.shape[0])
grad, hessp = _multinomial_grad_hess(w, X, Y, alpha=1.,
sample_weight=sample_weights)
# extract first column of hessian matrix
vec = np.zeros(n_features * n_classes)
vec[0] = 1
hess_col = hessp(vec)
# Estimate hessian using least squares as done in
# test_logistic_grad_hess
e = 1e-3
d_x = np.linspace(-e, e, 30)
d_grad = np.array([
_multinomial_grad_hess(w + t * vec, X, Y, alpha=1.,
sample_weight=sample_weights)[0]
for t in d_x
])
d_grad -= d_grad.mean(axis=0)
approx_hess_col = linalg.lstsq(d_x[:, np.newaxis], d_grad)[0].ravel()
assert_array_almost_equal(hess_col, approx_hess_col)
def test_liblinear_decision_function_zero():
# Test negative prediction when decision_function values are zero.
# Liblinear predicts the positive class when decision_function values
# are zero. This is a test to verify that we do not do the same.
# See Issue: https://github.com/scikit-learn/scikit-learn/issues/3600
# and the PR https://github.com/scikit-learn/scikit-learn/pull/3623
X, y = make_classification(n_samples=5, n_features=5, random_state=0)
clf = LogisticRegression(fit_intercept=False)
clf.fit(X, y)
# Dummy data such that the decision function becomes zero.
X = np.zeros((5, 5))
assert_array_equal(clf.predict(X), np.zeros(5))
def test_liblinear_logregcv_sparse():
# Test LogRegCV with solver='liblinear' works for sparse matrices
X, y = make_classification(n_samples=10, n_features=5, random_state=0)
clf = LogisticRegressionCV(solver='liblinear')
clf.fit(sparse.csr_matrix(X), y)
def test_logreg_intercept_scaling():
# Test that the right error message is thrown when intercept_scaling <= 0
for i in [-1, 0]:
clf = LogisticRegression(intercept_scaling=i)
msg = ('Intercept scaling is %r but needs to be greater than 0.'
' To disable fitting an intercept,'
' set fit_intercept=False.' % clf.intercept_scaling)
assert_raise_message(ValueError, msg, clf.fit, X, Y1)
def test_logreg_intercept_scaling_zero():
# Test that intercept_scaling is ignored when fit_intercept is False
clf = LogisticRegression(fit_intercept=False)
clf.fit(X, Y1)
assert_equal(clf.intercept_, 0.)
def test_logreg_cv_penalty():
# Test that the correct penalty is passed to the final fit.
X, y = make_classification(n_samples=50, n_features=20, random_state=0)
lr_cv = LogisticRegressionCV(penalty="l1", Cs=[1.0], solver='liblinear')
lr_cv.fit(X, y)
lr = LogisticRegression(penalty="l1", C=1.0, solver='liblinear')
lr.fit(X, y)
assert_equal(np.count_nonzero(lr_cv.coef_), np.count_nonzero(lr.coef_))
def test_logreg_predict_proba_multinomial():
X, y = make_classification(n_samples=10, n_features=20, random_state=0,
n_classes=3, n_informative=10)
# Predicted probabilities using the true-entropy loss should give a
# smaller loss than those using the ovr method.
clf_multi = LogisticRegression(multi_class="multinomial", solver="lbfgs")
clf_multi.fit(X, y)
clf_multi_loss = log_loss(y, clf_multi.predict_proba(X))
clf_ovr = LogisticRegression(multi_class="ovr", solver="lbfgs")
clf_ovr.fit(X, y)
clf_ovr_loss = log_loss(y, clf_ovr.predict_proba(X))
assert_greater(clf_ovr_loss, clf_multi_loss)
# Predicted probabilities using the soft-max function should give a
# smaller loss than those using the logistic function.
clf_multi_loss = log_loss(y, clf_multi.predict_proba(X))
clf_wrong_loss = log_loss(y, clf_multi._predict_proba_lr(X))
assert_greater(clf_wrong_loss, clf_multi_loss)
@ignore_warnings
def test_max_iter():
# Test that the maximum number of iteration is reached
X, y_bin = iris.data, iris.target.copy()
y_bin[y_bin == 2] = 0
solvers = ['newton-cg', 'liblinear', 'sag']
# old scipy doesn't have maxiter
if sp_version >= (0, 12):
solvers.append('lbfgs')
for max_iter in range(1, 5):
for solver in solvers:
for multi_class in ['ovr', 'multinomial']:
if solver == 'liblinear' and multi_class == 'multinomial':
continue
lr = LogisticRegression(max_iter=max_iter, tol=1e-15,
multi_class=multi_class,
random_state=0, solver=solver)
lr.fit(X, y_bin)
assert_equal(lr.n_iter_[0], max_iter)
def test_n_iter():
# Test that self.n_iter_ has the correct format.
X, y = iris.data, iris.target
y_bin = y.copy()
y_bin[y_bin == 2] = 0
n_Cs = 4
n_cv_fold = 2
for solver in ['newton-cg', 'liblinear', 'sag', 'lbfgs']:
# OvR case
n_classes = 1 if solver == 'liblinear' else np.unique(y).shape[0]
clf = LogisticRegression(tol=1e-2, multi_class='ovr',
solver=solver, C=1.,
random_state=42, max_iter=100)
clf.fit(X, y)
assert_equal(clf.n_iter_.shape, (n_classes,))
n_classes = np.unique(y).shape[0]
clf = LogisticRegressionCV(tol=1e-2, multi_class='ovr',
solver=solver, Cs=n_Cs, cv=n_cv_fold,
random_state=42, max_iter=100)
clf.fit(X, y)
assert_equal(clf.n_iter_.shape, (n_classes, n_cv_fold, n_Cs))
clf.fit(X, y_bin)
assert_equal(clf.n_iter_.shape, (1, n_cv_fold, n_Cs))
# multinomial case
n_classes = 1
if solver in ('liblinear', 'sag'):
break
clf = LogisticRegression(tol=1e-2, multi_class='multinomial',
solver=solver, C=1.,
random_state=42, max_iter=100)
clf.fit(X, y)
assert_equal(clf.n_iter_.shape, (n_classes,))
clf = LogisticRegressionCV(tol=1e-2, multi_class='multinomial',
solver=solver, Cs=n_Cs, cv=n_cv_fold,
random_state=42, max_iter=100)
clf.fit(X, y)
assert_equal(clf.n_iter_.shape, (n_classes, n_cv_fold, n_Cs))
clf.fit(X, y_bin)
assert_equal(clf.n_iter_.shape, (1, n_cv_fold, n_Cs))
def test_warm_start():
# A 1-iteration second fit on same data should give almost same result
# with warm starting, and quite different result without warm starting.
# Warm starting does not work with liblinear solver.
X, y = iris.data, iris.target
solvers = ['newton-cg', 'sag']
# old scipy doesn't have maxiter
if sp_version >= (0, 12):
solvers.append('lbfgs')
for warm_start in [True, False]:
for fit_intercept in [True, False]:
for solver in solvers:
for multi_class in ['ovr', 'multinomial']:
clf = LogisticRegression(tol=1e-4, multi_class=multi_class,
warm_start=warm_start,
solver=solver,
random_state=42, max_iter=100,
fit_intercept=fit_intercept)
with ignore_warnings(category=ConvergenceWarning):
clf.fit(X, y)
coef_1 = clf.coef_
clf.max_iter = 1
clf.fit(X, y)
cum_diff = np.sum(np.abs(coef_1 - clf.coef_))
msg = ("Warm starting issue with %s solver in %s mode "
"with fit_intercept=%s and warm_start=%s"
% (solver, multi_class, str(fit_intercept),
str(warm_start)))
if warm_start:
assert_greater(2.0, cum_diff, msg)
else:
assert_greater(cum_diff, 2.0, msg)
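# A compact, self-contained sketch of the multinomial-vs-OvR log-loss comparison
# exercised in test_logreg_predict_proba_multinomial above. The dataset
# parameters and helper name are arbitrary choices, not part of the test module.
def _demo_multinomial_vs_ovr_log_loss():
    X_demo, y_demo = make_classification(n_samples=200, n_features=20,
                                         n_informative=10, n_classes=3,
                                         random_state=0)
    clf_mn = LogisticRegression(multi_class="multinomial",
                                solver="lbfgs").fit(X_demo, y_demo)
    clf_ovr = LogisticRegression(multi_class="ovr",
                                 solver="lbfgs").fit(X_demo, y_demo)
    # The multinomial model optimizes the joint cross-entropy directly, so its
    # training log-loss is expected to be lower, as the test above asserts.
    return (log_loss(y_demo, clf_mn.predict_proba(X_demo)),
            log_loss(y_demo, clf_ovr.predict_proba(X_demo)))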
|
bsd-3-clause
|
probml/pyprobml
|
scripts/hbayes_binom_covid_uninf_pymc3.py
|
1
|
2993
|
import matplotlib.pyplot as plt
import scipy.stats as stats
import numpy as np
import pandas as pd
#import seaborn as sns
import pymc3 as pm
import arviz as az
import theano.tensor as tt
np.random.seed(123)
G_samples = np.array([0, 0, 2, 0, 1, 1, 0, 2, 1, 3, 0, 1, 1, 1,
54, 0, 0, 1, 3, 0])
N_samples = np.array([1083, 855, 3461, 657, 1208, 1025, 527,
1668, 583, 582, 917, 857,
680, 917, 53637, 874, 395, 581, 588, 383])
G_samples = np.array([1, 0, 3, 0, 1, 5, 11])
N_samples = np.array([1083, 855, 3461, 657, 1208, 5000, 10000])
if False:
with pm.Model() as model:
μ = pm.Beta('μ', 1., 1.)
κ = pm.HalfNormal('κ', 10)
theta = pm.Beta('θ', alpha=μ*κ, beta=(1.0-μ)*κ, shape=len(N_samples))
#y = pm.Bernoulli('y', p=θ[group_idx], observed=data)
y = pm.Binomial('y', p=theta, observed=G_samples, n=N_samples)
trace = pm.sample(1000)
#https://docs.pymc.io/notebooks/GLM-hierarchical-binominal-model.html
def logp_ab(value):
''' prior density'''
return tt.log(tt.pow(tt.sum(value), -5/2))
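# logp_ab returns, on the log scale, the improper hyperprior
# p(alpha, beta) proportional to (alpha + beta)**(-5/2) used in the
# hierarchical binomial notebook linked below; `value` is the length-2
# vector (alpha, beta).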
with pm.Model() as model:
# Uninformative prior for alpha and beta
ab = pm.HalfFlat('ab',
shape=2,
testval=np.asarray([1., 1.]))
pm.Potential('p(a, b)', logp_ab(ab))
alpha = pm.Deterministic('alpha', ab[0])
beta = pm.Deterministic('beta', ab[1])
theta = pm.Beta('θ', alpha=alpha, beta=beta, shape=len(N_samples))
y = pm.Binomial('y', p=theta, observed=G_samples, n=N_samples)
trace = pm.sample(1000)
az.plot_trace(trace)
plt.savefig('../figures/hbayes_binom_covid_trace.png', dpi=300)
print(az.summary(trace))
axes = az.plot_forest(
trace, var_names='θ', hdi_prob=0.95, combined=False, colors='cycle')
y_lims = axes[0].get_ylim()
plt.savefig('../figures/hbayes_binom_covid_forest.png', dpi=300)
J = len(N_samples)
post_mean = np.zeros(J)
samples = trace['θ']
post_mean = np.mean(samples, axis=0)
alphas = trace['alpha']
betas = trace['beta']
alpha_mean = np.mean(alphas)
beta_mean = np.mean(betas)
hyper_mean = alpha_mean/(alpha_mean + beta_mean)
print('hyper mean')
print(hyper_mean)
hyper_mean2 = np.mean(alphas / (alphas+betas))
print(hyper_mean2)
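# Note: hyper_mean plugs the posterior means of alpha and beta into a/(a+b),
# while hyper_mean2 averages a/(a+b) across posterior samples; the two differ
# in general because the ratio is a nonlinear function of (alpha, beta).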
mle = G_samples / N_samples
pooled_mle = np.sum(G_samples) / np.sum(N_samples)
print('pooled mle')
print(pooled_mle)
fig, axs = plt.subplots(4,1, figsize=(8,8))
axs = np.reshape(axs, 4)
xs = np.arange(J)
ax = axs[0]
ax.bar(xs, G_samples)
ax.set_ylim(0, 5)
ax.set_title('number of cases (truncated at 5)')
ax = axs[1]
ax.bar(xs, N_samples)
ax.set_ylim(0, 1000)
ax.set_title('popn size (truncated at 1000)')
ax = axs[2]
ax.bar(xs, mle)
ax.hlines(pooled_mle, 0, J, 'r', lw=3)
ax.set_title('MLE (red line = pooled)')
ax = axs[3]
ax.bar(xs, post_mean)
ax.hlines(hyper_mean, 0, J, 'r', lw=3)
ax.set_title('posterior mean (red line = hparam)')
plt.savefig('../figures/hbayes_binom_covid_barplot.png', dpi=300)
|
mit
|
bikong2/scikit-learn
|
examples/plot_kernel_ridge_regression.py
|
230
|
6222
|
"""
=============================================
Comparison of kernel ridge regression and SVR
=============================================
Both kernel ridge regression (KRR) and SVR learn a non-linear function by
employing the kernel trick, i.e., they learn a linear function in the space
induced by the respective kernel which corresponds to a non-linear function in
the original space. They differ in the loss functions (ridge versus
epsilon-insensitive loss). In contrast to SVR, fitting a KRR can be done in
closed-form and is typically faster for medium-sized datasets. On the other
hand, the learned model is non-sparse and thus slower than SVR at
prediction-time.
This example illustrates both methods on an artificial dataset, which
consists of a sinusoidal target function and strong noise added to every fifth
datapoint. The first figure compares the learned model of KRR and SVR when both
complexity/regularization and bandwidth of the RBF kernel are optimized using
grid-search. The learned functions are very similar; however, fitting KRR is
approx. seven times faster than fitting SVR (both with grid-search). However,
prediction of 100000 target values is more than three times faster with SVR
since it has learned a sparse model using only approx. 1/3 of the 100 training
datapoints as support vectors.
The next figure compares the time for fitting and prediction of KRR and SVR for
different sizes of the training set. Fitting KRR is faster than SVR for medium-
sized training sets (less than 1000 samples); however, for larger training sets
SVR scales better. With regard to prediction time, SVR is faster than
KRR for all sizes of the training set because of the learned sparse
solution. Note that the degree of sparsity and thus the prediction time depend
on the parameters epsilon and C of the SVR.
"""
# Authors: Jan Hendrik Metzen <[email protected]>
# License: BSD 3 clause
from __future__ import division
import time
import numpy as np
from sklearn.svm import SVR
from sklearn.grid_search import GridSearchCV
from sklearn.learning_curve import learning_curve
from sklearn.kernel_ridge import KernelRidge
import matplotlib.pyplot as plt
rng = np.random.RandomState(0)
#############################################################################
# Generate sample data
X = 5 * rng.rand(10000, 1)
y = np.sin(X).ravel()
# Add noise to targets
y[::5] += 3 * (0.5 - rng.rand(X.shape[0]/5))
X_plot = np.linspace(0, 5, 100000)[:, None]
#############################################################################
# Fit regression model
train_size = 100
svr = GridSearchCV(SVR(kernel='rbf', gamma=0.1), cv=5,
param_grid={"C": [1e0, 1e1, 1e2, 1e3],
"gamma": np.logspace(-2, 2, 5)})
kr = GridSearchCV(KernelRidge(kernel='rbf', gamma=0.1), cv=5,
param_grid={"alpha": [1e0, 0.1, 1e-2, 1e-3],
"gamma": np.logspace(-2, 2, 5)})
t0 = time.time()
svr.fit(X[:train_size], y[:train_size])
svr_fit = time.time() - t0
print("SVR complexity and bandwidth selected and model fitted in %.3f s"
% svr_fit)
t0 = time.time()
kr.fit(X[:train_size], y[:train_size])
kr_fit = time.time() - t0
print("KRR complexity and bandwidth selected and model fitted in %.3f s"
% kr_fit)
sv_ratio = svr.best_estimator_.support_.shape[0] / train_size
print("Support vector ratio: %.3f" % sv_ratio)
t0 = time.time()
y_svr = svr.predict(X_plot)
svr_predict = time.time() - t0
print("SVR prediction for %d inputs in %.3f s"
% (X_plot.shape[0], svr_predict))
t0 = time.time()
y_kr = kr.predict(X_plot)
kr_predict = time.time() - t0
print("KRR prediction for %d inputs in %.3f s"
% (X_plot.shape[0], kr_predict))
#############################################################################
# look at the results
sv_ind = svr.best_estimator_.support_
plt.scatter(X[sv_ind], y[sv_ind], c='r', s=50, label='SVR support vectors')
plt.scatter(X[:100], y[:100], c='k', label='data')
plt.hold('on')
plt.plot(X_plot, y_svr, c='r',
label='SVR (fit: %.3fs, predict: %.3fs)' % (svr_fit, svr_predict))
plt.plot(X_plot, y_kr, c='g',
label='KRR (fit: %.3fs, predict: %.3fs)' % (kr_fit, kr_predict))
plt.xlabel('data')
plt.ylabel('target')
plt.title('SVR versus Kernel Ridge')
plt.legend()
# Visualize training and prediction time
plt.figure()
# Generate sample data
X = 5 * rng.rand(10000, 1)
y = np.sin(X).ravel()
y[::5] += 3 * (0.5 - rng.rand(X.shape[0] // 5))
sizes = np.logspace(1, 4, 7).astype(int)  # training-set sizes must be integers for slicing
for name, estimator in {"KRR": KernelRidge(kernel='rbf', alpha=0.1,
gamma=10),
"SVR": SVR(kernel='rbf', C=1e1, gamma=10)}.items():
train_time = []
test_time = []
for train_test_size in sizes:
t0 = time.time()
estimator.fit(X[:train_test_size], y[:train_test_size])
train_time.append(time.time() - t0)
t0 = time.time()
estimator.predict(X_plot[:1000])
test_time.append(time.time() - t0)
plt.plot(sizes, train_time, 'o-', color="r" if name == "SVR" else "g",
label="%s (train)" % name)
plt.plot(sizes, test_time, 'o--', color="r" if name == "SVR" else "g",
label="%s (test)" % name)
plt.xscale("log")
plt.yscale("log")
plt.xlabel("Train size")
plt.ylabel("Time (seconds)")
plt.title('Execution Time')
plt.legend(loc="best")
# Visualize learning curves
plt.figure()
svr = SVR(kernel='rbf', C=1e1, gamma=0.1)
kr = KernelRidge(kernel='rbf', alpha=0.1, gamma=0.1)
train_sizes, train_scores_svr, test_scores_svr = \
learning_curve(svr, X[:100], y[:100], train_sizes=np.linspace(0.1, 1, 10),
scoring="mean_squared_error", cv=10)
train_sizes_abs, train_scores_kr, test_scores_kr = \
learning_curve(kr, X[:100], y[:100], train_sizes=np.linspace(0.1, 1, 10),
scoring="mean_squared_error", cv=10)
plt.plot(train_sizes, test_scores_svr.mean(1), 'o-', color="r",
label="SVR")
plt.plot(train_sizes, test_scores_kr.mean(1), 'o-', color="g",
label="KRR")
plt.xlabel("Train size")
plt.ylabel("Mean Squared Error")
plt.title('Learning curves')
plt.legend(loc="best")
plt.show()
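# An illustrative addendum (not part of the original example): the docstring
# notes that the degree of sparsity, and hence the SVR prediction time,
# depends on epsilon (and C). A quick check of that claim: larger epsilon
# tolerates larger training errors and typically keeps fewer support vectors.
for eps in [0.01, 0.1, 0.5]:
    svr_eps = SVR(kernel='rbf', C=1e1, gamma=0.1, epsilon=eps)
    svr_eps.fit(X[:train_size], y[:train_size])
    print("epsilon=%.2f -> %d of %d training points kept as support vectors"
          % (eps, svr_eps.support_.shape[0], train_size))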
|
bsd-3-clause
|
MridulS/sympy
|
sympy/external/importtools.py
|
85
|
7294
|
"""Tools to assist importing optional external modules."""
from __future__ import print_function, division
import sys
# Override these in the module to change the default warning behavior.
# For example, you might set both to False before running the tests so that
# warnings are not printed to the console, or set both to True for debugging.
WARN_NOT_INSTALLED = None # Default is False
WARN_OLD_VERSION = None # Default is True
def __sympy_debug():
# helper function from sympy/__init__.py
# We don't just import SYMPY_DEBUG from that file because we don't want to
# import all of sympy just to use this module.
import os
debug_str = os.getenv('SYMPY_DEBUG', 'False')
if debug_str in ('True', 'False'):
return eval(debug_str)
else:
raise RuntimeError("unrecognized value for SYMPY_DEBUG: %s" %
debug_str)
if __sympy_debug():
WARN_OLD_VERSION = True
WARN_NOT_INSTALLED = True
def import_module(module, min_module_version=None, min_python_version=None,
warn_not_installed=None, warn_old_version=None,
module_version_attr='__version__', module_version_attr_call_args=None,
__import__kwargs={}, catch=()):
"""
Import and return a module if it is installed.
If the module is not installed, it returns None.
A minimum version for the module can be given as the keyword argument
min_module_version. This should be comparable against the module version.
By default, module.__version__ is used to get the module version. To
override this, set the module_version_attr keyword argument. If the
attribute of the module to get the version should be called (e.g.,
module.version()), then set module_version_attr_call_args to the args such
that module.module_version_attr(*module_version_attr_call_args) returns the
module's version.
If the module version is less than min_module_version using the Python <
comparison, None will be returned, even if the module is installed. You can
use this to keep from importing an incompatible older version of a module.
You can also specify a minimum Python version by using the
min_python_version keyword argument. This should be comparable against
sys.version_info.
If the keyword argument warn_not_installed is set to True, the function will
emit a UserWarning when the module is not installed.
If the keyword argument warn_old_version is set to True, the function will
emit a UserWarning when the library is installed, but cannot be imported
because of the min_module_version or min_python_version options.
Note that because of the way warnings are handled, a warning will be
emitted for each module only once. You can change the default warning
behavior by overriding the values of WARN_NOT_INSTALLED and WARN_OLD_VERSION
in sympy.external.importtools. By default, WARN_NOT_INSTALLED is False and
WARN_OLD_VERSION is True.
This function uses __import__() to import the module. To pass additional
options to __import__(), use the __import__kwargs keyword argument. For
example, to import a submodule A.B, you must pass a nonempty fromlist option
to __import__. See the docstring of __import__().
This catches ImportError to determine if the module is not installed. To
catch additional errors, pass them as a tuple to the catch keyword
argument.
Examples
========
>>> from sympy.external import import_module
>>> numpy = import_module('numpy')
>>> numpy = import_module('numpy', min_python_version=(2, 7),
... warn_old_version=False)
>>> numpy = import_module('numpy', min_module_version='1.5',
... warn_old_version=False) # numpy.__version__ is a string
>>> # gmpy does not have __version__, but it does have gmpy.version()
>>> gmpy = import_module('gmpy', min_module_version='1.14',
... module_version_attr='version', module_version_attr_call_args=(),
... warn_old_version=False)
>>> # To import a submodule, you must pass a nonempty fromlist to
>>> # __import__(). The values do not matter.
>>> p3 = import_module('mpl_toolkits.mplot3d',
... __import__kwargs={'fromlist':['something']})
>>> # matplotlib.pyplot can raise RuntimeError when the display cannot be opened
>>> matplotlib = import_module('matplotlib',
... __import__kwargs={'fromlist':['pyplot']}, catch=(RuntimeError,))
"""
# keyword argument overrides default, and global variable overrides
# keyword argument.
warn_old_version = (WARN_OLD_VERSION if WARN_OLD_VERSION is not None
else warn_old_version or True)
warn_not_installed = (WARN_NOT_INSTALLED if WARN_NOT_INSTALLED is not None
else warn_not_installed or False)
import warnings
# Check Python first so we don't waste time importing a module we can't use
if min_python_version:
if sys.version_info < min_python_version:
if warn_old_version:
warnings.warn("Python version is too old to use %s "
"(%s or newer required)" % (
module, '.'.join(map(str, min_python_version))),
UserWarning)
return
# PyPy 1.6 has rudimentary NumPy support and importing it produces errors, so skip it
if module == 'numpy' and '__pypy__' in sys.builtin_module_names:
return
try:
mod = __import__(module, **__import__kwargs)
## there's something funny about imports with matplotlib and py3k. doing
## from matplotlib import collections
## gives python's stdlib collections module. explicitly re-importing
## the module fixes this.
from_list = __import__kwargs.get('fromlist', tuple())
for submod in from_list:
if submod == 'collections' and mod.__name__ == 'matplotlib':
__import__(module + '.' + submod)
except ImportError:
if warn_not_installed:
warnings.warn("%s module is not installed" % module, UserWarning)
return
except catch as e:
if warn_not_installed:
warnings.warn(
"%s module could not be used (%s)" % (module, repr(e)))
return
if min_module_version:
modversion = getattr(mod, module_version_attr)
if module_version_attr_call_args is not None:
modversion = modversion(*module_version_attr_call_args)
if modversion < min_module_version:
if warn_old_version:
# Attempt to create a pretty string version of the version
if isinstance(min_module_version, basestring):
verstr = min_module_version
elif isinstance(min_module_version, (tuple, list)):
verstr = '.'.join(map(str, min_module_version))
else:
# Either don't know what this is. Hopefully
# it's something that has a nice str version, like an int.
verstr = str(min_module_version)
warnings.warn("%s version is too old to use "
"(%s or newer required)" % (module, verstr),
UserWarning)
return
return mod
|
bsd-3-clause
|
niliafsari/KSP-SN
|
findSNserver.py
|
1
|
2601
|
from astropy.io import fits
from astropy.wcs import WCS
import numpy as np
import os
import sys
import matplotlib.pyplot as plt
import matplotlib.colors as colors
import glob
def findSN(filename,verbosity=0, directory=''):
filename_raw = filename
info = filename.split('.')
ref_format=info[0]+'.'+info[1]+'.'+info[2]+'*.REF.fits'
reference=glob.glob(ref_format)
ax1=plt.subplot(1, 2, 1)
hdulist = fits.open(filename_raw)
image = hdulist[0].data
image1 = hdulist[0].data
header = hdulist[0].header
coord = '92.654754:-34.14111'
RA, DEC = [float(coord) for coord in coord.split(':')]
hdulist.close()
wcs = WCS(filename_raw)
Xo, Yo = wcs.all_world2pix(RA, DEC, 0)
Xo, Yo = int(Xo), int(Yo)
r = 100
Y = np.linspace(Yo - r, Yo + r, 2 * r)
X = np.linspace(Xo - r, Xo + r, 2 * r)
y, x = np.meshgrid(Y, X)
dat = image.T[Xo - r:Xo + r, Yo - r:Yo + r]
#print np.max(dat)
plt.pcolormesh(x, y, image.T[Xo - r:Xo + r, Yo - r:Yo + r],
cmap='gray_r', vmax=150, vmin=-40)
ax1.set_title("original")
plt.axis([Xo - r, Xo + r, Yo - r, Yo + r])
plt.gca().set_aspect('equal', adjustable='box')
ax2=plt.subplot(1, 2, 2)
    hdulist = fits.open(reference[0])  # glob returns a list; open the first matching REF image
image = hdulist[0].data
image2 = hdulist[0].data
header = hdulist[0].header
coord = '92.654754:-34.14111'
RA, DEC = [float(coord) for coord in coord.split(':')]
hdulist.close()
wcs = WCS(filename)
Xo, Yo = wcs.all_world2pix(RA, DEC, 0)
Xo, Yo = int(Xo), int(Yo)
Y = np.linspace(Yo - r, Yo + r, 2 * r)
X = np.linspace(Xo - r, Xo + r, 2 * r)
y, x = np.meshgrid(Y, X)
plt.pcolormesh(x, y, image.T[Xo - r:Xo + r, Yo - r:Yo + r],
cmap='gray_r', vmax=150, vmin=-40)
plt.axis([Xo - r, Xo + r, Yo - r, Yo + r])
ax2.set_title("subtracted 2*2")
plt.gca().set_aspect('equal', adjustable='box')
plt.suptitle('Name:' + info[0] + ' Filter:' + info[2] + ' Time:' + info[3])
    plt.savefig(filename_raw.replace('.fits', '.png'))  # save before show so the saved figure is not blank
    plt.show()
plt.cla()
if __name__ == "__main__":
import argparse
# command line arguments
parser = argparse.ArgumentParser(description="plot image")
parser.add_argument("filename", type=str, help="fits image containing source")
parser.add_argument("-v", "--verbosity", action="count", default=0)
parser.add_argument("-d", "--directory", help="if identified, will output to a dir",type=str,default='')
args = parser.parse_args()
findSN(args.filename, verbosity=args.verbosity,directory=args.directory)
|
bsd-3-clause
|
maxplanck-ie/HiCExplorer
|
hicexplorer/hicPlotAverageRegions.py
|
1
|
4206
|
import warnings
warnings.simplefilter(action="ignore", category=RuntimeWarning)
warnings.simplefilter(action="ignore", category=PendingDeprecationWarning)
import argparse
from hicexplorer._version import __version__
import logging
log = logging.getLogger(__name__)
from scipy.sparse import load_npz
import matplotlib
matplotlib.use('Agg')
from matplotlib.colors import LogNorm
import matplotlib.pyplot as plt
import numpy as np
from scipy.ndimage import rotate
from mpl_toolkits.axes_grid1 import make_axes_locatable
def parse_arguments(args=None):
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
add_help=False,
description="""
hicPlotAverageRegions plots the data computed by hicAverageRegions. It shows the summed-up and averaged regions around
all given reference points. This tool is useful for plotting differences at certain reference points, for example TAD boundaries between samples.
""")
parserRequired = parser.add_argument_group('Required arguments')
parserRequired.add_argument('--matrix', '-m',
help='The averaged regions file computed by hicAverageRegions (npz file).',
required=True)
parserRequired.add_argument('--outputFile', '-o',
help='The averaged regions plot.',
required=True)
parserOpt = parser.add_argument_group('Optional arguments')
parserOpt.add_argument('--log1p',
help='Plot log1p of the matrix values.',
action='store_true')
parserOpt.add_argument('--log',
help='Plot log of the matrix values.',
action='store_true')
parserOpt.add_argument('--colorMap',
help='Color map to use for the heatmap. Available '
'values can be seen here: '
'http://matplotlib.org/examples/color/colormaps_reference.html',
default='hot_r')
parserOpt.add_argument('--vMin',
help='Minimum score value.',
type=float,
default=None)
parserOpt.add_argument('--vMax',
help='Maximum score value.',
type=float,
default=None)
parserOpt.add_argument('--dpi',
                           help='Resolution of image if the '
                           'output is a raster graphics image (e.g. png, jpg).',
type=int,
default=300)
parserOpt.add_argument('--help', '-h', action='help', help='show this help message and exit')
parserOpt.add_argument('--version', action='version',
version='%(prog)s {}'.format(__version__))
return parser
def main(args=None):
args = parse_arguments().parse_args(args)
matrix = load_npz(args.matrix)
matrix = matrix.toarray()
matrix = np.triu(matrix)
matrix = rotate(matrix, 45, cval=np.nan)
matrix_shapes = matrix.shape
matrix = matrix[:matrix_shapes[0] // 2, :]
if args.log1p:
matrix += 1
fig = plt.figure()
axis = plt.gca()
# Force the scale to correspond to vMin vMax even if these values
# are not in the range.
if args.log:
norm = LogNorm(vmin=args.vMin, vmax=args.vMax)
elif args.log1p:
if args.vMin is not None:
vMin = args.vMin + 1
else:
vMin = None
if args.vMax is not None:
vMax = args.vMax + 1
else:
vMax = None
norm = LogNorm(vmin=vMin, vmax=vMax)
else:
norm = matplotlib.colors.Normalize(vmin=args.vMin, vmax=args.vMax)
matrix_axis = axis.matshow(matrix, cmap=args.colorMap, norm=norm)
divider = make_axes_locatable(axis)
cax = divider.append_axes("right", size="5%", pad=0.05)
axis.xaxis.set_visible(False)
axis.yaxis.set_visible(False)
fig.colorbar(matrix_axis, cax=cax)
plt.tight_layout()
plt.savefig(args.outputFile, dpi=args.dpi)
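# Example invocation (illustrative only; the file names are hypothetical, but
# the flags are the ones defined in parse_arguments above):
#
#   hicPlotAverageRegions -m averaged_regions.npz -o averaged_regions.png \
#       --log1p --colorMap hot_r --dpi 300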
|
gpl-2.0
|
wavelets/lifelines
|
lifelines/fitters/coxph_fitter.py
|
3
|
18580
|
# -*- coding: utf-8 -*-
from __future__ import print_function
import numpy as np
import pandas as pd
from numpy import dot, exp
from numpy.linalg import solve, norm, inv
from scipy.integrate import trapz
import scipy.stats as stats
from lifelines.fitters import BaseFitter
from lifelines.utils import survival_table_from_events, inv_normal_cdf, normalize,\
significance_code, concordance_index, _get_index, qth_survival_times
class CoxPHFitter(BaseFitter):
"""
This class implements fitting Cox's proportional hazard model:
h(t|x) = h_0(t)*exp(x'*beta)
Parameters:
alpha: the level in the confidence intervals.
tie_method: specify how the fitter should deal with ties. Currently only
'Efron' is available.
      normalize: subtract the mean and divide by the standard deviation of each covariate
        in the input data before performing any fitting.
      penalizer: Attach an L2 penalizer to the size of the coefficients during regression. This improves
stability of the estimates and controls for high correlation between covariates.
For example, this shrinks the absolute value of beta_i. Recommended, even if a small value.
The penalty is 1/2 * penalizer * ||beta||^2.
"""
def __init__(self, alpha=0.95, tie_method='Efron', normalize=True, penalizer=0.0):
if not (0 < alpha <= 1.):
raise ValueError('alpha parameter must be between 0 and 1.')
if penalizer < 0:
raise ValueError("penalizer parameter must be >= 0.")
if tie_method != 'Efron':
raise NotImplementedError("Only Efron is available atm.")
self.alpha = alpha
self.normalize = normalize
self.tie_method = tie_method
self.penalizer = penalizer
self.strata = None
def _get_efron_values(self, X, beta, T, E, include_likelihood=False):
"""
Calculates the first and second order vector differentials,
with respect to beta. If 'include_likelihood' is True, then
the log likelihood is also calculated. This is omitted by default
to speed up the fit.
Note that X, T, E are assumed to be sorted on T!
Parameters:
X: (n,d) numpy array of observations.
beta: (1, d) numpy array of coefficients.
T: (n) numpy array representing observed durations.
E: (n) numpy array representing death events.
Returns:
hessian: (d, d) numpy array,
gradient: (1, d) numpy array
log_likelihood: double, if include_likelihood=True
"""
n, d = X.shape
hessian = np.zeros((d, d))
gradient = np.zeros((1, d))
log_lik = 0
# Init risk and tie sums to zero
x_tie_sum = np.zeros((1, d))
risk_phi, tie_phi = 0, 0
risk_phi_x, tie_phi_x = np.zeros((1, d)), np.zeros((1, d))
risk_phi_x_x, tie_phi_x_x = np.zeros((d, d)), np.zeros((d, d))
# Init number of ties
tie_count = 0
# Iterate backwards to utilize recursive relationship
for i, (ti, ei) in reversed(list(enumerate(zip(T, E)))):
# Doing it like this to preserve shape
xi = X[i:i + 1]
# Calculate phi values
phi_i = exp(dot(xi, beta))
phi_x_i = dot(phi_i, xi)
phi_x_x_i = dot(xi.T, xi) * phi_i
# Calculate sums of Risk set
risk_phi += phi_i
risk_phi_x += phi_x_i
risk_phi_x_x += phi_x_x_i
# Calculate sums of Ties, if this is an event
if ei:
x_tie_sum += xi
tie_phi += phi_i
tie_phi_x += phi_x_i
tie_phi_x_x += phi_x_x_i
# Keep track of count
tie_count += 1
if i > 0 and T[i - 1] == ti:
# There are more ties/members of the risk set
continue
elif tie_count == 0:
# Only censored with current time, move on
continue
            # There was at least one event and no more ties remain. Time to sum.
partial_gradient = np.zeros((1, d))
for l in range(tie_count):
c = l / tie_count
denom = (risk_phi - c * tie_phi)
z = (risk_phi_x - c * tie_phi_x)
if denom == 0:
# Can't divide by zero
raise ValueError("Denominator was zero")
# Gradient
partial_gradient += z / denom
# Hessian
a1 = (risk_phi_x_x - c * tie_phi_x_x) / denom
# In case z and denom both are really small numbers,
# make sure to do division before multiplications
a2 = dot(z.T / denom, z / denom)
hessian -= (a1 - a2)
if include_likelihood:
log_lik -= np.log(denom).ravel()[0]
# Values outside tie sum
gradient += x_tie_sum - partial_gradient
if include_likelihood:
log_lik += dot(x_tie_sum, beta).ravel()[0]
# reset tie values
tie_count = 0
x_tie_sum = np.zeros((1, d))
tie_phi = 0
tie_phi_x = np.zeros((1, d))
tie_phi_x_x = np.zeros((d, d))
if include_likelihood:
return hessian, gradient, log_lik
else:
return hessian, gradient
def _newton_rhaphson(self, X, T, E, initial_beta=None, step_size=1.,
precision=10e-5, show_progress=True, include_likelihood=False):
"""
        Newton-Raphson algorithm for fitting the CPH model.
Note that data is assumed to be sorted on T!
Parameters:
X: (n,d) Pandas DataFrame of observations.
T: (n) Pandas Series representing observed durations.
E: (n) Pandas Series representing death events.
initial_beta: (1,d) numpy array of initial starting point for
NR algorithm. Default 0.
step_size: float > 0.001 to determine a starting step size in NR algorithm.
precision: the convergence halts if the norm of delta between
                successive positions is less than this precision value.
include_likelihood: saves the final log-likelihood to the CoxPHFitter under _log_likelihood.
Returns:
beta: (1,d) numpy array.
"""
assert precision <= 1., "precision must be less than or equal to 1."
n, d = X.shape
# Want as bools
E = E.astype(bool)
# make sure betas are correct size.
if initial_beta is not None:
assert initial_beta.shape == (d, 1)
beta = initial_beta
else:
beta = np.zeros((d, 1))
# Method of choice is just efron right now
if self.tie_method == 'Efron':
get_gradients = self._get_efron_values
else:
raise NotImplementedError("Only Efron is available.")
i = 1
converging = True
        # 50 iteration steps with N-R is a lot.
# Expected convergence is ~10 steps
while converging and i < 50 and step_size > 0.001:
if self.strata is None:
output = get_gradients(X.values, beta, T.values, E.values, include_likelihood=include_likelihood)
h, g = output[:2]
else:
g = np.zeros_like(beta).T
h = np.zeros((beta.shape[0], beta.shape[0]))
ll = 0
for strata in np.unique(X.index):
stratified_X, stratified_T, stratified_E = X.loc[[strata]], T.loc[[strata]], E.loc[[strata]]
output = get_gradients(stratified_X.values, beta, stratified_T.values, stratified_E.values, include_likelihood=include_likelihood)
_h, _g = output[:2]
g += _g
h += _h
ll += output[2] if include_likelihood else 0
if self.penalizer > 0:
# add the gradient and hessian of the l2 term
g -= self.penalizer * beta.T
h.flat[::d + 1] -= self.penalizer
delta = solve(-h, step_size * g.T)
if np.any(np.isnan(delta)):
raise ValueError("delta contains nan value(s). Convergence halted.")
# Only allow small steps
if norm(delta) > 10:
step_size *= 0.5
continue
beta += delta
# Save these as pending result
hessian, gradient = h, g
if norm(delta) < precision:
converging = False
if ((i % 10) == 0) and show_progress:
print("Iteration %d: delta = %.5f" % (i, norm(delta)))
i += 1
self._hessian_ = hessian
self._score_ = gradient
if include_likelihood:
self._log_likelihood = output[-1] if self.strata is None else ll
if show_progress:
print("Convergence completed after %d iterations." % (i))
return beta
def fit(self, df, duration_col, event_col=None,
show_progress=False, initial_beta=None, include_likelihood=False,
strata=None):
"""
        Fit the Cox Proportional Hazard model to a dataset. Tied survival times
are handled using Efron's tie-method.
Parameters:
df: a Pandas dataframe with necessary columns `duration_col` and
`event_col`, plus other covariates. `duration_col` refers to
the lifetimes of the subjects. `event_col` refers to whether
                the 'death' event was observed: 1 if observed, 0 otherwise (censored).
duration_col: the column in dataframe that contains the subjects'
lifetimes.
event_col: the column in dataframe that contains the subjects' death
observation. If left as None, assume all individuals are non-censored.
show_progress: since the fitter is iterative, show convergence
diagnostics.
initial_beta: initialize the starting point of the iterative
algorithm. Default is the zero vector.
include_likelihood: saves the final log-likelihood to the CoxPHFitter under
the property _log_likelihood.
            strata: specify a list of columns to use in stratification. This is useful if a
                categorical covariate does not obey the proportional hazard assumption. This
                is used similarly to the `strata` expression in R.
See http://courses.washington.edu/b515/l17.pdf.
Returns:
self, with additional properties: hazards_
"""
df = df.copy()
# Sort on time
df.sort(duration_col, inplace=True)
# remove strata coefs
self.strata = strata
if strata is not None:
df = df.set_index(strata)
# Extract time and event
T = df[duration_col]
del df[duration_col]
if event_col is None:
E = pd.Series(np.ones(df.shape[0]), index=df.index)
else:
E = df[event_col]
del df[event_col]
# Store original non-normalized data
self.data = df if self.strata is None else df.reset_index()
if self.normalize:
# Need to normalize future inputs as well
self._norm_mean = df.mean(0)
self._norm_std = df.std(0)
df = normalize(df)
E = E.astype(bool)
self._check_values(df)
hazards_ = self._newton_rhaphson(df, T, E, initial_beta=initial_beta,
show_progress=show_progress,
include_likelihood=include_likelihood)
self.hazards_ = pd.DataFrame(hazards_.T, columns=df.columns,
index=['coef'])
self.confidence_intervals_ = self._compute_confidence_intervals()
self.durations = T
self.event_observed = E
self.baseline_hazard_ = self._compute_baseline_hazard()
self.baseline_cumulative_hazard_ = self.baseline_hazard_.cumsum()
self.baseline_survival_ = exp(-self.baseline_cumulative_hazard_)
return self
def _check_values(self, X):
low_var = (X.var(0) < 10e-5)
if low_var.any():
cols = str(list(X.columns[low_var]))
print("Warning: column(s) %s have very low variance.\
This may harm convergence." % cols)
def _compute_confidence_intervals(self):
alpha2 = inv_normal_cdf((1. + self.alpha) / 2.)
se = self._compute_standard_errors()
hazards = self.hazards_.values
return pd.DataFrame(np.r_[hazards - alpha2 * se,
hazards + alpha2 * se],
index=['lower-bound', 'upper-bound'],
columns=self.hazards_.columns)
def _compute_standard_errors(self):
se = np.sqrt(inv(-self._hessian_).diagonal())
return pd.DataFrame(se[None, :],
index=['se'], columns=self.hazards_.columns)
def _compute_z_values(self):
return (self.hazards_.ix['coef'] /
self._compute_standard_errors().ix['se'])
def _compute_p_values(self):
U = self._compute_z_values() ** 2
return stats.chi2.sf(U, 1)
@property
def summary(self):
"""Summary statistics describing the fit.
Set alpha property in the object before calling.
Returns
-------
df : pd.DataFrame
Contains columns coef, exp(coef), se(coef), z, p, lower, upper"""
df = pd.DataFrame(index=self.hazards_.columns)
df['coef'] = self.hazards_.ix['coef'].values
df['exp(coef)'] = exp(self.hazards_.ix['coef'].values)
df['se(coef)'] = self._compute_standard_errors().ix['se'].values
df['z'] = self._compute_z_values()
df['p'] = self._compute_p_values()
df['lower %.2f' % self.alpha] = self.confidence_intervals_.ix['lower-bound'].values
df['upper %.2f' % self.alpha] = self.confidence_intervals_.ix['upper-bound'].values
return df
def print_summary(self):
"""
Print summary statistics describing the fit.
"""
df = self.summary
# Significance codes last
df[''] = [significance_code(p) for p in df['p']]
# Print information about data first
print('n={}, number of events={}'.format(self.data.shape[0],
np.where(self.event_observed)[0].shape[0]),
end='\n\n')
print(df.to_string(float_format=lambda f: '{:.3e}'.format(f)))
# Significance code explanation
print('---')
print("Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 ",
end='\n\n')
print("Concordance = {:.3f}"
.format(concordance_index(self.durations,
-self.predict_partial_hazard(self.data).values.ravel(),
self.event_observed)))
return
def predict_partial_hazard(self, X):
"""
X: a (n,d) covariate matrix
If covariates were normalized during fitting, they are normalized
in the same way here.
        If X is a dataframe, the order of the columns does not matter. But
if X is an array, then the column ordering is assumed to be the
same as the training dataset.
Returns the partial hazard for the individuals, partial since the
        baseline hazard is not included. Equal to exp(X * beta).
"""
index = _get_index(X)
if isinstance(X, pd.DataFrame):
order = self.hazards_.columns
X = X[order]
if self.normalize:
# Assuming correct ordering and number of columns
X = normalize(X, self._norm_mean.values, self._norm_std.values)
return pd.DataFrame(exp(np.dot(X, self.hazards_.T)), index=index)
def predict_cumulative_hazard(self, X):
"""
X: a (n,d) covariate matrix
Returns the cumulative hazard for the individuals.
"""
v = self.predict_partial_hazard(X)
s_0 = self.baseline_survival_
col = _get_index(X)
return pd.DataFrame(-np.dot(np.log(s_0), v.T), index=self.baseline_survival_.index, columns=col)
def predict_survival_function(self, X):
"""
X: a (n,d) covariate matrix
Returns the survival functions for the individuals
"""
return exp(-self.predict_cumulative_hazard(X))
def predict_percentile(self, X, p=0.5):
"""
X: a (n,d) covariate matrix
Returns the median lifetimes for the individuals.
http://stats.stackexchange.com/questions/102986/percentile-loss-functions
"""
index = _get_index(X)
return qth_survival_times(p, self.predict_survival_function(X)[index])
def predict_median(self, X):
"""
X: a (n,d) covariate matrix
Returns the median lifetimes for the individuals
"""
return self.predict_percentile(X, 0.5)
def predict_expectation(self, X):
"""
        Compute the expected lifetime, E[T], using covariates X.
"""
index = _get_index(X)
v = self.predict_survival_function(X)[index]
return pd.DataFrame(trapz(v.values.T, v.index), index=index)
def predict(self, X):
return self.predict_median(X)
def _compute_baseline_hazard(self):
# http://courses.nus.edu.sg/course/stacar/internet/st3242/handouts/notes3.pdf
ind_hazards = self.predict_partial_hazard(self.data).values
event_table = survival_table_from_events(self.durations.values,
self.event_observed.values)
baseline_hazard_ = pd.DataFrame(np.zeros((event_table.shape[0], 1)),
index=event_table.index,
columns=['baseline hazard'])
for t, s in event_table.iterrows():
less = np.array(self.durations >= t)
if ind_hazards[less].sum() == 0:
v = 0
else:
v = (s['observed'] / ind_hazards[less].sum())
baseline_hazard_.ix[t] = v
return baseline_hazard_
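# A minimal usage sketch (an illustrative addition, not part of the original
# module). The column names below are made up for the example; the only
# requirement is that the DataFrame contains the covariates plus the duration
# and event columns named in fit().
#
#   import pandas as pd
#   from lifelines import CoxPHFitter
#
#   df = pd.DataFrame({
#       'T':   [5, 3, 9, 8, 7, 4, 4, 3, 2, 5],
#       'E':   [1, 1, 1, 0, 1, 1, 0, 1, 1, 1],
#       'age': [60, 65, 58, 70, 55, 62, 68, 59, 66, 61],
#   })
#   cph = CoxPHFitter()
#   cph.fit(df, duration_col='T', event_col='E')
#   cph.print_summary()
#   cph.predict_median(df[['age']])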
|
mit
|
ankurankan/scikit-learn
|
examples/plot_johnson_lindenstrauss_bound.py
|
134
|
7452
|
"""
=====================================================================
The Johnson-Lindenstrauss bound for embedding with random projections
=====================================================================
The `Johnson-Lindenstrauss lemma`_ states that any high dimensional
dataset can be randomly projected into a lower dimensional Euclidean
space while controlling the distortion in the pairwise distances.
.. _`Johnson-Lindenstrauss lemma`: http://en.wikipedia.org/wiki/Johnson%E2%80%93Lindenstrauss_lemma
Theoretical bounds
==================
The distortion introduced by a random projection `p` is asserted by
the fact that `p` defines an eps-embedding with good probability,
as defined by:
(1 - eps) ||u - v||^2 < ||p(u) - p(v)||^2 < (1 + eps) ||u - v||^2
Where u and v are any rows taken from a dataset of shape [n_samples,
n_features] and p is a projection by a random Gaussian N(0, 1) matrix
with shape [n_components, n_features] (or a sparse Achlioptas matrix).
The minimum number of components required to guarantee the eps-embedding is
given by:
n_components >= 4 log(n_samples) / (eps^2 / 2 - eps^3 / 3)
The first plot shows that with an increasing number of samples ``n_samples``,
the minimal number of dimensions ``n_components`` increases logarithmically
in order to guarantee an ``eps``-embedding.
The second plot shows that increasing the admissible
distortion ``eps`` allows one to drastically reduce the minimal number of
dimensions ``n_components`` for a given number of samples ``n_samples``.
Empirical validation
====================
We validate the above bounds on the digits dataset or on the 20 newsgroups
text document (TF-IDF word frequencies) dataset:
- for the digits dataset, some 8x8 gray-level pixel data for 500
  handwritten digit pictures are randomly projected to spaces of various
  larger numbers of dimensions ``n_components``.
- for the 20 newsgroups dataset, some 500 documents with 100k
  features in total are projected using a sparse random matrix to smaller
  Euclidean spaces with various values for the target number of dimensions
  ``n_components``.
The default dataset is the digits dataset. To run the example on the twenty
newsgroups dataset, pass the --twenty-newsgroups command line argument to this
script.
For each value of ``n_components``, we plot:
- 2D distribution of sample pairs with pairwise distances in original
and projected spaces as x and y axis respectively.
- 1D histogram of the ratio of those distances (projected / original).
We can see that for low values of ``n_components`` the distribution is wide
with many distorted pairs and a skewed distribution (due to the hard
limit of zero ratio on the left, as distances are always positive),
while for larger values of ``n_components`` the distortion is controlled
and the distances are well preserved by the random projection.
Remarks
=======
According to the JL lemma, projecting 500 samples without too much distortion
will require at least several thousand dimensions, irrespective of the
number of features of the original dataset.
Hence using random projections on the digits dataset which only has 64 features
in the input space does not make sense: it does not allow for dimensionality
reduction in this case.
On the twenty newsgroups dataset, on the other hand, the dimensionality can be decreased
from 56436 down to 10000 while reasonably preserving pairwise distances.
"""
print(__doc__)
import sys
from time import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn.random_projection import johnson_lindenstrauss_min_dim
from sklearn.random_projection import SparseRandomProjection
from sklearn.datasets import fetch_20newsgroups_vectorized
from sklearn.datasets import load_digits
from sklearn.metrics.pairwise import euclidean_distances
# Part 1: plot the theoretical dependency between n_components_min and
# n_samples
# range of admissible distortions
eps_range = np.linspace(0.1, 0.99, 5)
colors = plt.cm.Blues(np.linspace(0.3, 1.0, len(eps_range)))
# range of number of samples (observation) to embed
n_samples_range = np.logspace(1, 9, 9)
plt.figure()
for eps, color in zip(eps_range, colors):
min_n_components = johnson_lindenstrauss_min_dim(n_samples_range, eps=eps)
plt.loglog(n_samples_range, min_n_components, color=color)
plt.legend(["eps = %0.1f" % eps for eps in eps_range], loc="lower right")
plt.xlabel("Number of observations to eps-embed")
plt.ylabel("Minimum number of dimensions")
plt.title("Johnson-Lindenstrauss bounds:\nn_samples vs n_components")
# range of admissible distortions
eps_range = np.linspace(0.01, 0.99, 100)
# range of number of samples (observation) to embed
n_samples_range = np.logspace(2, 6, 5)
colors = plt.cm.Blues(np.linspace(0.3, 1.0, len(n_samples_range)))
plt.figure()
for n_samples, color in zip(n_samples_range, colors):
min_n_components = johnson_lindenstrauss_min_dim(n_samples, eps=eps_range)
plt.semilogy(eps_range, min_n_components, color=color)
plt.legend(["n_samples = %d" % n for n in n_samples_range], loc="upper right")
plt.xlabel("Distortion eps")
plt.ylabel("Minimum number of dimensions")
plt.title("Johnson-Lindenstrauss bounds:\nn_components vs eps")
# Part 2: perform sparse random projection of some digits images which are
# quite low dimensional and dense or documents of the 20 newsgroups dataset
# which is both high dimensional and sparse
if '--twenty-newsgroups' in sys.argv:
# Need an internet connection hence not enabled by default
data = fetch_20newsgroups_vectorized().data[:500]
else:
data = load_digits().data[:500]
n_samples, n_features = data.shape
print("Embedding %d samples with dim %d using various random projections"
% (n_samples, n_features))
n_components_range = np.array([300, 1000, 10000])
dists = euclidean_distances(data, squared=True).ravel()
# select only non-identical samples pairs
nonzero = dists != 0
dists = dists[nonzero]
for n_components in n_components_range:
t0 = time()
rp = SparseRandomProjection(n_components=n_components)
projected_data = rp.fit_transform(data)
print("Projected %d samples from %d to %d in %0.3fs"
% (n_samples, n_features, n_components, time() - t0))
if hasattr(rp, 'components_'):
n_bytes = rp.components_.data.nbytes
n_bytes += rp.components_.indices.nbytes
print("Random matrix with size: %0.3fMB" % (n_bytes / 1e6))
projected_dists = euclidean_distances(
projected_data, squared=True).ravel()[nonzero]
plt.figure()
plt.hexbin(dists, projected_dists, gridsize=100, cmap=plt.cm.PuBu)
plt.xlabel("Pairwise squared distances in original space")
plt.ylabel("Pairwise squared distances in projected space")
plt.title("Pairwise distances distribution for n_components=%d" %
n_components)
cb = plt.colorbar()
cb.set_label('Sample pairs counts')
rates = projected_dists / dists
print("Mean distances rate: %0.2f (%0.2f)"
% (np.mean(rates), np.std(rates)))
plt.figure()
plt.hist(rates, bins=50, normed=True, range=(0., 2.))
plt.xlabel("Squared distances rate: projected / original")
plt.ylabel("Distribution of samples pairs")
plt.title("Histogram of pairwise distance rates for n_components=%d" %
n_components)
# TODO: compute the expected value of eps and add them to the previous plot
# as vertical lines / region
plt.show()
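# Quick numerical check (an illustrative addition) of the bound quoted above:
# for the 500 samples embedded in this example and a distortion of eps=0.1,
# the lemma already calls for a few thousand target dimensions.
print("JL minimum n_components for n_samples=500, eps=0.1: %d"
      % johnson_lindenstrauss_min_dim(500, eps=0.1))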
|
bsd-3-clause
|
jblackburne/scikit-learn
|
sklearn/utils/setup.py
|
24
|
2920
|
import os
from os.path import join
from sklearn._build_utils import get_blas_info
def configuration(parent_package='', top_path=None):
import numpy
from numpy.distutils.misc_util import Configuration
config = Configuration('utils', parent_package, top_path)
config.add_subpackage('sparsetools')
cblas_libs, blas_info = get_blas_info()
cblas_compile_args = blas_info.pop('extra_compile_args', [])
cblas_includes = [join('..', 'src', 'cblas'),
numpy.get_include(),
blas_info.pop('include_dirs', [])]
libraries = []
if os.name == 'posix':
libraries.append('m')
cblas_libs.append('m')
config.add_extension('sparsefuncs_fast', sources=['sparsefuncs_fast.c'],
libraries=libraries)
config.add_extension('arrayfuncs',
sources=['arrayfuncs.c'],
depends=[join('src', 'cholesky_delete.h')],
libraries=cblas_libs,
include_dirs=cblas_includes,
extra_compile_args=cblas_compile_args,
**blas_info
)
config.add_extension(
'murmurhash',
sources=['murmurhash.c', join('src', 'MurmurHash3.cpp')],
include_dirs=['src'])
config.add_extension('lgamma',
sources=['lgamma.c', join('src', 'gamma.c')],
include_dirs=['src'],
libraries=libraries)
config.add_extension('graph_shortest_path',
sources=['graph_shortest_path.c'],
include_dirs=[numpy.get_include()])
config.add_extension('fast_dict',
sources=['fast_dict.cpp'],
language="c++",
include_dirs=[numpy.get_include()],
libraries=libraries)
config.add_extension('seq_dataset',
sources=['seq_dataset.c'],
include_dirs=[numpy.get_include()])
config.add_extension('weight_vector',
sources=['weight_vector.c'],
include_dirs=cblas_includes,
libraries=cblas_libs,
**blas_info)
config.add_extension("_random",
sources=["_random.c"],
include_dirs=[numpy.get_include()],
libraries=libraries)
config.add_extension("_logistic_sigmoid",
sources=["_logistic_sigmoid.c"],
include_dirs=[numpy.get_include()],
libraries=libraries)
config.add_subpackage('tests')
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
|
bsd-3-clause
|
RayMick/scikit-learn
|
examples/decomposition/plot_incremental_pca.py
|
244
|
1878
|
"""
===============
Incremental PCA
===============
Incremental principal component analysis (IPCA) is typically used as a
replacement for principal component analysis (PCA) when the dataset to be
decomposed is too large to fit in memory. IPCA builds a low-rank approximation
for the input data using an amount of memory which is independent of the
number of input data samples. It is still dependent on the input data features,
but changing the batch size allows for control of memory usage.
This example serves as a visual check that IPCA is able to find a similar
projection of the data to PCA (up to a sign flip), while only processing a
few samples at a time. This can be considered a "toy example", as IPCA is
intended for large datasets which do not fit in main memory, requiring
incremental approaches.
"""
print(__doc__)
# Authors: Kyle Kastner
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import load_iris
from sklearn.decomposition import PCA, IncrementalPCA
iris = load_iris()
X = iris.data
y = iris.target
n_components = 2
ipca = IncrementalPCA(n_components=n_components, batch_size=10)
X_ipca = ipca.fit_transform(X)
pca = PCA(n_components=n_components)
X_pca = pca.fit_transform(X)
for X_transformed, title in [(X_ipca, "Incremental PCA"), (X_pca, "PCA")]:
plt.figure(figsize=(8, 8))
for c, i, target_name in zip("rgb", [0, 1, 2], iris.target_names):
plt.scatter(X_transformed[y == i, 0], X_transformed[y == i, 1],
c=c, label=target_name)
if "Incremental" in title:
err = np.abs(np.abs(X_pca) - np.abs(X_ipca)).mean()
plt.title(title + " of iris dataset\nMean absolute unsigned error "
"%.6f" % err)
else:
plt.title(title + " of iris dataset")
plt.legend(loc="best")
plt.axis([-4, 4, -1.5, 1.5])
plt.show()
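# A minimal sketch (an illustrative addition, not part of the original
# example) of the out-of-core usage pattern that motivates IncrementalPCA:
# feed the data in chunks via partial_fit instead of holding everything in
# memory at once. Each chunk must contain at least n_components samples.
ipca_stream = IncrementalPCA(n_components=n_components)
for batch in np.array_split(X, 15):
    ipca_stream.partial_fit(batch)
X_stream = ipca_stream.transform(X)
print("streamed IPCA output shape: %s" % (X_stream.shape,))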
|
bsd-3-clause
|
rvraghav93/scikit-learn
|
examples/neural_networks/plot_rbm_logistic_classification.py
|
37
|
4608
|
"""
==============================================================
Restricted Boltzmann Machine features for digit classification
==============================================================
For greyscale image data where pixel values can be interpreted as degrees of
blackness on a white background, like handwritten digit recognition, the
Bernoulli Restricted Boltzmann machine model (:class:`BernoulliRBM
<sklearn.neural_network.BernoulliRBM>`) can perform effective non-linear
feature extraction.
In order to learn good latent representations from a small dataset, we
artificially generate more labeled data by perturbing the training data with
linear shifts of 1 pixel in each direction.
This example shows how to build a classification pipeline with a BernoulliRBM
feature extractor and a :class:`LogisticRegression
<sklearn.linear_model.LogisticRegression>` classifier. The hyperparameters
of the entire model (learning rate, hidden layer size, regularization)
were optimized by grid search, but the search is not reproduced here because
of runtime constraints.
Logistic regression on raw pixel values is presented for comparison. The
example shows that the features extracted by the BernoulliRBM help improve the
classification accuracy.
"""
from __future__ import print_function
print(__doc__)
# Authors: Yann N. Dauphin, Vlad Niculae, Gabriel Synnaeve
# License: BSD
import numpy as np
import matplotlib.pyplot as plt
from scipy.ndimage import convolve
from sklearn import linear_model, datasets, metrics
from sklearn.model_selection import train_test_split
from sklearn.neural_network import BernoulliRBM
from sklearn.pipeline import Pipeline
# #############################################################################
# Setting up
def nudge_dataset(X, Y):
"""
This produces a dataset 5 times bigger than the original one,
by moving the 8x8 images in X around by 1px to left, right, down, up
"""
direction_vectors = [
[[0, 1, 0],
[0, 0, 0],
[0, 0, 0]],
[[0, 0, 0],
[1, 0, 0],
[0, 0, 0]],
[[0, 0, 0],
[0, 0, 1],
[0, 0, 0]],
[[0, 0, 0],
[0, 0, 0],
[0, 1, 0]]]
shift = lambda x, w: convolve(x.reshape((8, 8)), mode='constant',
weights=w).ravel()
X = np.concatenate([X] +
[np.apply_along_axis(shift, 1, X, vector)
for vector in direction_vectors])
Y = np.concatenate([Y for _ in range(5)], axis=0)
return X, Y
# Load Data
digits = datasets.load_digits()
X = np.asarray(digits.data, 'float32')
X, Y = nudge_dataset(X, digits.target)
X = (X - np.min(X, 0)) / (np.max(X, 0) + 0.0001) # 0-1 scaling
X_train, X_test, Y_train, Y_test = train_test_split(X, Y,
test_size=0.2,
random_state=0)
# Models we will use
logistic = linear_model.LogisticRegression()
rbm = BernoulliRBM(random_state=0, verbose=True)
classifier = Pipeline(steps=[('rbm', rbm), ('logistic', logistic)])
# #############################################################################
# Training
# Hyper-parameters. These were set by cross-validation,
# using a GridSearchCV. Here we are not performing cross-validation to
# save time.
rbm.learning_rate = 0.06
rbm.n_iter = 20
# More components tend to give better prediction performance, but larger
# fitting time
rbm.n_components = 100
logistic.C = 6000.0
# Training RBM-Logistic Pipeline
classifier.fit(X_train, Y_train)
# Training Logistic regression
logistic_classifier = linear_model.LogisticRegression(C=100.0)
logistic_classifier.fit(X_train, Y_train)
# #############################################################################
# Evaluation
print()
print("Logistic regression using RBM features:\n%s\n" % (
metrics.classification_report(
Y_test,
classifier.predict(X_test))))
print("Logistic regression using raw pixel features:\n%s\n" % (
metrics.classification_report(
Y_test,
logistic_classifier.predict(X_test))))
# #############################################################################
# Plotting
plt.figure(figsize=(4.2, 4))
for i, comp in enumerate(rbm.components_):
plt.subplot(10, 10, i + 1)
plt.imshow(comp.reshape((8, 8)), cmap=plt.cm.gray_r,
interpolation='nearest')
plt.xticks(())
plt.yticks(())
plt.suptitle('100 components extracted by RBM', fontsize=16)
plt.subplots_adjust(0.08, 0.02, 0.92, 0.85, 0.08, 0.23)
plt.show()
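# A sketch of the kind of grid search mentioned in the docstring. This is an
# illustrative addition: the parameter grid below is an assumption, not the
# grid actually used to obtain the hyper-parameters above, and it is left
# commented out because it is expensive to run.
#
#   from sklearn.model_selection import GridSearchCV
#
#   param_grid = {
#       'rbm__learning_rate': [0.01, 0.06, 0.1],
#       'rbm__n_components': [50, 100, 200],
#       'logistic__C': [1e3, 6e3, 1e4],
#   }
#   search = GridSearchCV(classifier, param_grid, cv=3)
#   search.fit(X_train, Y_train)
#   print(search.best_params_)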
|
bsd-3-clause
|
bricegnichols/urbansim
|
urbansim/models/util.py
|
5
|
9254
|
"""
Utilities used within the ``urbansim.models`` package.
"""
import collections
import logging
import numbers
from StringIO import StringIO
from tokenize import generate_tokens, NAME
import numpy as np
import pandas as pd
import patsy
from zbox import toolz as tz
from ..utils.logutil import log_start_finish
logger = logging.getLogger(__name__)
def apply_filter_query(df, filters=None):
"""
Use the DataFrame.query method to filter a table down to the
desired rows.
Parameters
----------
df : pandas.DataFrame
filters : list of str or str, optional
List of filters to apply. Will be joined together with
' and ' and passed to DataFrame.query. A string will be passed
straight to DataFrame.query.
If not supplied no filtering will be done.
Returns
-------
filtered_df : pandas.DataFrame
"""
with log_start_finish('apply filter query: {!r}'.format(filters), logger):
if filters:
if isinstance(filters, str):
query = filters
else:
query = ' and '.join(filters)
return df.query(query)
else:
return df
def _filterize(name, value):
"""
    Turn a `name` and `value` into a string expression compatible with
    the ``DataFrame.query`` method.
Parameters
----------
name : str
Should be the name of a column in the table to which the
filter will be applied.
A suffix of '_max' will result in a "less than" filter,
a suffix of '_min' will result in a "greater than or equal to" filter,
and no recognized suffix will result in an "equal to" filter.
value : any
Value side of filter for comparison to column values.
Returns
-------
filter_exp : str
"""
if name.endswith('_min'):
name = name[:-4]
comp = '>='
elif name.endswith('_max'):
name = name[:-4]
comp = '<'
else:
comp = '=='
result = '{} {} {!r}'.format(name, comp, value)
logger.debug(
'converted name={} and value={} to filter {}'.format(
name, value, result))
return result
def filter_table(table, filter_series, ignore=None):
"""
    Filter a table based on a set of restrictions given in a
    Series of column name / filter parameter pairs. The column
names can have suffixes `_min` and `_max` to indicate
"less than" and "greater than" constraints.
Parameters
----------
table : pandas.DataFrame
Table to filter.
filter_series : pandas.Series
Series of column name / value pairs of filter constraints.
        Columns that end with '_max' will be used to create
        "less than" filters; columns that end with '_min' will be
        used to create "greater than or equal to" filters.
A column with no suffix will be used to make an 'equal to' filter.
ignore : sequence of str, optional
List of column names that should not be used for filtering.
Returns
-------
filtered : pandas.DataFrame
"""
with log_start_finish('filter table', logger):
ignore = ignore if ignore else set()
filters = [_filterize(name, val)
for name, val in filter_series.iteritems()
if not (name in ignore or
(isinstance(val, numbers.Number) and
np.isnan(val)))]
return apply_filter_query(table, filters)
def concat_indexes(indexes):
"""
Concatenate a sequence of pandas Indexes.
Parameters
----------
indexes : sequence of pandas.Index
Returns
-------
pandas.Index
"""
return pd.Index(np.concatenate(indexes))
def has_constant_expr(expr):
"""
    Report whether a model expression has a constant-specific term.
    That is, a term explicitly specifying whether the model should or
    should not include a constant (e.g. '+ 1' or '- 1').
Parameters
----------
expr : str
Model expression to check.
Returns
-------
has_constant : bool
"""
def has_constant(node):
if node.type == 'ONE':
return True
for n in node.args:
if has_constant(n):
return True
return False
return has_constant(patsy.parse_formula.parse_formula(expr))
def str_model_expression(expr, add_constant=True):
"""
We support specifying model expressions as strings, lists, or dicts;
but for use with patsy and statsmodels we need a string.
This function will take any of those as input and return a string.
Parameters
----------
expr : str, iterable, or dict
A string will be returned unmodified except to add or remove
a constant.
An iterable sequence will be joined together with ' + '.
A dictionary should have ``right_side`` and, optionally,
``left_side`` keys. The ``right_side`` can be a list or a string
and will be handled as above. If ``left_side`` is present it will
be joined with ``right_side`` with ' ~ '.
add_constant : bool, optional
Whether to add a ' + 1' (if True) or ' - 1' (if False) to the model.
If the expression already has a '+ 1' or '- 1' this option will be
ignored.
Returns
-------
model_expression : str
A string model expression suitable for use with statsmodels and patsy.
"""
if not isinstance(expr, str):
if isinstance(expr, collections.Mapping):
left_side = expr.get('left_side')
right_side = str_model_expression(expr['right_side'], add_constant)
else:
# some kind of iterable like a list
left_side = None
right_side = ' + '.join(expr)
if left_side:
model_expression = ' ~ '.join((left_side, right_side))
else:
model_expression = right_side
else:
model_expression = expr
if not has_constant_expr(model_expression):
if add_constant:
model_expression += ' + 1'
else:
model_expression += ' - 1'
logger.debug(
'converted expression: {!r} to model: {!r}'.format(
expr, model_expression))
return model_expression
def sorted_groupby(df, groupby):
"""
Perform a groupby on a DataFrame using a specific column
    and assuming that column is sorted.
Parameters
----------
df : pandas.DataFrame
groupby : object
Column name on which to groupby. This column must be sorted.
Returns
-------
generator
Yields pairs of group_name, DataFrame.
"""
start = 0
prev = df[groupby].iloc[start]
for i, x in enumerate(df[groupby]):
if x != prev:
yield prev, df.iloc[start:i]
prev = x
start = i
# need to send back the last group
yield prev, df.iloc[start:]
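# Example (an illustrative addition): iterating over groups of a frame that
# is already sorted on the grouping column.
#
#   frame = pd.DataFrame({'g': ['a', 'a', 'b', 'b', 'b'], 'x': range(5)})
#   for name, group in sorted_groupby(frame, 'g'):
#       print(name, len(group))   # -> 'a' 2, then 'b' 3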
def columns_in_filters(filters):
"""
Returns a list of the columns used in a set of query filters.
Parameters
----------
filters : list of str or str
        List of the filters as passed to ``apply_filter_query``.
Returns
-------
columns : list of str
List of all the strings mentioned in the filters.
"""
if not filters:
return []
if not isinstance(filters, str):
filters = ' '.join(filters)
columns = []
reserved = {'and', 'or', 'in', 'not'}
for toknum, tokval, _, _, _ in generate_tokens(StringIO(filters).readline):
if toknum == NAME and tokval not in reserved:
columns.append(tokval)
return list(tz.unique(columns))
def _tokens_from_patsy(node):
"""
Yields all the individual tokens from within a patsy formula
as parsed by patsy.parse_formula.parse_formula.
Parameters
----------
node : patsy.parse_formula.ParseNode
"""
for n in node.args:
for t in _tokens_from_patsy(n):
yield t
if node.token:
yield node.token
def columns_in_formula(formula):
"""
Returns the names of all the columns used in a patsy formula.
Parameters
----------
formula : str, iterable, or dict
Any formula construction supported by ``str_model_expression``.
Returns
-------
columns : list of str
"""
if formula is None:
return []
formula = str_model_expression(formula, add_constant=False)
columns = []
tokens = map(
lambda x: x.extra,
tz.remove(
lambda x: x.extra is None,
_tokens_from_patsy(patsy.parse_formula.parse_formula(formula))))
for tok in tokens:
# if there are parentheses in the expression we
# want to drop them and everything outside
# and start again from the top
if '(' in tok:
start = tok.find('(') + 1
fin = tok.rfind(')')
columns.extend(columns_in_formula(tok[start:fin]))
else:
for toknum, tokval, _, _, _ in generate_tokens(
StringIO(tok).readline):
if toknum == NAME:
columns.append(tokval)
return list(tz.unique(columns))
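# Example (an illustrative addition) of the filter naming convention handled
# by _filterize / filter_table: a '_min' suffix becomes '>=', a '_max' suffix
# becomes '<', and a bare column name becomes '=='.
#
#   table = pd.DataFrame({'sqft': [500, 1500, 2500], 'zone': [1, 1, 2]})
#   constraints = pd.Series({'sqft_min': 1000, 'sqft_max': 2000, 'zone': 1})
#   filter_table(table, constraints)   # keeps only the 1500 sqft row in zone 1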
|
bsd-3-clause
|
takuya1981/sms-tools
|
lectures/06-Harmonic-model/plots-code/carnatic-spectrum.py
|
22
|
1042
|
import numpy as np
import matplotlib.pyplot as plt
from scipy.signal import hamming, triang, blackmanharris
import math
import sys, os, functools, time
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../software/models/'))
import dftModel as DFT
import utilFunctions as UF
(fs, x) = UF.wavread('../../../sounds/carnatic.wav')
pin = 1.4*fs
w = np.blackman(1601)
N = 4096
hM1 = int(math.floor((w.size+1)/2))
hM2 = int(math.floor(w.size/2))
x1 = x[pin-hM1:pin+hM2]
mX, pX = DFT.dftAnal(x1, w, N)
plt.figure(1, figsize=(9, 7))
plt.subplot(311)
plt.plot(np.arange(-hM1, hM2)/float(fs), x1, lw=1.5)
plt.axis([-hM1/float(fs), hM2/float(fs), min(x1), max(x1)])
plt.title('x (carnatic.wav)')
plt.subplot(3,1,2)
plt.plot(fs*np.arange(mX.size)/float(N), mX, 'r', lw=1.5)
plt.axis([0,fs/4,-100,max(mX)])
plt.title ('mX')
plt.subplot(3,1,3)
plt.plot(fs*np.arange(pX.size)/float(N), pX, 'c', lw=1.5)
plt.axis([0,fs/4,min(pX),27])
plt.title ('pX')
plt.tight_layout()
plt.savefig('carnatic-spectrum.png')
plt.show()
|
agpl-3.0
|
yw374cornell/e-mission-server
|
emission/analysis/intake/cleaning/location_smoothing.py
|
1
|
9333
|
# Standard imports
import numpy as np
import json
import logging
from dateutil import parser
import math
import pandas as pd
import attrdict as ad
import datetime as pydt
import time as time
import pytz
# Our imports
import emission.analysis.point_features as pf
import emission.analysis.intake.cleaning.cleaning_methods.speed_outlier_detection as eaico
import emission.analysis.intake.cleaning.cleaning_methods.jump_smoothing as eaicj
import emission.storage.pipeline_queries as epq
import emission.storage.decorations.analysis_timeseries_queries as esda
import emission.storage.timeseries.abstract_timeseries as esta
import emission.core.wrapper.entry as ecwe
import emission.core.wrapper.metadata as ecwm
import emission.core.wrapper.smoothresults as ecws
import emission.storage.decorations.useful_queries as taug
import emission.storage.decorations.location_queries as lq
import emission.core.get_database as edb
import emission.core.common as ec
np.set_printoptions(suppress=True)
def recalc_speed(points_df):
"""
The input dataframe already has "speed" and "distance" columns.
Drop them and recalculate speeds from the first point onwards.
The speed column has the speed between each point and its previous point.
The first row has a speed of zero.
"""
stripped_df = points_df.drop("speed", axis=1).drop("distance", axis=1)
point_list = [ad.AttrDict(row) for row in points_df.to_dict('records')]
zipped_points_list = zip(point_list, point_list[1:])
distances = [pf.calDistance(p1, p2) for (p1, p2) in zipped_points_list]
distances.insert(0, 0)
with_speeds_df = pd.concat([stripped_df, pd.Series(distances, index=points_df.index, name="distance")], axis=1)
speeds = [pf.calSpeed(p1, p2) for (p1, p2) in zipped_points_list]
speeds.insert(0, 0)
with_speeds_df = pd.concat([with_speeds_df, pd.Series(speeds, index=points_df.index, name="speed")], axis=1)
return with_speeds_df
def add_dist_heading_speed(points_df):
# type: (pandas.DataFrame) -> pandas.DataFrame
"""
    Returns a new dataframe with added "distance", "speed" and "heading" columns.
The speed column has the speed between each point and its previous point.
The first row has a speed of zero.
"""
point_list = [ad.AttrDict(row) for row in points_df.to_dict('records')]
zipped_points_list = zip(point_list, point_list[1:])
distances = [pf.calDistance(p1, p2) for (p1, p2) in zipped_points_list]
distances.insert(0, 0)
speeds = [pf.calSpeed(p1, p2) for (p1, p2) in zipped_points_list]
speeds.insert(0, 0)
headings = [pf.calHeading(p1, p2) for (p1, p2) in zipped_points_list]
headings.insert(0, 0)
with_distances_df = pd.concat([points_df, pd.Series(distances, name="distance")], axis=1)
with_speeds_df = pd.concat([with_distances_df, pd.Series(speeds, name="speed")], axis=1)
with_headings_df = pd.concat([with_speeds_df, pd.Series(headings, name="heading")], axis=1)
return with_headings_df
def add_heading_change(points_df):
"""
Returns a new dataframe with an added "heading_change" column.
The heading change column has the heading change between this point and the
    two points preceding it. The first two rows have a heading change of zero.
"""
point_list = [ad.AttrDict(row) for row in points_df.to_dict('records')]
zipped_points_list = zip(point_list, point_list[1:], point_list[2:])
hcs = [pf.calHC(p1, p2, p3) for (p1, p2, p3) in zipped_points_list]
hcs.insert(0, 0)
hcs.insert(1, 0)
with_hcs_df = pd.concat([points_df, pd.Series(hcs, name="heading_change")], axis=1)
return with_hcs_df
def filter_current_sections(user_id):
time_query = epq.get_time_range_for_smoothing(user_id)
try:
sections_to_process = esda.get_objects(esda.RAW_SECTION_KEY, user_id,
time_query)
for section in sections_to_process:
logging.info("^" * 20 + ("Smoothing section %s for user %s" % (section.get_id(), user_id)) + "^"
* 20)
filter_jumps(user_id, section.get_id())
if len(sections_to_process) == 0:
# Didn't process anything new so start at the same point next time
last_section_processed = None
else:
last_section_processed = sections_to_process[-1]
epq.mark_smoothing_done(user_id, last_section_processed)
except:
logging.exception("Marking smoothing as failed")
epq.mark_smoothing_failed(user_id)
def filter_jumps(user_id, section_id):
"""
    filters out any jumps in the points related to this section and stores an entry that lists the deleted points for
this trip and this section.
:param user_id: the user id to filter the trips for
:param section_id: the section_id to filter the trips for
:return: none. saves an entry with the filtered points into the database.
"""
logging.debug("filter_jumps(%s, %s) called" % (user_id, section_id))
outlier_algo = eaico.BoxplotOutlier()
filtering_algo = eaicj.SmoothZigzag()
tq = esda.get_time_query_for_trip_like(esda.RAW_SECTION_KEY, section_id)
ts = esta.TimeSeries.get_time_series(user_id)
section_points_df = ts.get_data_df("background/filtered_location", tq)
logging.debug("len(section_points_df) = %s" % len(section_points_df))
points_to_ignore_df = get_points_to_filter(section_points_df, outlier_algo, filtering_algo)
if points_to_ignore_df is None:
# There were no points to delete
return
deleted_point_id_list = list(points_to_ignore_df._id)
logging.debug("deleted %s points" % len(deleted_point_id_list))
filter_result = ecws.Smoothresults()
filter_result.section = section_id
filter_result.deleted_points = deleted_point_id_list
filter_result.outlier_algo = "BoxplotOutlier"
filter_result.filtering_algo = "SmoothZigzag"
result_entry = ecwe.Entry.create_entry(user_id, "analysis/smoothing", filter_result)
ts.insert(result_entry)
def get_points_to_filter(section_points_df, outlier_algo, filtering_algo):
"""
From the incoming dataframe, filter out large jumps using the specified outlier detection algorithm and
the specified filtering algorithm.
:param section_points_df: a dataframe of points for the current section
:param outlier_algo: the algorithm used to detect outliers
:param filtering_algo: the algorithm used to determine which of those outliers need to be filtered
:return: a dataframe of points that need to be stripped, if any.
None if none of them need to be stripped.
"""
with_speeds_df = add_dist_heading_speed(section_points_df)
logging.debug("section_points_df.shape = %s, with_speeds_df.shape = %s" %
(section_points_df.shape, with_speeds_df.shape))
# if filtering algo is none, there's nothing that can use the max speed
if outlier_algo is not None and filtering_algo is not None:
maxSpeed = outlier_algo.get_threshold(with_speeds_df)
# TODO: Is this the best way to do this? Or should I pass this in as an argument to filter?
# Or create an explicit set_speed() method?
# Or pass the outlier_algo as the parameter to the filtering_algo?
filtering_algo.maxSpeed = maxSpeed
logging.debug("maxSpeed = %s" % filtering_algo.maxSpeed)
if filtering_algo is not None:
try:
filtering_algo.filter(with_speeds_df)
to_delete_mask = np.logical_not(filtering_algo.inlier_mask_)
return with_speeds_df[to_delete_mask]
except Exception as e:
logging.debug("Caught error %s while processing section, skipping..." % e)
return None
else:
logging.debug("no filtering algo specified, returning None")
return None
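# A small, self-contained sketch of the mask handling in get_points_to_filter:
# the filtering algorithm exposes an inlier mask, and the rows to delete are
# selected with the inverted mask. The mask and speeds below are made up for
# illustration only.
def _inverted_mask_sketch():
    points = pd.DataFrame({"_id": [1, 2, 3, 4], "speed": [1.0, 90.0, 1.2, 1.1]})
    inlier_mask = np.array([True, False, True, True])  # pretend row 2 was flagged
    to_delete_mask = np.logical_not(inlier_mask)
    return points[to_delete_mask]  # just the outlier row, _id == 2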
def get_filtered_points(section_df, outlier_algo, filtering_algo):
"""
Filter the points that correspond to the section object that is passed in.
The section object is an AttrDict with the startTs and endTs fields.
Returns a filtered df with the index after the initial filter for accuracy
TODO: Switch this to the section wrapper object going forward
TODO: Note that here, we assume that the data has already been chunked into sections.
But really, we need to filter (at least for accuracy) before segmenting in
order to avoid issues like https://github.com/e-mission/e-mission-data-collection/issues/45
"""
with_speeds_df = add_dist_heading_speed(section_df)
# if filtering algo is none, there's nothing that can use the max speed
if outlier_algo is not None and filtering_algo is not None:
maxSpeed = outlier_algo.get_threshold(with_speeds_df)
# TODO: Is this the best way to do this? Or should I pass this in as an argument to filter?
# Or create an explicit set_speed() method?
# Or pass the outlier_algo as the parameter to the filtering_algo?
filtering_algo.maxSpeed = maxSpeed
if filtering_algo is not None:
try:
filtering_algo.filter(with_speeds_df)
return with_speeds_df[filtering_algo.inlier_mask_]
except Exception as e:
print ("Caught error %s while processing section, skipping..." % e)
return with_speeds_df
else:
return with_speeds_df
|
bsd-3-clause
|
rgommers/scipy
|
scipy/integrate/_quadrature.py
|
12
|
33319
|
from __future__ import annotations
from typing import TYPE_CHECKING, Callable, Dict, Tuple, Any, cast
import functools
import numpy as np
import math
import types
import warnings
# trapezoid is a public function for scipy.integrate,
# even though it's actually a NumPy function.
from numpy import trapz as trapezoid
from scipy.special import roots_legendre
from scipy.special import gammaln
__all__ = ['fixed_quad', 'quadrature', 'romberg', 'romb',
'trapezoid', 'trapz', 'simps', 'simpson',
'cumulative_trapezoid', 'cumtrapz', 'newton_cotes',
'AccuracyWarning']
# Make See Also linking for our local copy work properly
def _copy_func(f):
"""Based on http://stackoverflow.com/a/6528148/190597 (Glenn Maynard)"""
g = types.FunctionType(f.__code__, f.__globals__, name=f.__name__,
argdefs=f.__defaults__, closure=f.__closure__)
g = functools.update_wrapper(g, f)
g.__kwdefaults__ = f.__kwdefaults__
return g
trapezoid = _copy_func(trapezoid)
if trapezoid.__doc__:
trapezoid.__doc__ = trapezoid.__doc__.replace(
'sum, cumsum', 'numpy.cumsum')
# Note: alias kept for backwards compatibility. Rename was done
# because trapz is a slur in colloquial English (see gh-12924).
def trapz(y, x=None, dx=1.0, axis=-1):
"""`An alias of `trapezoid`.
`trapz` is kept for backwards compatibility. For new code, prefer
`trapezoid` instead.
"""
return trapezoid(y, x=x, dx=dx, axis=axis)
class AccuracyWarning(Warning):
pass
if TYPE_CHECKING:
# workaround for mypy function attributes see:
# https://github.com/python/mypy/issues/2087#issuecomment-462726600
from typing_extensions import Protocol
class CacheAttributes(Protocol):
cache: Dict[int, Tuple[Any, Any]]
else:
CacheAttributes = Callable
def cache_decorator(func: Callable) -> CacheAttributes:
return cast(CacheAttributes, func)
@cache_decorator
def _cached_roots_legendre(n):
"""
Cache roots_legendre results to speed up calls of the fixed_quad
function.
"""
if n in _cached_roots_legendre.cache:
return _cached_roots_legendre.cache[n]
_cached_roots_legendre.cache[n] = roots_legendre(n)
return _cached_roots_legendre.cache[n]
_cached_roots_legendre.cache = dict()
def fixed_quad(func, a, b, args=(), n=5):
"""
Compute a definite integral using fixed-order Gaussian quadrature.
Integrate `func` from `a` to `b` using Gaussian quadrature of
order `n`.
Parameters
----------
func : callable
A Python function or method to integrate (must accept vector inputs).
If integrating a vector-valued function, the returned array must have
shape ``(..., len(x))``.
a : float
Lower limit of integration.
b : float
Upper limit of integration.
args : tuple, optional
Extra arguments to pass to function, if any.
n : int, optional
Order of quadrature integration. Default is 5.
Returns
-------
val : float
Gaussian quadrature approximation to the integral
none : None
Statically returned value of None
See Also
--------
quad : adaptive quadrature using QUADPACK
dblquad : double integrals
tplquad : triple integrals
romberg : adaptive Romberg quadrature
quadrature : adaptive Gaussian quadrature
romb : integrators for sampled data
simpson : integrators for sampled data
cumulative_trapezoid : cumulative integration for sampled data
ode : ODE integrator
odeint : ODE integrator
Examples
--------
>>> from scipy import integrate
>>> f = lambda x: x**8
>>> integrate.fixed_quad(f, 0.0, 1.0, n=4)
(0.1110884353741496, None)
>>> integrate.fixed_quad(f, 0.0, 1.0, n=5)
(0.11111111111111102, None)
>>> print(1/9.0) # analytical result
0.1111111111111111
>>> integrate.fixed_quad(np.cos, 0.0, np.pi/2, n=4)
(0.9999999771971152, None)
>>> integrate.fixed_quad(np.cos, 0.0, np.pi/2, n=5)
(1.000000000039565, None)
>>> np.sin(np.pi/2)-np.sin(0) # analytical result
1.0
"""
x, w = _cached_roots_legendre(n)
x = np.real(x)
if np.isinf(a) or np.isinf(b):
raise ValueError("Gaussian quadrature is only available for "
"finite limits.")
y = (b-a)*(x+1)/2.0 + a
return (b-a)/2.0 * np.sum(w*func(y, *args), axis=-1), None
def vectorize1(func, args=(), vec_func=False):
"""Vectorize the call to a function.
This is an internal utility function used by `romberg` and
`quadrature` to create a vectorized version of a function.
If `vec_func` is True, the function `func` is assumed to take vector
arguments.
Parameters
----------
func : callable
User defined function.
args : tuple, optional
Extra arguments for the function.
vec_func : bool, optional
True if the function func takes vector arguments.
Returns
-------
vfunc : callable
A function that will take a vector argument and return the
result.
"""
if vec_func:
def vfunc(x):
return func(x, *args)
else:
def vfunc(x):
if np.isscalar(x):
return func(x, *args)
x = np.asarray(x)
# call with first point to get output type
y0 = func(x[0], *args)
n = len(x)
dtype = getattr(y0, 'dtype', type(y0))
output = np.empty((n,), dtype=dtype)
output[0] = y0
for i in range(1, n):
output[i] = func(x[i], *args)
return output
return vfunc
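# A quick sketch of how `vectorize1` is used by `quadrature` and `romberg`
# below: wrap a function so that it can always be evaluated on an array of
# sample points, whether or not it natively accepts arrays. The call below is
# purely illustrative.
def _vectorize1_sketch():
    vfunc = vectorize1(np.sin, vec_func=False)  # evaluated scalar by scalar
    gfunc = vectorize1(np.sin, vec_func=True)   # array passed straight through
    x = np.array([0.0, np.pi / 2])
    return vfunc(x), gfunc(x)                   # both give array([0., 1.])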
def quadrature(func, a, b, args=(), tol=1.49e-8, rtol=1.49e-8, maxiter=50,
vec_func=True, miniter=1):
"""
Compute a definite integral using fixed-tolerance Gaussian quadrature.
Integrate `func` from `a` to `b` using Gaussian quadrature
with absolute tolerance `tol`.
Parameters
----------
func : function
A Python function or method to integrate.
a : float
Lower limit of integration.
b : float
Upper limit of integration.
args : tuple, optional
Extra arguments to pass to function.
tol, rtol : float, optional
Iteration stops when error between last two iterates is less than
`tol` OR the relative change is less than `rtol`.
maxiter : int, optional
Maximum order of Gaussian quadrature.
vec_func : bool, optional
Whether `func` handles arrays as arguments (i.e., whether it is a
"vector" function). Default is True.
miniter : int, optional
Minimum order of Gaussian quadrature.
Returns
-------
val : float
Gaussian quadrature approximation (within tolerance) to integral.
err : float
Difference between last two estimates of the integral.
See also
--------
romberg: adaptive Romberg quadrature
fixed_quad: fixed-order Gaussian quadrature
quad: adaptive quadrature using QUADPACK
dblquad: double integrals
tplquad: triple integrals
romb: integrator for sampled data
simpson: integrator for sampled data
cumulative_trapezoid: cumulative integration for sampled data
ode: ODE integrator
odeint: ODE integrator
Examples
--------
>>> from scipy import integrate
>>> f = lambda x: x**8
>>> integrate.quadrature(f, 0.0, 1.0)
(0.11111111111111106, 4.163336342344337e-17)
>>> print(1/9.0) # analytical result
0.1111111111111111
>>> integrate.quadrature(np.cos, 0.0, np.pi/2)
(0.9999999999999536, 3.9611425250996035e-11)
>>> np.sin(np.pi/2)-np.sin(0) # analytical result
1.0
"""
if not isinstance(args, tuple):
args = (args,)
vfunc = vectorize1(func, args, vec_func=vec_func)
val = np.inf
err = np.inf
maxiter = max(miniter+1, maxiter)
for n in range(miniter, maxiter+1):
newval = fixed_quad(vfunc, a, b, (), n)[0]
err = abs(newval-val)
val = newval
if err < tol or err < rtol*abs(val):
break
else:
warnings.warn(
"maxiter (%d) exceeded. Latest difference = %e" % (maxiter, err),
AccuracyWarning)
return val, err
def tupleset(t, i, value):
l = list(t)
l[i] = value
return tuple(l)
# Note: alias kept for backwards compatibility. Rename was done
# because cumtrapz is a slur in colloquial English (see gh-12924).
def cumtrapz(y, x=None, dx=1.0, axis=-1, initial=None):
"""`An alias of `cumulative_trapezoid`.
`cumtrapz` is kept for backwards compatibility. For new code, prefer
`cumulative_trapezoid` instead.
"""
return cumulative_trapezoid(y, x=x, dx=dx, axis=axis, initial=initial)
def cumulative_trapezoid(y, x=None, dx=1.0, axis=-1, initial=None):
"""
Cumulatively integrate y(x) using the composite trapezoidal rule.
Parameters
----------
y : array_like
Values to integrate.
x : array_like, optional
The coordinate to integrate along. If None (default), use spacing `dx`
between consecutive elements in `y`.
dx : float, optional
Spacing between elements of `y`. Only used if `x` is None.
axis : int, optional
Specifies the axis to cumulate. Default is -1 (last axis).
initial : scalar, optional
If given, insert this value at the beginning of the returned result.
Typically this value should be 0. Default is None, which means no
value at ``x[0]`` is returned and `res` has one element less than `y`
along the axis of integration.
Returns
-------
res : ndarray
The result of cumulative integration of `y` along `axis`.
If `initial` is None, the shape is such that the axis of integration
has one less value than `y`. If `initial` is given, the shape is equal
to that of `y`.
See Also
--------
numpy.cumsum, numpy.cumprod
quad: adaptive quadrature using QUADPACK
romberg: adaptive Romberg quadrature
quadrature: adaptive Gaussian quadrature
fixed_quad: fixed-order Gaussian quadrature
dblquad: double integrals
tplquad: triple integrals
romb: integrators for sampled data
ode: ODE integrators
odeint: ODE integrators
Examples
--------
>>> from scipy import integrate
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-2, 2, num=20)
>>> y = x
>>> y_int = integrate.cumulative_trapezoid(y, x, initial=0)
>>> plt.plot(x, y_int, 'ro', x, y[0] + 0.5 * x**2, 'b-')
>>> plt.show()
"""
y = np.asarray(y)
if x is None:
d = dx
else:
x = np.asarray(x)
if x.ndim == 1:
d = np.diff(x)
# reshape to correct shape
shape = [1] * y.ndim
shape[axis] = -1
d = d.reshape(shape)
elif len(x.shape) != len(y.shape):
raise ValueError("If given, shape of x must be 1-D or the "
"same as y.")
else:
d = np.diff(x, axis=axis)
if d.shape[axis] != y.shape[axis] - 1:
raise ValueError("If given, length of x along axis must be the "
"same as y.")
nd = len(y.shape)
slice1 = tupleset((slice(None),)*nd, axis, slice(1, None))
slice2 = tupleset((slice(None),)*nd, axis, slice(None, -1))
res = np.cumsum(d * (y[slice1] + y[slice2]) / 2.0, axis=axis)
if initial is not None:
if not np.isscalar(initial):
raise ValueError("`initial` parameter should be a scalar.")
shape = list(res.shape)
shape[axis] = 1
res = np.concatenate([np.full(shape, initial, dtype=res.dtype), res],
axis=axis)
return res
def _basic_simpson(y, start, stop, x, dx, axis):
nd = len(y.shape)
if start is None:
start = 0
step = 2
slice_all = (slice(None),)*nd
slice0 = tupleset(slice_all, axis, slice(start, stop, step))
slice1 = tupleset(slice_all, axis, slice(start+1, stop+1, step))
slice2 = tupleset(slice_all, axis, slice(start+2, stop+2, step))
if x is None: # Even-spaced Simpson's rule.
result = np.sum(y[slice0] + 4*y[slice1] + y[slice2], axis=axis)
result *= dx / 3.0
else:
# Account for possibly different spacings.
# Simpson's rule changes a bit.
h = np.diff(x, axis=axis)
sl0 = tupleset(slice_all, axis, slice(start, stop, step))
sl1 = tupleset(slice_all, axis, slice(start+1, stop+1, step))
h0 = h[sl0]
h1 = h[sl1]
hsum = h0 + h1
hprod = h0 * h1
h0divh1 = h0 / h1
tmp = hsum/6.0 * (y[slice0] * (2 - 1.0/h0divh1) +
y[slice1] * (hsum * hsum / hprod) +
y[slice2] * (2 - h0divh1))
result = np.sum(tmp, axis=axis)
return result
# Note: alias kept for backwards compatibility. simps was renamed to simpson
# because the former is a slur in colloquial English (see gh-12924).
def simps(y, x=None, dx=1.0, axis=-1, even='avg'):
"""`An alias of `simpson`.
`simps` is kept for backwards compatibility. For new code, prefer
`simpson` instead.
"""
return simpson(y, x=x, dx=dx, axis=axis, even=even)
def simpson(y, x=None, dx=1.0, axis=-1, even='avg'):
"""
Integrate y(x) using samples along the given axis and the composite
Simpson's rule. If x is None, spacing of dx is assumed.
If there are an even number of samples, N, then there are an odd
number of intervals (N-1), but Simpson's rule requires an even number
of intervals. The parameter 'even' controls how this is handled.
Parameters
----------
y : array_like
Array to be integrated.
x : array_like, optional
If given, the points at which `y` is sampled.
dx : float, optional
Spacing of integration points along axis of `x`. Only used when
`x` is None. Default is 1.
axis : int, optional
Axis along which to integrate. Default is the last axis.
even : str {'avg', 'first', 'last'}, optional
'avg' : Average two results: 1) use Simpson's rule for the first N-2
intervals with a trapezoidal rule on the last interval, and 2) use
Simpson's rule for the last N-2 intervals with a trapezoidal rule on
the first interval.
'first' : Use Simpson's rule for the first N-2 intervals with
a trapezoidal rule on the last interval.
'last' : Use Simpson's rule for the last N-2 intervals with a
trapezoidal rule on the first interval.
See Also
--------
quad: adaptive quadrature using QUADPACK
romberg: adaptive Romberg quadrature
quadrature: adaptive Gaussian quadrature
fixed_quad: fixed-order Gaussian quadrature
dblquad: double integrals
tplquad: triple integrals
romb: integrators for sampled data
cumulative_trapezoid: cumulative integration for sampled data
ode: ODE integrators
odeint: ODE integrators
Notes
-----
For an odd number of samples that are equally spaced the result is
exact if the function is a polynomial of order 3 or less. If
the samples are not equally spaced, then the result is exact only
if the function is a polynomial of order 2 or less.
Examples
--------
>>> from scipy import integrate
>>> x = np.arange(0, 10)
>>> y = np.arange(0, 10)
>>> integrate.simpson(y, x)
40.5
>>> y = np.power(x, 3)
>>> integrate.simpson(y, x)
1642.5
>>> integrate.quad(lambda x: x**3, 0, 9)[0]
1640.25
>>> integrate.simpson(y, x, even='first')
1644.5
"""
y = np.asarray(y)
nd = len(y.shape)
N = y.shape[axis]
last_dx = dx
first_dx = dx
returnshape = 0
if x is not None:
x = np.asarray(x)
if len(x.shape) == 1:
shapex = [1] * nd
shapex[axis] = x.shape[0]
saveshape = x.shape
returnshape = 1
x = x.reshape(tuple(shapex))
elif len(x.shape) != len(y.shape):
raise ValueError("If given, shape of x must be 1-D or the "
"same as y.")
if x.shape[axis] != N:
raise ValueError("If given, length of x along axis must be the "
"same as y.")
if N % 2 == 0:
val = 0.0
result = 0.0
slice1 = (slice(None),)*nd
slice2 = (slice(None),)*nd
if even not in ['avg', 'last', 'first']:
raise ValueError("Parameter 'even' must be "
"'avg', 'last', or 'first'.")
# Compute using Simpson's rule on first intervals
if even in ['avg', 'first']:
slice1 = tupleset(slice1, axis, -1)
slice2 = tupleset(slice2, axis, -2)
if x is not None:
last_dx = x[slice1] - x[slice2]
val += 0.5*last_dx*(y[slice1]+y[slice2])
result = _basic_simpson(y, 0, N-3, x, dx, axis)
# Compute using Simpson's rule on last set of intervals
if even in ['avg', 'last']:
slice1 = tupleset(slice1, axis, 0)
slice2 = tupleset(slice2, axis, 1)
if x is not None:
first_dx = x[tuple(slice2)] - x[tuple(slice1)]
val += 0.5*first_dx*(y[slice2]+y[slice1])
result += _basic_simpson(y, 1, N-2, x, dx, axis)
if even == 'avg':
val /= 2.0
result /= 2.0
result = result + val
else:
result = _basic_simpson(y, 0, N-2, x, dx, axis)
if returnshape:
x = x.reshape(saveshape)
return result
def romb(y, dx=1.0, axis=-1, show=False):
"""
Romberg integration using samples of a function.
Parameters
----------
y : array_like
A vector of ``2**k + 1`` equally-spaced samples of a function.
dx : float, optional
The sample spacing. Default is 1.
axis : int, optional
The axis along which to integrate. Default is -1 (last axis).
show : bool, optional
When `y` is a single 1-D array and this argument is True, print the
table showing the Richardson extrapolation from the samples.
Default is False.
Returns
-------
romb : ndarray
The integrated result for `axis`.
See also
--------
quad : adaptive quadrature using QUADPACK
romberg : adaptive Romberg quadrature
quadrature : adaptive Gaussian quadrature
fixed_quad : fixed-order Gaussian quadrature
dblquad : double integrals
tplquad : triple integrals
simpson : integrators for sampled data
cumulative_trapezoid : cumulative integration for sampled data
ode : ODE integrators
odeint : ODE integrators
Examples
--------
>>> from scipy import integrate
>>> x = np.arange(10, 14.25, 0.25)
>>> y = np.arange(3, 12)
>>> integrate.romb(y)
56.0
>>> y = np.sin(np.power(x, 2.5))
>>> integrate.romb(y)
-0.742561336672229
>>> integrate.romb(y, show=True)
Richardson Extrapolation Table for Romberg Integration
====================================================================
-0.81576
4.63862 6.45674
-1.10581 -3.02062 -3.65245
-2.57379 -3.06311 -3.06595 -3.05664
-1.34093 -0.92997 -0.78776 -0.75160 -0.74256
====================================================================
-0.742561336672229
"""
y = np.asarray(y)
nd = len(y.shape)
Nsamps = y.shape[axis]
Ninterv = Nsamps-1
n = 1
k = 0
while n < Ninterv:
n <<= 1
k += 1
if n != Ninterv:
raise ValueError("Number of samples must be one plus a "
"non-negative power of 2.")
R = {}
slice_all = (slice(None),) * nd
slice0 = tupleset(slice_all, axis, 0)
slicem1 = tupleset(slice_all, axis, -1)
h = Ninterv * np.asarray(dx, dtype=float)
R[(0, 0)] = (y[slice0] + y[slicem1])/2.0*h
slice_R = slice_all
start = stop = step = Ninterv
for i in range(1, k+1):
start >>= 1
slice_R = tupleset(slice_R, axis, slice(start, stop, step))
step >>= 1
R[(i, 0)] = 0.5*(R[(i-1, 0)] + h*y[slice_R].sum(axis=axis))
for j in range(1, i+1):
prev = R[(i, j-1)]
R[(i, j)] = prev + (prev-R[(i-1, j-1)]) / ((1 << (2*j))-1)
h /= 2.0
if show:
if not np.isscalar(R[(0, 0)]):
print("*** Printing table only supported for integrals" +
" of a single data set.")
else:
try:
precis = show[0]
except (TypeError, IndexError):
precis = 5
try:
width = show[1]
except (TypeError, IndexError):
width = 8
formstr = "%%%d.%df" % (width, precis)
title = "Richardson Extrapolation Table for Romberg Integration"
print("", title.center(68), "=" * 68, sep="\n", end="\n")
for i in range(k+1):
for j in range(i+1):
print(formstr % R[(i, j)], end=" ")
print()
print("=" * 68)
print()
return R[(k, k)]
# Romberg quadratures for numeric integration.
#
# Written by Scott M. Ransom <[email protected]>
# last revision: 14 Nov 98
#
# Cosmetic changes by Konrad Hinsen <[email protected]>
# last revision: 1999-7-21
#
# Adapted to SciPy by Travis Oliphant <[email protected]>
# last revision: Dec 2001
def _difftrap(function, interval, numtraps):
"""
Perform part of the trapezoidal rule to integrate a function.
Assume that we had called difftrap with all lower powers-of-2
starting with 1. Calling difftrap only returns the summation
of the new ordinates. It does _not_ multiply by the width
of the trapezoids. This must be performed by the caller.
'function' is the function to evaluate (must accept vector arguments).
'interval' is a sequence with lower and upper limits
of integration.
'numtraps' is the number of trapezoids to use (must be a
power-of-2).
"""
if numtraps <= 0:
raise ValueError("numtraps must be > 0 in difftrap().")
elif numtraps == 1:
return 0.5*(function(interval[0])+function(interval[1]))
else:
numtosum = numtraps/2
h = float(interval[1]-interval[0])/numtosum
lox = interval[0] + 0.5 * h
points = lox + h * np.arange(numtosum)
s = np.sum(function(points), axis=0)
return s
def _romberg_diff(b, c, k):
"""
Compute the differences for the Romberg quadrature corrections.
See Forman Acton's "Real Computing Made Real," p 143.
"""
tmp = 4.0**k
return (tmp * c - b)/(tmp - 1.0)
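# A short worked example of the Richardson step performed by `_romberg_diff`:
# with k = 1, combining the 1-panel and 2-panel trapezoid estimates of the
# integral of x**2 over [0, 1] as (4*c - b)/3 recovers the exact value 1/3.
# The integrand is chosen only to make the arithmetic easy to check.
def _romberg_diff_sketch():
    f = lambda t: t ** 2
    b = 0.5 * (f(0.0) + f(1.0))                # 1-panel trapezoid -> 0.5
    c = 0.25 * (f(0.0) + 2 * f(0.5) + f(1.0))  # 2-panel trapezoid -> 0.375
    return _romberg_diff(b, c, 1)              # (4*0.375 - 0.5)/3 == 1/3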
def _printresmat(function, interval, resmat):
# Print the Romberg result matrix.
i = j = 0
print('Romberg integration of', repr(function), end=' ')
print('from', interval)
print('')
print('%6s %9s %9s' % ('Steps', 'StepSize', 'Results'))
for i in range(len(resmat)):
print('%6d %9f' % (2**i, (interval[1]-interval[0])/(2.**i)), end=' ')
for j in range(i+1):
print('%9f' % (resmat[i][j]), end=' ')
print('')
print('')
print('The final result is', resmat[i][j], end=' ')
print('after', 2**(len(resmat)-1)+1, 'function evaluations.')
def romberg(function, a, b, args=(), tol=1.48e-8, rtol=1.48e-8, show=False,
divmax=10, vec_func=False):
"""
Romberg integration of a callable function or method.
Returns the integral of `function` (a function of one variable)
over the interval (`a`, `b`).
If `show` is 1, the triangular array of the intermediate results
will be printed. If `vec_func` is True (default is False), then
`function` is assumed to support vector arguments.
Parameters
----------
function : callable
Function to be integrated.
a : float
Lower limit of integration.
b : float
Upper limit of integration.
Returns
-------
results : float
Result of the integration.
Other Parameters
----------------
args : tuple, optional
Extra arguments to pass to function. Each element of `args` will
be passed as a single argument to `func`. Default is to pass no
extra arguments.
tol, rtol : float, optional
The desired absolute and relative tolerances. Defaults are 1.48e-8.
show : bool, optional
Whether to print the results. Default is False.
divmax : int, optional
Maximum order of extrapolation. Default is 10.
vec_func : bool, optional
Whether `func` handles arrays as arguments (i.e., whether it is a
"vector" function). Default is False.
See Also
--------
fixed_quad : Fixed-order Gaussian quadrature.
quad : Adaptive quadrature using QUADPACK.
dblquad : Double integrals.
tplquad : Triple integrals.
romb : Integrators for sampled data.
simpson : Integrators for sampled data.
cumulative_trapezoid : Cumulative integration for sampled data.
ode : ODE integrator.
odeint : ODE integrator.
References
----------
.. [1] 'Romberg's method' https://en.wikipedia.org/wiki/Romberg%27s_method
Examples
--------
Integrate a gaussian from 0 to 1 and compare to the error function.
>>> from scipy import integrate
>>> from scipy.special import erf
>>> gaussian = lambda x: 1/np.sqrt(np.pi) * np.exp(-x**2)
>>> result = integrate.romberg(gaussian, 0, 1, show=True)
Romberg integration of <function vfunc at ...> from [0, 1]
::
Steps StepSize Results
1 1.000000 0.385872
2 0.500000 0.412631 0.421551
4 0.250000 0.419184 0.421368 0.421356
8 0.125000 0.420810 0.421352 0.421350 0.421350
16 0.062500 0.421215 0.421350 0.421350 0.421350 0.421350
32 0.031250 0.421317 0.421350 0.421350 0.421350 0.421350 0.421350
The final result is 0.421350396475 after 33 function evaluations.
>>> print("%g %g" % (2*result, erf(1)))
0.842701 0.842701
"""
if np.isinf(a) or np.isinf(b):
raise ValueError("Romberg integration only available "
"for finite limits.")
vfunc = vectorize1(function, args, vec_func=vec_func)
n = 1
interval = [a, b]
intrange = b - a
ordsum = _difftrap(vfunc, interval, n)
result = intrange * ordsum
resmat = [[result]]
err = np.inf
last_row = resmat[0]
for i in range(1, divmax+1):
n *= 2
ordsum += _difftrap(vfunc, interval, n)
row = [intrange * ordsum / n]
for k in range(i):
row.append(_romberg_diff(last_row[k], row[k], k+1))
result = row[i]
lastresult = last_row[i-1]
if show:
resmat.append(row)
err = abs(result - lastresult)
if err < tol or err < rtol * abs(result):
break
last_row = row
else:
warnings.warn(
"divmax (%d) exceeded. Latest difference = %e" % (divmax, err),
AccuracyWarning)
if show:
_printresmat(vfunc, interval, resmat)
return result
# Coefficients for Newton-Cotes quadrature
#
# These are the points being used
# to construct the local interpolating polynomial
# a are the weights for Newton-Cotes integration
# B is the error coefficient.
# The error in these coefficients grows as N gets larger,
# or as the samples get closer together.
# You can use maxima to find these rational coefficients
# for equally spaced data using the commands
# a(i,N) := integrate(product(r-j,j,0,i-1) * product(r-j,j,i+1,N),r,0,N) / ((N-i)! * i!) * (-1)^(N-i);
# Be(N) := N^(N+2)/(N+2)! * (N/(N+3) - sum((i/N)^(N+2)*a(i,N),i,0,N));
# Bo(N) := N^(N+1)/(N+1)! * (N/(N+2) - sum((i/N)^(N+1)*a(i,N),i,0,N));
# B(N) := (if (mod(N,2)=0) then Be(N) else Bo(N));
#
# pre-computed for equally-spaced weights
#
# num_a, den_a, int_a, num_B, den_B = _builtincoeffs[N]
#
# a = num_a*array(int_a)/den_a
# B = num_B*1.0 / den_B
#
# integrate(f(x),x,x_0,x_N) = dx*sum(a*f(x_i)) + B*(dx)^(2k+3) f^(2k+2)(x*)
# where k = N // 2
#
_builtincoeffs = {
1: (1,2,[1,1],-1,12),
2: (1,3,[1,4,1],-1,90),
3: (3,8,[1,3,3,1],-3,80),
4: (2,45,[7,32,12,32,7],-8,945),
5: (5,288,[19,75,50,50,75,19],-275,12096),
6: (1,140,[41,216,27,272,27,216,41],-9,1400),
7: (7,17280,[751,3577,1323,2989,2989,1323,3577,751],-8183,518400),
8: (4,14175,[989,5888,-928,10496,-4540,10496,-928,5888,989],
-2368,467775),
9: (9,89600,[2857,15741,1080,19344,5778,5778,19344,1080,
15741,2857], -4671, 394240),
10: (5,299376,[16067,106300,-48525,272400,-260550,427368,
-260550,272400,-48525,106300,16067],
-673175, 163459296),
11: (11,87091200,[2171465,13486539,-3237113, 25226685,-9595542,
15493566,15493566,-9595542,25226685,-3237113,
13486539,2171465], -2224234463, 237758976000),
12: (1, 5255250, [1364651,9903168,-7587864,35725120,-51491295,
87516288,-87797136,87516288,-51491295,35725120,
-7587864,9903168,1364651], -3012, 875875),
13: (13, 402361344000,[8181904909, 56280729661, -31268252574,
156074417954,-151659573325,206683437987,
-43111992612,-43111992612,206683437987,
-151659573325,156074417954,-31268252574,
56280729661,8181904909], -2639651053,
344881152000),
14: (7, 2501928000, [90241897,710986864,-770720657,3501442784,
-6625093363,12630121616,-16802270373,19534438464,
-16802270373,12630121616,-6625093363,3501442784,
-770720657,710986864,90241897], -3740727473,
1275983280000)
}
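# A small check of how the table above is meant to be unpacked, following the
# formulas in the preceding comment block: for N = 2 the weights reduce to
# Simpson's rule, a = [1/3, 4/3, 1/3], with error coefficient B = -1/90.
def _builtincoeffs_sketch():
    num_a, den_a, int_a, num_B, den_B = _builtincoeffs[2]
    a = num_a * np.array(int_a, dtype=float) / den_a  # array([1/3, 4/3, 1/3])
    B = num_B * 1.0 / den_B                           # -1/90
    return a, B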
def newton_cotes(rn, equal=0):
r"""
Return weights and error coefficient for Newton-Cotes integration.
Suppose we have (N+1) samples of f at the positions
x_0, x_1, ..., x_N. Then an N-point Newton-Cotes formula for the
integral between x_0 and x_N is:
:math:`\int_{x_0}^{x_N} f(x)dx = \Delta x \sum_{i=0}^{N} a_i f(x_i)
+ B_N (\Delta x)^{N+2} f^{N+1} (\xi)`
where :math:`\xi \in [x_0,x_N]`
and :math:`\Delta x = \frac{x_N-x_0}{N}` is the average sample spacing.
If the samples are equally-spaced and N is even, then the error
term is :math:`B_N (\Delta x)^{N+3} f^{N+2}(\xi)`.
Parameters
----------
rn : int
The integer order for equally-spaced data or the relative positions of
the samples with the first sample at 0 and the last at N, where N+1 is
the length of `rn`. N is the order of the Newton-Cotes integration.
equal : int, optional
Set to 1 to enforce equally spaced data.
Returns
-------
an : ndarray
1-D array of weights to apply to the function at the provided sample
positions.
B : float
Error coefficient.
Examples
--------
Compute the integral of sin(x) in [0, :math:`\pi`]:
>>> from scipy.integrate import newton_cotes
>>> def f(x):
... return np.sin(x)
>>> a = 0
>>> b = np.pi
>>> exact = 2
>>> for N in [2, 4, 6, 8, 10]:
... x = np.linspace(a, b, N + 1)
... an, B = newton_cotes(N, 1)
... dx = (b - a) / N
... quad = dx * np.sum(an * f(x))
... error = abs(quad - exact)
... print('{:2d} {:10.9f} {:.5e}'.format(N, quad, error))
...
2 2.094395102 9.43951e-02
4 1.998570732 1.42927e-03
6 2.000017814 1.78136e-05
8 1.999999835 1.64725e-07
10 2.000000001 1.14677e-09
Notes
-----
Normally, the Newton-Cotes rules are used on smaller integration
regions and a composite rule is used to return the total integral.
"""
try:
N = len(rn)-1
if equal:
rn = np.arange(N+1)
elif np.all(np.diff(rn) == 1):
equal = 1
except Exception:
N = rn
rn = np.arange(N+1)
equal = 1
if equal and N in _builtincoeffs:
na, da, vi, nb, db = _builtincoeffs[N]
an = na * np.array(vi, dtype=float) / da
return an, float(nb)/db
if (rn[0] != 0) or (rn[-1] != N):
raise ValueError("The sample positions must start at 0"
" and end at N")
yi = rn / float(N)
ti = 2 * yi - 1
nvec = np.arange(N+1)
C = ti ** nvec[:, np.newaxis]
Cinv = np.linalg.inv(C)
# improve precision of result
for i in range(2):
Cinv = 2*Cinv - Cinv.dot(C).dot(Cinv)
vec = 2.0 / (nvec[::2]+1)
ai = Cinv[:, ::2].dot(vec) * (N / 2.)
if (N % 2 == 0) and equal:
BN = N/(N+3.)
power = N+2
else:
BN = N/(N+2.)
power = N+1
BN = BN - np.dot(yi**power, ai)
p1 = power+1
fac = power*math.log(N) - gammaln(p1)
fac = math.exp(fac)
return ai, BN*fac
|
bsd-3-clause
|
brityboy/BotBoosted
|
src/information_gain_ratio.py
|
1
|
14789
|
import numpy as np
import pandas as pd
from collections import Counter
from itertools import combinations
'''
These are the general helper functions used by all of the information gain
ratio functions.
'''
def is_categorical(x):
'''
INPUT
- single data point x
OUTPUT
- boolean
returns true if x is categorical else false
'''
return isinstance(x, str) or isinstance(x, bool) or isinstance(x, unicode)
def check_if_categorical(attribute, df):
'''
INPUT:
- attribute: the feature inside the dataframe to check
- df: the DataFrame itself
OUTPUT:
- boolean
Returns True if feature in df is categorical else False
'''
check_if_categorical = np.vectorize(is_categorical)
if np.mean(check_if_categorical(df[attribute].values)) == 1:
return True
else:
return False
def entropy(y):
'''
INPUT:
- y: 1d numpy array
OUTPUT:
- float
Return the entropy of the array y.
'''
unique = set(y)
count = Counter(y)
ent = 0
for val in unique:
p = count[val]/float(len(y))
ent += p * np.log2(p)
return -1 * ent
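'''
a quick, illustrative sanity check for entropy (the arrays below are made up):
a 50/50 mix of two labels carries one bit, a pure array carries zero
'''
def _entropy_sketch():
    balanced = entropy(np.array([0, 0, 1, 1]))  # -> 1.0
    pure = entropy(np.array([1, 1, 1, 1]))      # -> 0.0
    return balanced, pure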
def information_gain(y, y1, y2, impurity_criterion):
'''
INPUT:
- y: 1d numpy array
- y1: 1d numpy array (labels for subset 1)
- y2: 1d numpy array (labels for subset 2)
OUTPUT:
- float
Return the information gain of making the given split.
'''
return impurity_criterion(y) - \
(float(len(y1))/len(y) * impurity_criterion(y1) +
float(len(y2))/len(y) * impurity_criterion(y2))
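'''
a quick, illustrative example for information_gain: splitting a perfectly
mixed array into two pure halves recovers the full one bit of entropy
'''
def _information_gain_sketch():
    y = np.array([0, 0, 1, 1])
    y1 = np.array([0, 0])
    y2 = np.array([1, 1])
    return information_gain(y, y1, y2, entropy)  # -> 1.0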
'''
These are the helper functions for the continuous version of information
gain ratio.
'''
def multiple_information_gain(y, y_list, impurity_criterion):
'''
INPUT:
- y: 1d numpy array
- y_list: list of y values [y1, y2, y3]
- impurity_criterion: either gini or entropy
OUTPUT:
- float
Return the information gain of making the given split.
'''
aggregate_entropy = 0
for y_vals in y_list:
aggregate_entropy += float(len(y_vals))/len(y) * \
impurity_criterion(y_vals)
return impurity_criterion(y) - aggregate_entropy
def determine_optimal_continuous_split_values(attribute, df, y):
'''
INPUT
- attribute: str, feature to check
- df: pandas dataframe of features
- y: 1d array, target
OUTPUT
- max_split: tuple of best values to split on
Returns tuple of split values that optimize information gain (min 1 max 3)
'''
attribute_value_array = df[attribute].values
split_values = np.unique(sorted(attribute_value_array))[:-1]
possible_splits = list(combinations(split_values, 1))
max_info_gain = 0
for split in possible_splits:
X_list, y_list = make_multiple_split(attribute_value_array, y, split)
if multiple_information_gain(y, y_list, entropy) > max_info_gain:
max_info_gain = multiple_information_gain(y, y_list, entropy)
max_split = split
return max_split
def determine_optimal_continuous_split_values(attribute, df, y):
'''
INPUT
- attribute: str, feature to check
- df: pandas dataframe of features
- y: 1d array, target
OUTPUT
- max_split: tuple of best values to split on
Returns tuple of split values that optimize information gain (min 1 max 3)
'''
attribute_value_array = df[attribute].values
split_values = np.unique(sorted(attribute_value_array))[:-1]
# possible_splits = list(combinations(split_values, 1))
max_info_gain = 0
for split in combinations(split_values, 1):
X_list, y_list = make_multiple_split(attribute_value_array, y, split)
if multiple_information_gain(y, y_list, entropy) > max_info_gain:
max_info_gain = multiple_information_gain(y, y_list, entropy)
max_split = split
return max_split
def split_list(doc_list, n_groups):
'''
INPUT
- doc_list - is a list of documents to be split up
- n_groups - is the number of groups to split the doc_list into
OUTPUT
- list
Returns a list of len n_groups which seeks to evenly split up the original
list into continuous sub_lists
'''
avg = len(doc_list) / float(n_groups)
split_lists = []
last = 0.0
while last < len(doc_list):
split_lists.append(doc_list[int(last):int(last + avg)])
last += avg
return split_lists
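'''
a quick, illustrative example for split_list: five items split into two groups
become [[0, 1], [2, 3, 4]] because the running cutoff advances by 2.5 per step
'''
def _split_list_sketch():
    return split_list([0, 1, 2, 3, 4], 2)  # -> [[0, 1], [2, 3, 4]]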
def potential_attribute_information_gain_continuous(X_list):
'''
INPUT
- X_list: list of optimally split attribute values
OUTPUT
- float
Returns the potential information gain for a continuous split variable
using Ross Quinlan's information gain ratio formula from C4.5
'''
potential_information_gain = 0
n_X = sum([len(subset_of_X) for subset_of_X in X_list])
for X_values in X_list:
subset_ratio = float(len(X_values))/n_X
potential_information_gain += subset_ratio * np.log2(subset_ratio)
return -1 * potential_information_gain
def make_multiple_split(X, y, split_value):
'''
INPUT:
- X: 1d numpy array (values of a single feature)
- y: 1d numpy array
- split_value: tuple of 1 to 3 split values
OUTPUT:
- X_list: list of 2 to 4 1d numpy arrays (feature values per subset)
- y_list: list of 2 to 4 1d numpy arrays (labels per subset)
Return the subsets of the dataset obtained by splitting on the given
value(s): one list for X and one list for y, with one entry per bucket.
'''
if len(split_value) == 1:
split_value = split_value[0]
X1 = X[X <= split_value]
y1 = y[X <= split_value]
X2 = X[X > split_value]
y2 = y[X > split_value]
return [X1, X2], [y1, y2]
if len(split_value) == 2:
lower, upper = split_value
X1 = X[X <= lower]
y1 = y[X <= lower]
X2 = X[(X > lower) & (X <= upper)]
y2 = y[(X > lower) & (X <= upper)]
X3 = X[X > upper]
y3 = y[X > upper]
return [X1, X2, X3], [y1, y2, y3]
if len(split_value) == 3:
lower, mid, upper = split_value
X1 = X[X <= lower]
y1 = y[X <= lower]
X2 = X[(X > lower) & (X <= mid)]
y2 = y[(X > lower) & (X <= mid)]
X3 = X[(X > mid) & (X <= upper)]
y3 = y[(X > mid) & (X <= upper)]
X4 = X[X > upper]
y4 = y[X > upper]
return [X1, X2, X3, X4], [y1, y2, y3, y4]
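'''
a quick, illustrative example for make_multiple_split with a single split
value (the arrays below are made up): X and y are partitioned by the same
boolean masks
'''
def _make_multiple_split_sketch():
    X = np.array([1.0, 2.0, 3.0, 4.0])
    y = np.array([0, 0, 1, 1])
    X_list, y_list = make_multiple_split(X, y, (2.0,))
    # X_list -> [array([1., 2.]), array([3., 4.])]
    # y_list -> [array([0, 0]), array([1, 1])]
    return X_list, y_list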
def information_gain_ratio_continuous(attribute, df, y):
'''
INPUT
- attribute: str, feature to check
- df: pandas dataframe of features
- y: 1d array, target
OUTPUT
- float
Returns the information gain ratio according to Quinlan's C4.5
'''
max_split = determine_optimal_continuous_split_values(attribute, df, y)
X_list, y_list = make_multiple_split(df[attribute].values, y, max_split)
ig = multiple_information_gain(y, y_list, entropy)
pig = potential_attribute_information_gain_continuous(X_list)
return ig/pig
'''
The functions below compute the information gain ratio for continuous
variables and operate directly on numpy arrays, so they could potentially
be much faster than the pandas versions
'''
def information_gain_ratio_continuous_1d(X, y):
'''
INPUT
- X: continuous feature, 1d array
- y: 1d array, target
OUTPUT
- float
Returns the information gain ratio according to Quinlan's C4.5
'''
max_split = determine_optimal_continuous_split_values_1d(X, y)
X_list, y_list = make_multiple_split(X, y, max_split)
ig = multiple_information_gain(y, y_list, entropy)
pig = potential_attribute_information_gain_continuous(X_list)
return ig/pig
def determine_optimal_continuous_split_values_1d(X, y):
'''
INPUT
- X: continuous feature, 1d array
- y: 1d array, target
OUTPUT
- max_split: tuple of best values to split on
Returns tuple of split values that optimize information gain (min 1 max 3)
'''
attribute_value_array = X
split_values = np.unique(sorted(attribute_value_array))[:-1]
max_info_gain = 0
for split in combinations(split_values, 1):
X_list, y_list = make_multiple_split(attribute_value_array, y, split)
if multiple_information_gain(y, y_list, entropy) > max_info_gain:
max_info_gain = multiple_information_gain(y, y_list, entropy)
max_split = split
return max_split
'''
These are the categorical functions, which implement Ross Quinlan's
information gain ratio formulas from C4.5 exactly
'''
def information_gain_by_attribute_categorical(attribute, df, y):
'''
INPUT
- attribute: string, column in the dataframe that IS categorical
- df: dataframe of features
- y: 1d array of targets
OUTPUT
- float
Return the information gain for a specific attribute
'''
attribute_value_array = df[attribute].values
possible_attribute_values = np.unique(attribute_value_array)
attribute_info_gain = 0
numerator_values = Counter(attribute_value_array)
for possible_attribute_value in possible_attribute_values:
value_info_gain = 0
subset_of_y_values = \
y[attribute_value_array == possible_attribute_value]
y_outcomes = np.unique(subset_of_y_values)
for y_outcome in y_outcomes:
y_num_value = len(subset_of_y_values
[subset_of_y_values == y_outcome])
value_info_gain += \
float(y_num_value)/len(subset_of_y_values) \
* np.log2(float(y_num_value)/len(subset_of_y_values))
attribute_info_gain += \
float(numerator_values[possible_attribute_value])/len(y) * \
-1 * value_info_gain
return entropy(y) - attribute_info_gain
def potential_information_by_attribute_categorical(attribute, df, y):
'''
INPUT
- attribute: str, feature to check
- df: pandas dataframe of features
- y: 1d array, target
OUTPUT
- float
Returns the potential information gain according to Quinlan's C4.5
'''
attribute_value_array = df[attribute].values
possible_attribute_values = np.unique(attribute_value_array)
potential_information = 0
for possible_attribute_value in possible_attribute_values:
subset_of_y = y[attribute_value_array == possible_attribute_value]
potential_information += \
(float(len(subset_of_y))/len(y)) \
* np.log2(float(len(subset_of_y))/len(y))
return -1 * potential_information
def information_gain_ratio_categorical(attribute, df, y):
'''
INPUT
- attribute: str, feature to check
- df: pandas dataframe of features
- y: 1d array, target
OUTPUT
- float
Returns the information gain ratio according to Quinlan's C4.5
'''
information_gain = \
information_gain_by_attribute_categorical(attribute, df, y)
potential_information = \
potential_information_by_attribute_categorical(attribute, df, y)
return float(information_gain)/potential_information
'''
This function computes the information gain ratio: it first checks whether
the feature is categorical or continuous, then calls the appropriate
functions. It currently works for dataframes only.
'''
def information_gain_ratio(attribute, df, y):
'''
INPUT
- attribute: str, feature to check
- df: pandas dataframe of features
- y: 1d array, target
OUTPUT
- float
Returns the information gain ratio according to Quinlan's C4.5, checking
whether the feature is continuous or categorical so that the appropriate
computation is used
'''
if check_if_categorical(attribute, df):
return information_gain_ratio_categorical(attribute, df, y)
else:
return information_gain_ratio_continuous(attribute, df, y)
'''
These functions load toy datasets to test the functions above.
'''
def load_play_golf():
'''
INPUT
- none
OUTPUT
- df
- X
- y
Return the df, X features, y values for the playgolf.csv toy dataset
'''
df = pd.read_csv('data/playgolf.csv')
df.columns = [c.lower() for c in df.columns]
y = df.pop('result')
y = y.values
X = df.values
return df, X, y
def load_labor_negotiations_data():
'''
INPUT
- none
OUTPUT
- df
- X
- y
Return the df, X features, y values for the labor-neg.data.txt dataset
'''
df = pd.read_csv('data/labor-neg.data.txt', header=None)
df.columns = ['dur', 'wage1', 'wage2', 'wage3', 'cola', 'hours', 'pension',
'stby_pay', 'shift_diff', 'educ_allw', 'holidays',
'vacation', 'lngtrm_disabil', 'dntl_ins', 'bereavement',
'empl_hpln', 'target']
y = df.pop('target')
y = y.values
X = df.values
return df, X, y
def load_contraceptive_data():
'''
INPUT
- none
OUTPUT
- df
- X
- y
Return the df, X features, y values for the cmc.data.txt dataset
'''
df = pd.read_csv('data/cmc.data.txt', header=None)
df.columns = ['wife_age', 'wife_educ', 'hus_educ', 'num_kids', 'wife_rel',
'wife_work_status', 'hus_job', 'living_std',
'media_expo', 'label']
y = df.pop('label')
y = np.array(y)
X = df.values
return df, X, y
if __name__ == "__main__":
df, X, y = load_play_golf()
print('information_gain')
for attribute in df.columns:
print(attribute,
information_gain_by_attribute_categorical(attribute, df, y))
print('')
print('split_information_gain')
for attribute in df.columns:
print(attribute,
potential_information_by_attribute_categorical(attribute, df, y))
print('')
print('information_gain_ratio')
for attribute in df.columns:
print(attribute, information_gain_ratio_categorical(attribute, df, y))
print('\ntest information gain ratio for humidity')
print(information_gain_ratio_continuous('humidity', df, y))
print(information_gain_ratio_continuous_1d(df['humidity'].values, y))
|
mit
|
bheinzelman/NBAShotPredictor
|
viz.py
|
1
|
4721
|
'''
File to generate visualizations from the
nba shot log
indexes
LOCATION--------------0
W---------------------1
FINAL_MARGIN----------2
PERIOD----------------3
SHOT_DIST-------------4
SHOT_RESULT-----------5
CLOSEST_DEFENDER------6
CLOSE_DEF_DIST--------7
player_name-----------8
'''
import matplotlib
# matplotlib.use('pdf')
import matplotlib.pyplot as pyplot
from lib.Table import Table
import os
def remove_shotclock_nas(table):
shotclock = 8
gameclock = 7
for row in table.table:
if len(row[shotclock]) == 0:
new_time = row[gameclock].replace(':', '.')
row[shotclock] = new_time
def get_shot_dist_ntiles(n, idx):
ds = Table(file="datasets/shot_logs.csv")
ds.table = ds.table[1:]
remove_shotclock_nas(ds)
table = map(lambda r: r[:idx] + [float(r[idx])] + r[idx:], ds.table)
s_table = sorted(table, key=lambda x: x[idx])
quarter_len = len(table)/n
quartiles = [str(s_table[quarter_len*i: (quarter_len*(i+1))][-1][idx]) for i in xrange(n)]
# quartiles[0] = '0-' + str(quartiles[0])
labels = []
prev = str(0.0)
for val in quartiles:
labels.append(str(prev) + '-\n' + str(val))
prev = val
return labels
def bar_graph_continuous(table, idx, other_idx, xlabel, ylabel, name, title):
RESULT = 5
groups = table.group_by(idx)
groups.sort(key=lambda g: g.table[0][idx])
made = [0 for _ in groups]
missed = [0 for _ in groups]
for i, group in enumerate(groups):
for row in group.table:
if row[RESULT] == 'made':
made[i] += 1
else:
missed[i] += 1
made.reverse()
missed.reverse()
pyplot.figure()
fig, ax = pyplot.subplots()
r1 = ax.bar(range(1, len(groups) + 1), made, 0.3, color='g')
r2_v = map(lambda x: x + 0.3, range(1, len(groups) + 1))
r2 = ax.bar(r2_v, missed, 0.3, color='r')
ax.set_xticks(map(lambda x: x + 0.3, range(1, len(groups) + 1)))
ax.set_xticklabels(get_shot_dist_ntiles(len(groups), other_idx))
ax.legend((r1[0], r2[0]), ('Made', 'Missed'), loc=2)
pyplot.grid(True)
pyplot.xlabel(xlabel)
pyplot.ylabel(ylabel)
pyplot.title(title)
pyplot.savefig(name)
pyplot.close()
def bar_graph_categorical(table, idx, xlabel, ylabel, name, title, **kwargs):
RESULT = 5
location = kwargs.get('loc', 2)
groups = table.group_by(idx, type="map")
made = [0 for key in groups.keys()]
missed = [0 for key in groups.keys()]
key_map = {key: i for i, key in enumerate(groups.keys())}
for key in groups.keys():
for row in groups[key].table:
if row[RESULT] == 'made':
made[key_map[key]] += 1
else:
missed[key_map[key]] += 1
group_count = len(groups.keys())
pyplot.figure()
fig, ax = pyplot.subplots()
r1 = ax.bar(range(1, group_count + 1), made, 0.3, color='g')
r2_v = map(lambda x: x + 0.3, range(1, group_count + 1))
r2 = ax.bar(r2_v, missed, 0.3, color='r')
ax.set_xticks(map(lambda x: x + 0.3, range(1, group_count + 1)))
ax.legend((r1[0], r2[0]), ('Made', 'Missed'), loc=location)
ax.set_xticklabels(key_map.keys())
pyplot.grid(True)
pyplot.xlabel(xlabel)
pyplot.ylabel(ylabel)
pyplot.title(title)
pyplot.savefig(name)
pyplot.close()
if __name__ == '__main__':
RESULT = 5
SHOT_DIST = 4
SHOT_DIST_ORIG = 11
DEF_DIST = 7
DEF_DIST_ORIG = 16
LOCATION = 0
GAME_OUTCOME = 1
MARGIN = 2
MARGIN_ORIG = 4
SHOT_CLOCK = 9
SHOT_CLOCK_ORIG = 8
PERIOD = 3
ds = Table(file="datasets/shot_log.min.csv")
ds.table = ds.table[1:]
if not os.path.exists('viz'):
os.makedirs('viz')
bar_graph_continuous(ds, SHOT_DIST, SHOT_DIST_ORIG, "Shot Distance (FT)", "Count", "viz/shot_distance.png", "Shot Distance")
bar_graph_continuous(ds, DEF_DIST, DEF_DIST_ORIG, "Defensive Distance (FT)", "Count", "viz/def_distance.png", "Defender Distance")
bar_graph_continuous(ds, MARGIN, MARGIN_ORIG, "Final Margin (PTS)", "Count", "viz/margin.png", "Shots by Final Margin")
bar_graph_continuous(ds, SHOT_CLOCK, SHOT_CLOCK_ORIG, "Shot Clock (seconds)", "Count", "viz/shot_clock.png", "Shots by Shotclock")
bar_graph_categorical(ds, LOCATION, "Location H/A", "Count", "viz/home_away.png", "Shots by Location")
bar_graph_categorical(ds, GAME_OUTCOME, "Game Outcome W/L", "Count", "viz/win_lose.png", "Shots by Game Outcome")
bar_graph_categorical(ds, PERIOD, "Period", "Count", "viz/period.png", "Shots by Period", loc=1)
|
mit
|
khkaminska/scikit-learn
|
examples/linear_model/plot_sgd_separating_hyperplane.py
|
260
|
1219
|
"""
=========================================
SGD: Maximum margin separating hyperplane
=========================================
Plot the maximum margin separating hyperplane within a two-class
separable dataset using a linear Support Vector Machines classifier
trained using SGD.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import SGDClassifier
from sklearn.datasets.samples_generator import make_blobs
# we create 50 separable points
X, Y = make_blobs(n_samples=50, centers=2, random_state=0, cluster_std=0.60)
# fit the model
clf = SGDClassifier(loss="hinge", alpha=0.01, n_iter=200, fit_intercept=True)
clf.fit(X, Y)
# plot the line, the points, and the nearest vectors to the plane
xx = np.linspace(-1, 5, 10)
yy = np.linspace(-1, 5, 10)
X1, X2 = np.meshgrid(xx, yy)
Z = np.empty(X1.shape)
for (i, j), val in np.ndenumerate(X1):
x1 = val
x2 = X2[i, j]
p = clf.decision_function([x1, x2])
Z[i, j] = p[0]
levels = [-1.0, 0.0, 1.0]
linestyles = ['dashed', 'solid', 'dashed']
colors = 'k'
plt.contour(X1, X2, Z, levels, colors=colors, linestyles=linestyles)
plt.scatter(X[:, 0], X[:, 1], c=Y, cmap=plt.cm.Paired)
plt.axis('tight')
plt.show()
|
bsd-3-clause
|
biosustain/marsi
|
tests/test_data_retrieval.py
|
1
|
3955
|
# Copyright 2017 Chr. Hansen A/S and The Novo Nordisk Foundation Center for Biosustainability, DTU.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pytest
from pandas import DataFrame
from marsi.io import retriaval, parsers
TRAVIS = os.getenv("TRAVIS", False)
if TRAVIS: # TRAVIS value is 'true'
TRAVIS = True
@pytest.mark.skipif(TRAVIS, reason="Do not download on travis")
def test_retrieve_bigg(tmpdir):
bigg_dir = tmpdir.mkdir("bigg")
dest = bigg_dir.join("bigg_models_reactions.txt")
retriaval.retrieve_bigg_reactions(dest.strpath)
statinfo = os.stat(dest.strpath)
assert statinfo.st_size > 0
dest = bigg_dir.join("bigg_models_metabolites.txt")
retriaval.retrieve_bigg_metabolites(dest.strpath)
statinfo = os.stat(dest.strpath)
assert statinfo.st_size > 0
@pytest.mark.skipif(TRAVIS, reason="Do not download on travis")
def test_retrieve_drugbank(tmpdir):
drugbank_dir = tmpdir.mkdir("drugbank")
dest = drugbank_dir.join("drugbank_open_structures.sdf")
retriaval.retrieve_drugbank_open_structures(dest=dest.strpath)
statinfo = os.stat(dest.strpath)
assert statinfo.st_size > 0
dest = drugbank_dir.join("drugbank_open_vocabulary.txt")
retriaval.retrieve_drugbank_open_vocabulary(dest=dest.strpath)
statinfo = os.stat(dest.strpath)
assert statinfo.st_size > 0
@pytest.mark.skipif(TRAVIS, reason="Do not download on travis")
def test_retrieve_chebi(tmpdir):
chebi_dir = tmpdir.mkdir("chebi")
sdf_dest = chebi_dir.join("chebi_lite_3star.sdf")
retriaval.retrieve_chebi_structures(dest=sdf_dest.strpath)
statinfo = os.stat(sdf_dest.strpath)
assert statinfo.st_size > 0
names_dest = chebi_dir.join("chebi_names_3star.txt")
retriaval.retrieve_chebi_names(dest=names_dest.strpath)
statinfo = os.stat(names_dest.strpath)
assert statinfo.st_size > 0
relation_dest = chebi_dir.join("chebi_relation_3star.sdf")
retriaval.retrieve_chebi_relation(dest=relation_dest.strpath)
statinfo = os.stat(relation_dest.strpath)
assert statinfo.st_size > 0
vertice_dest = chebi_dir.join("chebi_vertice_3star.sdf")
retriaval.retrieve_chebi_vertice(dest=vertice_dest.strpath)
statinfo = os.stat(vertice_dest.strpath)
assert statinfo.st_size > 0
chebi_data = parsers.parse_chebi_data(names_dest.strpath, vertice_dest.strpath, relation_dest.strpath)
assert isinstance(chebi_data, DataFrame)
assert len(chebi_data) > 0
assert 'compound_id' in chebi_data.columns
@pytest.mark.skipif(TRAVIS, reason="Do not download on travis")
def test_retrieve_brite(tmpdir):
kegg_dir = tmpdir.mkdir("kegg")
dest = kegg_dir.join("kegg_brite_08310.keg")
retriaval.retrieve_kegg_brite(dest=dest.strpath)
statinfo = os.stat(dest.strpath)
assert statinfo.st_size > 0
brite_data = parsers.parse_kegg_brite(dest.strpath)
assert isinstance(brite_data, DataFrame)
assert len(brite_data) > 1
@pytest.mark.skip(reason="Takes too long to download, because it is a large file")
def test_retrieve_zinc(tmpdir):
zinc_dir = tmpdir.mkdir("zinc")
dest = zinc_dir.join("zinc_16_prop.tsv")
retriaval.retrieve_zinc_properties(dest=dest.strpath)
statinfo = os.stat(dest.strpath)
assert statinfo.st_size > 0
dest = zinc_dir.join("zinc_16.sdf.gz")
retriaval.retrieve_zinc_structures(dest=dest.strpath)
statinfo = os.stat(dest.strpath)
assert statinfo.st_size > 0
|
apache-2.0
|
alekz112/statsmodels
|
statsmodels/regression/_prediction.py
|
27
|
6035
|
# -*- coding: utf-8 -*-
"""
Created on Fri Dec 19 11:29:18 2014
Author: Josef Perktold
License: BSD-3
"""
import numpy as np
from scipy import stats
# this is similar to ContrastResults after t_test, partially copied and adjusted
class PredictionResults(object):
def __init__(self, predicted_mean, var_pred_mean, var_resid,
df=None, dist=None, row_labels=None):
self.predicted_mean = predicted_mean
self.var_pred_mean = var_pred_mean
self.df = df
self.var_resid = var_resid
self.row_labels = row_labels
if dist is None or dist == 'norm':
self.dist = stats.norm
self.dist_args = ()
elif dist == 't':
self.dist = stats.t
self.dist_args = (self.df,)
else:
self.dist = dist
self.dist_args = ()
@property
def se_obs(self):
return np.sqrt(self.var_pred_mean + self.var_resid)
@property
def se_mean(self):
return np.sqrt(self.var_pred_mean)
def conf_int(self, obs=False, alpha=0.05):
"""
Returns the confidence interval for the predicted mean or, if `obs` is
True, for a new observation.
Parameters
----------
obs : bool, optional
If True, include the residual variance so that the interval applies to
a new observation rather than to the predicted mean. Default is False.
alpha : float, optional
The significance level for the confidence interval.
i.e., the default `alpha` = .05 returns a 95% confidence interval.
Returns
-------
ci : ndarray, (k_constraints, 2)
The array has the lower and the upper limit of the confidence
interval in the columns.
"""
se = self.se_obs if obs else self.se_mean
q = self.dist.ppf(1 - alpha / 2., *self.dist_args)
lower = self.predicted_mean - q * se
upper = self.predicted_mean + q * se
return np.column_stack((lower, upper))
def summary_frame(self, what='all', alpha=0.05):
# TODO: finish and cleanup
import pandas as pd
from statsmodels.compat.collections import OrderedDict
ci_obs = self.conf_int(alpha=alpha, obs=True) # need to split
ci_mean = self.conf_int(alpha=alpha, obs=False)
to_include = OrderedDict()
to_include['mean'] = self.predicted_mean
to_include['mean_se'] = self.se_mean
to_include['mean_ci_lower'] = ci_mean[:, 0]
to_include['mean_ci_upper'] = ci_mean[:, 1]
to_include['obs_ci_lower'] = ci_obs[:, 0]
to_include['obs_ci_upper'] = ci_obs[:, 1]
self.table = to_include
#OrderedDict doesn't work to preserve sequence
# pandas dict doesn't handle 2d_array
#data = np.column_stack(list(to_include.values()))
#names = ....
res = pd.DataFrame(to_include, index=self.row_labels,
columns=to_include.keys())
return res
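# A minimal numeric sketch of the interval construction used by
# PredictionResults.conf_int above: for the normal case and alpha = 0.05 the
# multiplier is norm.ppf(0.975), about 1.96, applied to either se_mean or
# se_obs. The means and standard errors below are made up for illustration.
def _conf_int_sketch():
    predicted_mean = np.array([1.0, 2.0])
    se_mean = np.array([0.1, 0.2])
    q = stats.norm.ppf(1 - 0.05 / 2.)  # ~1.96
    lower = predicted_mean - q * se_mean
    upper = predicted_mean + q * se_mean
    return np.column_stack((lower, upper))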
def get_prediction(self, exog=None, transform=True, weights=None,
row_labels=None, pred_kwds=None):
"""
compute prediction results
Parameters
----------
exog : array-like, optional
The values for which you want to predict.
transform : bool, optional
If the model was fit via a formula, do you want to pass
exog through the formula. Default is True. E.g., if you fit
a model y ~ log(x1) + log(x2), and transform is True, then
you can pass a data structure that contains x1 and x2 in
their original form. Otherwise, you'd need to log the data
first.
weights : array_like, optional
Weights interpreted as in WLS, used for the variance of the predicted
residual.
args, kwargs :
Some models can take additional arguments or keywords, see the
predict method of the model for the details.
Returns
-------
prediction_results : instance
The prediction results instance contains prediction and prediction
variance and can on demand calculate confidence intervals and summary
tables for the prediction of the mean and of new observations.
"""
### prepare exog and row_labels, based on base Results.predict
if transform and hasattr(self.model, 'formula') and exog is not None:
from patsy import dmatrix
exog = dmatrix(self.model.data.design_info.builder,
exog)
if exog is not None:
if row_labels is None:
if hasattr(exog, 'index'):
row_labels = exog.index
else:
row_labels = None
exog = np.asarray(exog)
if exog.ndim == 1 and (self.model.exog.ndim == 1 or
self.model.exog.shape[1] == 1):
exog = exog[:, None]
exog = np.atleast_2d(exog) # needed in count model shape[1]
else:
exog = self.model.exog
if weights is None:
weights = getattr(self.model, 'weights', None)
if row_labels is None:
row_labels = getattr(self.model.data, 'row_labels', None)
# need to handle other arrays, TODO: is delegating to model possible ?
if weights is not None:
weights = np.asarray(weights)
if (weights.size > 1 and
(weights.ndim != 1 or weights.shape[0] == exog.shape[1])):
raise ValueError('weights has wrong shape')
### end
if pred_kwds is None:
pred_kwds = {}
predicted_mean = self.model.predict(self.params, exog, **pred_kwds)
covb = self.cov_params()
var_pred_mean = (exog * np.dot(covb, exog.T).T).sum(1)
# TODO: check that we have correct scale, Refactor scale #???
var_resid = self.scale / weights # self.mse_resid / weights
# special case for now:
if self.cov_type == 'fixed scale':
var_resid = self.cov_kwds['scale'] / weights
dist = ['norm', 't'][self.use_t]
return PredictionResults(predicted_mean, var_pred_mean, var_resid,
df=self.df_resid, dist=dist,
row_labels=row_labels)
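# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module). It assumes the
# public statsmodels API, where a fitted OLS results instance exposes
# ``get_prediction``; the data below is made up for demonstration only.
# >>> import numpy as np
# >>> import statsmodels.api as sm
# >>> x = np.linspace(0, 10, 50)
# >>> X = sm.add_constant(x)
# >>> y = 1.0 + 2.0 * x + np.random.randn(50)
# >>> res = sm.OLS(y, X).fit()
# >>> pred = res.get_prediction(X[:5])
# >>> frame = pred.summary_frame(alpha=0.05)  # mean, mean_se, mean/obs CI limits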
|
bsd-3-clause
|
Lawrence-Liu/scikit-learn
|
sklearn/preprocessing/tests/test_label.py
|
156
|
17626
|
import numpy as np
from scipy.sparse import issparse
from scipy.sparse import coo_matrix
from scipy.sparse import csc_matrix
from scipy.sparse import csr_matrix
from scipy.sparse import dok_matrix
from scipy.sparse import lil_matrix
from sklearn.utils.multiclass import type_of_target
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import ignore_warnings
from sklearn.preprocessing.label import LabelBinarizer
from sklearn.preprocessing.label import MultiLabelBinarizer
from sklearn.preprocessing.label import LabelEncoder
from sklearn.preprocessing.label import label_binarize
from sklearn.preprocessing.label import _inverse_binarize_thresholding
from sklearn.preprocessing.label import _inverse_binarize_multiclass
from sklearn import datasets
iris = datasets.load_iris()
def toarray(a):
if hasattr(a, "toarray"):
a = a.toarray()
return a
def test_label_binarizer():
lb = LabelBinarizer()
# one-class case defaults to negative label
inp = ["pos", "pos", "pos", "pos"]
expected = np.array([[0, 0, 0, 0]]).T
got = lb.fit_transform(inp)
assert_array_equal(lb.classes_, ["pos"])
assert_array_equal(expected, got)
assert_array_equal(lb.inverse_transform(got), inp)
# two-class case
inp = ["neg", "pos", "pos", "neg"]
expected = np.array([[0, 1, 1, 0]]).T
got = lb.fit_transform(inp)
assert_array_equal(lb.classes_, ["neg", "pos"])
assert_array_equal(expected, got)
to_invert = np.array([[1, 0],
[0, 1],
[0, 1],
[1, 0]])
assert_array_equal(lb.inverse_transform(to_invert), inp)
# multi-class case
inp = ["spam", "ham", "eggs", "ham", "0"]
expected = np.array([[0, 0, 0, 1],
[0, 0, 1, 0],
[0, 1, 0, 0],
[0, 0, 1, 0],
[1, 0, 0, 0]])
got = lb.fit_transform(inp)
assert_array_equal(lb.classes_, ['0', 'eggs', 'ham', 'spam'])
assert_array_equal(expected, got)
assert_array_equal(lb.inverse_transform(got), inp)
def test_label_binarizer_unseen_labels():
lb = LabelBinarizer()
expected = np.array([[1, 0, 0],
[0, 1, 0],
[0, 0, 1]])
got = lb.fit_transform(['b', 'd', 'e'])
assert_array_equal(expected, got)
expected = np.array([[0, 0, 0],
[1, 0, 0],
[0, 0, 0],
[0, 1, 0],
[0, 0, 1],
[0, 0, 0]])
got = lb.transform(['a', 'b', 'c', 'd', 'e', 'f'])
assert_array_equal(expected, got)
def test_label_binarizer_set_label_encoding():
lb = LabelBinarizer(neg_label=-2, pos_label=0)
# two-class case with pos_label=0
inp = np.array([0, 1, 1, 0])
expected = np.array([[-2, 0, 0, -2]]).T
got = lb.fit_transform(inp)
assert_array_equal(expected, got)
assert_array_equal(lb.inverse_transform(got), inp)
lb = LabelBinarizer(neg_label=-2, pos_label=2)
# multi-class case
inp = np.array([3, 2, 1, 2, 0])
expected = np.array([[-2, -2, -2, +2],
[-2, -2, +2, -2],
[-2, +2, -2, -2],
[-2, -2, +2, -2],
[+2, -2, -2, -2]])
got = lb.fit_transform(inp)
assert_array_equal(expected, got)
assert_array_equal(lb.inverse_transform(got), inp)
@ignore_warnings
def test_label_binarizer_errors():
# Check that invalid arguments yield ValueError
one_class = np.array([0, 0, 0, 0])
lb = LabelBinarizer().fit(one_class)
multi_label = [(2, 3), (0,), (0, 2)]
assert_raises(ValueError, lb.transform, multi_label)
lb = LabelBinarizer()
assert_raises(ValueError, lb.transform, [])
assert_raises(ValueError, lb.inverse_transform, [])
assert_raises(ValueError, LabelBinarizer, neg_label=2, pos_label=1)
assert_raises(ValueError, LabelBinarizer, neg_label=2, pos_label=2)
assert_raises(ValueError, LabelBinarizer, neg_label=1, pos_label=2,
sparse_output=True)
# Fail on y_type
assert_raises(ValueError, _inverse_binarize_thresholding,
y=csr_matrix([[1, 2], [2, 1]]), output_type="foo",
classes=[1, 2], threshold=0)
# Sequence of seq type should raise ValueError
y_seq_of_seqs = [[], [1, 2], [3], [0, 1, 3], [2]]
assert_raises(ValueError, LabelBinarizer().fit_transform, y_seq_of_seqs)
# Fail on the number of classes
assert_raises(ValueError, _inverse_binarize_thresholding,
y=csr_matrix([[1, 2], [2, 1]]), output_type="foo",
classes=[1, 2, 3], threshold=0)
# Fail on the dimension of 'binary'
assert_raises(ValueError, _inverse_binarize_thresholding,
y=np.array([[1, 2, 3], [2, 1, 3]]), output_type="binary",
classes=[1, 2, 3], threshold=0)
# Fail on multioutput data
assert_raises(ValueError, LabelBinarizer().fit, np.array([[1, 3], [2, 1]]))
assert_raises(ValueError, label_binarize, np.array([[1, 3], [2, 1]]),
[1, 2, 3])
def test_label_encoder():
# Test LabelEncoder's transform and inverse_transform methods
le = LabelEncoder()
le.fit([1, 1, 4, 5, -1, 0])
assert_array_equal(le.classes_, [-1, 0, 1, 4, 5])
assert_array_equal(le.transform([0, 1, 4, 4, 5, -1, -1]),
[1, 2, 3, 3, 4, 0, 0])
assert_array_equal(le.inverse_transform([1, 2, 3, 3, 4, 0, 0]),
[0, 1, 4, 4, 5, -1, -1])
assert_raises(ValueError, le.transform, [0, 6])
def test_label_encoder_fit_transform():
# Test fit_transform
le = LabelEncoder()
ret = le.fit_transform([1, 1, 4, 5, -1, 0])
assert_array_equal(ret, [2, 2, 3, 4, 0, 1])
le = LabelEncoder()
ret = le.fit_transform(["paris", "paris", "tokyo", "amsterdam"])
assert_array_equal(ret, [1, 1, 2, 0])
def test_label_encoder_errors():
# Check that invalid arguments yield ValueError
le = LabelEncoder()
assert_raises(ValueError, le.transform, [])
assert_raises(ValueError, le.inverse_transform, [])
# Fail on unseen labels
le = LabelEncoder()
le.fit([1, 2, 3, 1, -1])
assert_raises(ValueError, le.inverse_transform, [-1])
def test_sparse_output_multilabel_binarizer():
# test input as iterable of iterables
inputs = [
lambda: [(2, 3), (1,), (1, 2)],
lambda: (set([2, 3]), set([1]), set([1, 2])),
lambda: iter([iter((2, 3)), iter((1,)), set([1, 2])]),
]
indicator_mat = np.array([[0, 1, 1],
[1, 0, 0],
[1, 1, 0]])
inverse = inputs[0]()
for sparse_output in [True, False]:
for inp in inputs:
# With fit_transform
mlb = MultiLabelBinarizer(sparse_output=sparse_output)
got = mlb.fit_transform(inp())
assert_equal(issparse(got), sparse_output)
if sparse_output:
got = got.toarray()
assert_array_equal(indicator_mat, got)
assert_array_equal([1, 2, 3], mlb.classes_)
assert_equal(mlb.inverse_transform(got), inverse)
# With fit
mlb = MultiLabelBinarizer(sparse_output=sparse_output)
got = mlb.fit(inp()).transform(inp())
assert_equal(issparse(got), sparse_output)
if sparse_output:
got = got.toarray()
assert_array_equal(indicator_mat, got)
assert_array_equal([1, 2, 3], mlb.classes_)
assert_equal(mlb.inverse_transform(got), inverse)
assert_raises(ValueError, mlb.inverse_transform,
csr_matrix(np.array([[0, 1, 1],
[2, 0, 0],
[1, 1, 0]])))
def test_multilabel_binarizer():
# test input as iterable of iterables
inputs = [
lambda: [(2, 3), (1,), (1, 2)],
lambda: (set([2, 3]), set([1]), set([1, 2])),
lambda: iter([iter((2, 3)), iter((1,)), set([1, 2])]),
]
indicator_mat = np.array([[0, 1, 1],
[1, 0, 0],
[1, 1, 0]])
inverse = inputs[0]()
for inp in inputs:
# With fit_transform
mlb = MultiLabelBinarizer()
got = mlb.fit_transform(inp())
assert_array_equal(indicator_mat, got)
assert_array_equal([1, 2, 3], mlb.classes_)
assert_equal(mlb.inverse_transform(got), inverse)
# With fit
mlb = MultiLabelBinarizer()
got = mlb.fit(inp()).transform(inp())
assert_array_equal(indicator_mat, got)
assert_array_equal([1, 2, 3], mlb.classes_)
assert_equal(mlb.inverse_transform(got), inverse)
def test_multilabel_binarizer_empty_sample():
mlb = MultiLabelBinarizer()
y = [[1, 2], [1], []]
Y = np.array([[1, 1],
[1, 0],
[0, 0]])
assert_array_equal(mlb.fit_transform(y), Y)
def test_multilabel_binarizer_unknown_class():
mlb = MultiLabelBinarizer()
y = [[1, 2]]
assert_raises(KeyError, mlb.fit(y).transform, [[0]])
mlb = MultiLabelBinarizer(classes=[1, 2])
assert_raises(KeyError, mlb.fit_transform, [[0]])
def test_multilabel_binarizer_given_classes():
inp = [(2, 3), (1,), (1, 2)]
indicator_mat = np.array([[0, 1, 1],
[1, 0, 0],
[1, 0, 1]])
# fit_transform()
mlb = MultiLabelBinarizer(classes=[1, 3, 2])
assert_array_equal(mlb.fit_transform(inp), indicator_mat)
assert_array_equal(mlb.classes_, [1, 3, 2])
# fit().transform()
mlb = MultiLabelBinarizer(classes=[1, 3, 2])
assert_array_equal(mlb.fit(inp).transform(inp), indicator_mat)
assert_array_equal(mlb.classes_, [1, 3, 2])
# ensure works with extra class
mlb = MultiLabelBinarizer(classes=[4, 1, 3, 2])
assert_array_equal(mlb.fit_transform(inp),
np.hstack(([[0], [0], [0]], indicator_mat)))
assert_array_equal(mlb.classes_, [4, 1, 3, 2])
# ensure fit is no-op as iterable is not consumed
inp = iter(inp)
mlb = MultiLabelBinarizer(classes=[1, 3, 2])
assert_array_equal(mlb.fit(inp).transform(inp), indicator_mat)
def test_multilabel_binarizer_same_length_sequence():
# Ensure sequences of the same length are not interpreted as a 2-d array
inp = [[1], [0], [2]]
indicator_mat = np.array([[0, 1, 0],
[1, 0, 0],
[0, 0, 1]])
# fit_transform()
mlb = MultiLabelBinarizer()
assert_array_equal(mlb.fit_transform(inp), indicator_mat)
assert_array_equal(mlb.inverse_transform(indicator_mat), inp)
# fit().transform()
mlb = MultiLabelBinarizer()
assert_array_equal(mlb.fit(inp).transform(inp), indicator_mat)
assert_array_equal(mlb.inverse_transform(indicator_mat), inp)
def test_multilabel_binarizer_non_integer_labels():
tuple_classes = np.empty(3, dtype=object)
tuple_classes[:] = [(1,), (2,), (3,)]
inputs = [
([('2', '3'), ('1',), ('1', '2')], ['1', '2', '3']),
([('b', 'c'), ('a',), ('a', 'b')], ['a', 'b', 'c']),
([((2,), (3,)), ((1,),), ((1,), (2,))], tuple_classes),
]
indicator_mat = np.array([[0, 1, 1],
[1, 0, 0],
[1, 1, 0]])
for inp, classes in inputs:
# fit_transform()
mlb = MultiLabelBinarizer()
assert_array_equal(mlb.fit_transform(inp), indicator_mat)
assert_array_equal(mlb.classes_, classes)
assert_array_equal(mlb.inverse_transform(indicator_mat), inp)
# fit().transform()
mlb = MultiLabelBinarizer()
assert_array_equal(mlb.fit(inp).transform(inp), indicator_mat)
assert_array_equal(mlb.classes_, classes)
assert_array_equal(mlb.inverse_transform(indicator_mat), inp)
mlb = MultiLabelBinarizer()
assert_raises(TypeError, mlb.fit_transform, [({}), ({}, {'a': 'b'})])
def test_multilabel_binarizer_non_unique():
inp = [(1, 1, 1, 0)]
indicator_mat = np.array([[1, 1]])
mlb = MultiLabelBinarizer()
assert_array_equal(mlb.fit_transform(inp), indicator_mat)
def test_multilabel_binarizer_inverse_validation():
inp = [(1, 1, 1, 0)]
mlb = MultiLabelBinarizer()
mlb.fit_transform(inp)
# Not binary
assert_raises(ValueError, mlb.inverse_transform, np.array([[1, 3]]))
# The following binary cases are fine, however
mlb.inverse_transform(np.array([[0, 0]]))
mlb.inverse_transform(np.array([[1, 1]]))
mlb.inverse_transform(np.array([[1, 0]]))
# Wrong shape
assert_raises(ValueError, mlb.inverse_transform, np.array([[1]]))
assert_raises(ValueError, mlb.inverse_transform, np.array([[1, 1, 1]]))
def test_label_binarize_with_class_order():
out = label_binarize([1, 6], classes=[1, 2, 4, 6])
expected = np.array([[1, 0, 0, 0], [0, 0, 0, 1]])
assert_array_equal(out, expected)
# Modified class order
out = label_binarize([1, 6], classes=[1, 6, 4, 2])
expected = np.array([[1, 0, 0, 0], [0, 1, 0, 0]])
assert_array_equal(out, expected)
out = label_binarize([0, 1, 2, 3], classes=[3, 2, 0, 1])
expected = np.array([[0, 0, 1, 0],
[0, 0, 0, 1],
[0, 1, 0, 0],
[1, 0, 0, 0]])
assert_array_equal(out, expected)
def check_binarized_results(y, classes, pos_label, neg_label, expected):
for sparse_output in [True, False]:
if ((pos_label == 0 or neg_label != 0) and sparse_output):
assert_raises(ValueError, label_binarize, y, classes,
neg_label=neg_label, pos_label=pos_label,
sparse_output=sparse_output)
continue
# check label_binarize
binarized = label_binarize(y, classes, neg_label=neg_label,
pos_label=pos_label,
sparse_output=sparse_output)
assert_array_equal(toarray(binarized), expected)
assert_equal(issparse(binarized), sparse_output)
# check inverse
y_type = type_of_target(y)
if y_type == "multiclass":
inversed = _inverse_binarize_multiclass(binarized, classes=classes)
else:
inversed = _inverse_binarize_thresholding(binarized,
output_type=y_type,
classes=classes,
threshold=((neg_label +
pos_label) /
2.))
assert_array_equal(toarray(inversed), toarray(y))
# Check label binarizer
lb = LabelBinarizer(neg_label=neg_label, pos_label=pos_label,
sparse_output=sparse_output)
binarized = lb.fit_transform(y)
assert_array_equal(toarray(binarized), expected)
assert_equal(issparse(binarized), sparse_output)
inverse_output = lb.inverse_transform(binarized)
assert_array_equal(toarray(inverse_output), toarray(y))
assert_equal(issparse(inverse_output), issparse(y))
def test_label_binarize_binary():
y = [0, 1, 0]
classes = [0, 1]
pos_label = 2
neg_label = -1
expected = np.array([[2, -1], [-1, 2], [2, -1]])[:, 1].reshape((-1, 1))
yield check_binarized_results, y, classes, pos_label, neg_label, expected
# Binary case where sparse_output = True will not result in a ValueError
y = [0, 1, 0]
classes = [0, 1]
pos_label = 3
neg_label = 0
expected = np.array([[3, 0], [0, 3], [3, 0]])[:, 1].reshape((-1, 1))
yield check_binarized_results, y, classes, pos_label, neg_label, expected
def test_label_binarize_multiclass():
y = [0, 1, 2]
classes = [0, 1, 2]
pos_label = 2
neg_label = 0
expected = 2 * np.eye(3)
yield check_binarized_results, y, classes, pos_label, neg_label, expected
assert_raises(ValueError, label_binarize, y, classes, neg_label=-1,
pos_label=pos_label, sparse_output=True)
def test_label_binarize_multilabel():
y_ind = np.array([[0, 1, 0], [1, 1, 1], [0, 0, 0]])
classes = [0, 1, 2]
pos_label = 2
neg_label = 0
expected = pos_label * y_ind
y_sparse = [sparse_matrix(y_ind)
for sparse_matrix in [coo_matrix, csc_matrix, csr_matrix,
dok_matrix, lil_matrix]]
for y in [y_ind] + y_sparse:
yield (check_binarized_results, y, classes, pos_label, neg_label,
expected)
assert_raises(ValueError, label_binarize, y, classes, neg_label=-1,
pos_label=pos_label, sparse_output=True)
def test_invalid_input_label_binarize():
assert_raises(ValueError, label_binarize, [0, 2], classes=[0, 2],
pos_label=0, neg_label=1)
def test_inverse_binarize_multiclass():
got = _inverse_binarize_multiclass(csr_matrix([[0, 1, 0],
[-1, 0, -1],
[0, 0, 0]]),
np.arange(3))
assert_array_equal(got, np.array([1, 1, 0]))
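# Illustrative sketch (not part of the original test module): label_binarize
# with an explicit class order, mirroring test_label_binarize_with_class_order
# above; the inputs are taken directly from that test.
# >>> from sklearn.preprocessing import label_binarize
# >>> label_binarize([1, 6], classes=[1, 6, 4, 2])
# array([[1, 0, 0, 0],
# [0, 1, 0, 0]])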
|
bsd-3-clause
|
dcprojects/CoolProp
|
dev/scripts/viscosity_builder.py
|
5
|
3715
|
from math import sqrt,exp
from CoolProp.CoolProp import Props
import numpy as np
import matplotlib.pyplot as plt
from scipy.odr import *
from math import log
E_K = {'REFPROP-Ammonia':386,
'REFPROP-Argon':143.2
}
SIGMA = {'REFPROP-Ammonia':0.2957,
'REFPROP-Argon':0.335
}
E_K['REFPROP-Propane']=263.88
SIGMA['REFPROP-Propane']=0.49748
E_K['REFPROP-R32']=289.65
SIGMA['REFPROP-R32']=0.4098
E_K['REFPROP-R245fa'] = 329.72
SIGMA['REFPROP-R245fa'] = 0.5529
def viscosity_dilute(fluid,T,e_k,sigma):
"""
T in [K], e_k in [K], sigma in [nm]
viscosity returned is in [Pa-s]
"""
Tstar = T/e_k
molemass = Props(fluid,'molemass')
if fluid == 'Propane' or fluid == 'REFPROP-Propane':
a = [0.25104574,-0.47271238,0,0.060836515,0]
theta_star = exp(a[0]*pow(log(Tstar),0)+a[1]*pow(log(Tstar),1)+a[3]*pow(log(Tstar),3));
eta_star = 0.021357*sqrt(molemass*T)/(pow(sigma,2)*theta_star)/1e6;
return eta_star
# From Neufeld, 1972, Journal of Chemical Physics - checked coefficients
OMEGA_2_2 = 1.16145*pow(Tstar,-0.14874)+ 0.52487*exp(-0.77320*Tstar)+2.16178*exp(-2.43787*Tstar)
# Using the leading constant from McLinden, 2000 since the leading term from Huber 2003 gives crazy values
eta_star = 26.692e-3*sqrt(molemass*T)/(pow(sigma,2)*OMEGA_2_2)/1e6
return eta_star
def viscosity_linear(fluid, T, rho, e_k, sigma):
"""
Implements the method of Vogel 1998 (Propane) for the linear part
"""
N_A=6.02214129e23
molemass = Props(fluid,'molemass')
Tstar = T/e_k
b= [-19.572881,219.73999,-1015.3226,2471.01251,-3375.1717,2491.6597,-787.26086,14.085455,-0.34664158]
s = sum([b[i]*pow(Tstar,-0.25*i) for i in range(7)])
B_eta_star = s+b[7]*pow(Tstar,-2.5)+b[8]*pow(Tstar,-5.5)  # [no units]
B_eta = N_A*pow(sigma/1e9,3)*B_eta_star  # [m^3/mol]
return viscosity_dilute(fluid,T,e_k,sigma)*B_eta*rho/molemass*1000
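# Illustrative sketch (not part of the original script): the two helpers can be
# combined as mu ~ mu_0 + mu_1 at a given state point. The temperature and
# density below are assumptions for demonstration, and the call assumes
# CoolProp's Props can resolve the REFPROP fluid name.
# >>> mu_0 = viscosity_dilute('REFPROP-Propane', 300.0, E_K['REFPROP-Propane'], SIGMA['REFPROP-Propane'])  # [Pa-s]
# >>> mu_1 = viscosity_linear('REFPROP-Propane', 300.0, 10.0, E_K['REFPROP-Propane'], SIGMA['REFPROP-Propane'])  # [Pa-s]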
from PDSim.misc.datatypes import Collector
RHO = Collector()
TT = Collector()
DELTA = Collector()
TAU = Collector()
VV = Collector()
VV0 = Collector()
VV1 = Collector()
VVH = Collector()
fluid = 'REFPROP-R32'
Tc = Props(fluid,'Tcrit')
rhoc = Props(fluid,'rhocrit')
for T in np.linspace(290,Props(fluid,'Tcrit')-0.1,100):
rhoV = Props('D','T',T,'Q',1,fluid)
rhoL = Props('D','T',T,'Q',0,fluid)
rhomax = Props('D','T',Props(fluid,'Tmin'),'Q',0,fluid)
for rho in list(np.linspace(rhoL,rhomax,100)):#+list(np.linspace(rhoV,0.0001,100)):
#for rho in list(np.linspace(rhoV,0.0001,100)):
mu_0 = viscosity_dilute(fluid,T,E_K[fluid],SIGMA[fluid])
mu_1 = viscosity_linear(fluid,T,rho,E_K[fluid],SIGMA[fluid])
mu = Props('V','T',T,'D',rho,fluid)
VV << mu
VV0 << mu_0
VV1 << mu_1
VVH << mu-mu_0-mu_1
TT << T
RHO << rho
DELTA << rho/rhoc
TAU << Tc/T
def f_RHS(E, DELTA_TAU, VV):
k = 0
total = 0
DELTA = DELTA_TAU[0,:]
TAU = DELTA_TAU[1,:]
for i in range(2,5):
for j in range(3):
total += E[k]*DELTA**i/TAU**j
k += 1
# f1,f2,f3,g1,g2 = E[k],E[k+1],E[k+2],E[k+3],E[k+4]
# DELTA0 = g1*(1+g2*np.sqrt(TAU))
# total += (f1+f2/TAU+f3/TAU/TAU)*(DELTA/(DELTA0-DELTA)-DELTA/DELTA0)
print(np.mean(np.abs((total/VV-1)*100)),'%')
return total
log_muH = np.log(VVH.v().T)
x = np.c_[DELTA.v().T, TAU.v().T].T
y = VVH.v()
linear = Model(f_RHS, extra_args = (y,) )
mydata = Data(x, y)
myodr = ODR(mydata, linear, beta0=np.array([0.1]*17),)
myoutput = myodr.run()
E = myoutput.beta
print(E)
#plt.plot(TT.vec, y,'b.',TT.vec, f_RHS(E, x, y),'r.')
#plt.show()
#plt.plot()
plt.plot(y.T,f_RHS(E, x, y))
plt.show()
|
mit
|
simon-pepin/scikit-learn
|
sklearn/tests/test_metaestimators.py
|
226
|
4954
|
"""Common tests for metaestimators"""
import functools
import numpy as np
from sklearn.base import BaseEstimator
from sklearn.externals.six import iterkeys
from sklearn.datasets import make_classification
from sklearn.utils.testing import assert_true, assert_false, assert_raises
from sklearn.pipeline import Pipeline
from sklearn.grid_search import GridSearchCV, RandomizedSearchCV
from sklearn.feature_selection import RFE, RFECV
from sklearn.ensemble import BaggingClassifier
class DelegatorData(object):
def __init__(self, name, construct, skip_methods=(),
fit_args=make_classification()):
self.name = name
self.construct = construct
self.fit_args = fit_args
self.skip_methods = skip_methods
DELEGATING_METAESTIMATORS = [
DelegatorData('Pipeline', lambda est: Pipeline([('est', est)])),
DelegatorData('GridSearchCV',
lambda est: GridSearchCV(
est, param_grid={'param': [5]}, cv=2),
skip_methods=['score']),
DelegatorData('RandomizedSearchCV',
lambda est: RandomizedSearchCV(
est, param_distributions={'param': [5]}, cv=2, n_iter=1),
skip_methods=['score']),
DelegatorData('RFE', RFE,
skip_methods=['transform', 'inverse_transform', 'score']),
DelegatorData('RFECV', RFECV,
skip_methods=['transform', 'inverse_transform', 'score']),
DelegatorData('BaggingClassifier', BaggingClassifier,
skip_methods=['transform', 'inverse_transform', 'score',
'predict_proba', 'predict_log_proba', 'predict'])
]
def test_metaestimator_delegation():
# Ensures specified metaestimators have methods iff subestimator does
def hides(method):
@property
def wrapper(obj):
if obj.hidden_method == method.__name__:
raise AttributeError('%r is hidden' % obj.hidden_method)
return functools.partial(method, obj)
return wrapper
class SubEstimator(BaseEstimator):
def __init__(self, param=1, hidden_method=None):
self.param = param
self.hidden_method = hidden_method
def fit(self, X, y=None, *args, **kwargs):
self.coef_ = np.arange(X.shape[1])
return True
def _check_fit(self):
if not hasattr(self, 'coef_'):
raise RuntimeError('Estimator is not fit')
@hides
def inverse_transform(self, X, *args, **kwargs):
self._check_fit()
return X
@hides
def transform(self, X, *args, **kwargs):
self._check_fit()
return X
@hides
def predict(self, X, *args, **kwargs):
self._check_fit()
return np.ones(X.shape[0])
@hides
def predict_proba(self, X, *args, **kwargs):
self._check_fit()
return np.ones(X.shape[0])
@hides
def predict_log_proba(self, X, *args, **kwargs):
self._check_fit()
return np.ones(X.shape[0])
@hides
def decision_function(self, X, *args, **kwargs):
self._check_fit()
return np.ones(X.shape[0])
@hides
def score(self, X, *args, **kwargs):
self._check_fit()
return 1.0
methods = [k for k in iterkeys(SubEstimator.__dict__)
if not k.startswith('_') and not k.startswith('fit')]
methods.sort()
for delegator_data in DELEGATING_METAESTIMATORS:
delegate = SubEstimator()
delegator = delegator_data.construct(delegate)
for method in methods:
if method in delegator_data.skip_methods:
continue
assert_true(hasattr(delegate, method))
assert_true(hasattr(delegator, method),
msg="%s does not have method %r when its delegate does"
% (delegator_data.name, method))
# delegation before fit raises an exception
assert_raises(Exception, getattr(delegator, method),
delegator_data.fit_args[0])
delegator.fit(*delegator_data.fit_args)
for method in methods:
if method in delegator_data.skip_methods:
continue
# smoke test delegation
getattr(delegator, method)(delegator_data.fit_args[0])
for method in methods:
if method in delegator_data.skip_methods:
continue
delegate = SubEstimator(hidden_method=method)
delegator = delegator_data.construct(delegate)
assert_false(hasattr(delegate, method))
assert_false(hasattr(delegator, method),
msg="%s has method %r when its delegate does not"
% (delegator_data.name, method))
|
bsd-3-clause
|
sonnyhu/scikit-learn
|
examples/neural_networks/plot_mlp_alpha.py
|
58
|
4088
|
"""
================================================
Varying regularization in Multi-layer Perceptron
================================================
A comparison of different values for the regularization parameter 'alpha' on
synthetic datasets. The plot shows that different alphas yield different
decision functions.
Alpha is a parameter for the regularization term, a.k.a. the penalty term,
which combats overfitting by constraining the size of the weights. Increasing
alpha may fix high variance (a sign of overfitting) by encouraging smaller
weights, resulting in a decision boundary with less curvature. Similarly,
decreasing alpha may fix high bias (a sign of underfitting) by encouraging
larger weights, potentially resulting in a more complicated decision boundary.
"""
print(__doc__)
# Author: Issam H. Laradji
# License: BSD 3 clause
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.colors import ListedColormap
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.datasets import make_moons, make_circles, make_classification
from sklearn.neural_network import MLPClassifier
h = .02 # step size in the mesh
alphas = np.logspace(-5, 3, 5)
names = []
for i in alphas:
names.append('alpha ' + str(i))
classifiers = []
for i in alphas:
classifiers.append(MLPClassifier(alpha=i, random_state=1))
X, y = make_classification(n_features=2, n_redundant=0, n_informative=2,
random_state=0, n_clusters_per_class=1)
rng = np.random.RandomState(2)
X += 2 * rng.uniform(size=X.shape)
linearly_separable = (X, y)
datasets = [make_moons(noise=0.3, random_state=0),
make_circles(noise=0.2, factor=0.5, random_state=1),
linearly_separable]
figure = plt.figure(figsize=(17, 9))
i = 1
# iterate over datasets
for X, y in datasets:
# preprocess dataset, split into training and test part
X = StandardScaler().fit_transform(X)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.4)
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
# just plot the dataset first
cm = plt.cm.RdBu
cm_bright = ListedColormap(['#FF0000', '#0000FF'])
ax = plt.subplot(len(datasets), len(classifiers) + 1, i)
# Plot the training points
ax.scatter(X_train[:, 0], X_train[:, 1], c=y_train, cmap=cm_bright)
# and testing points
ax.scatter(X_test[:, 0], X_test[:, 1], c=y_test, cmap=cm_bright, alpha=0.6)
ax.set_xlim(xx.min(), xx.max())
ax.set_ylim(yy.min(), yy.max())
ax.set_xticks(())
ax.set_yticks(())
i += 1
# iterate over classifiers
for name, clf in zip(names, classifiers):
ax = plt.subplot(len(datasets), len(classifiers) + 1, i)
clf.fit(X_train, y_train)
score = clf.score(X_test, y_test)
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
if hasattr(clf, "decision_function"):
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
else:
Z = clf.predict_proba(np.c_[xx.ravel(), yy.ravel()])[:, 1]
# Put the result into a color plot
Z = Z.reshape(xx.shape)
ax.contourf(xx, yy, Z, cmap=cm, alpha=.8)
# Plot also the training points
ax.scatter(X_train[:, 0], X_train[:, 1], c=y_train, cmap=cm_bright)
# and testing points
ax.scatter(X_test[:, 0], X_test[:, 1], c=y_test, cmap=cm_bright,
alpha=0.6)
ax.set_xlim(xx.min(), xx.max())
ax.set_ylim(yy.min(), yy.max())
ax.set_xticks(())
ax.set_yticks(())
ax.set_title(name)
ax.text(xx.max() - .3, yy.min() + .3, ('%.2f' % score).lstrip('0'),
size=15, horizontalalignment='right')
i += 1
figure.subplots_adjust(left=.02, right=.98)
plt.show()
|
bsd-3-clause
|
doanduyhai/incubator-zeppelin
|
python/src/main/resources/grpc/python/ipython_client.py
|
27
|
1457
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import grpc
import ipython_pb2
import ipython_pb2_grpc
def run():
channel = grpc.insecure_channel('localhost:50053')
stub = ipython_pb2_grpc.IPythonStub(channel)
response = stub.execute(ipython_pb2.ExecuteRequest(code="import time\nfor i in range(1,4):\n\ttime.sleep(1)\n\tprint(i)\n" +
"%matplotlib inline\nimport matplotlib.pyplot as plt\ndata=[1,1,2,3,4]\nplt.figure()\nplt.plot(data)"))
for r in response:
print("output:" + r.output)
response = stub.execute(ipython_pb2.ExecuteRequest(code="range?"))
for r in response:
print(r)
if __name__ == '__main__':
run()
|
apache-2.0
|
tmerrick1/spack
|
var/spack/repos/builtin/packages/py-sfepy/package.py
|
5
|
2230
|
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class PySfepy(PythonPackage):
"""SfePy (http://sfepy.org) is a software for solving systems of coupled
partial differential equations (PDEs) by the finite element method in 1D,
2D and 3D. It can be viewed both as black-box PDE solver, and as a Python
package which can be used for building custom applications.
"""
homepage = "http://sfepy.org"
url = "https://github.com/sfepy/sfepy/archive/release_2017.3.tar.gz"
version('2017.3', '65ab606a9fe80fccf17a7eb5ab8fd025')
variant('petsc', default=False, description='Enable PETSc support')
depends_on('py-numpy', type=('build', 'run'))
depends_on('py-setuptools', type='build')
depends_on('py-six', type='run')
depends_on('py-scipy', type='run')
depends_on('py-matplotlib', type='run')
depends_on('py-sympy', type='run')
depends_on('hdf5+hl', type='run')
depends_on('py-pytables', type='run')
depends_on('py-petsc4py', type='run', when='+petsc')
|
lgpl-2.1
|
ilyes14/scikit-learn
|
sklearn/neighbors/base.py
|
71
|
31147
|
"""Base and mixin classes for nearest neighbors"""
# Authors: Jake Vanderplas <[email protected]>
# Fabian Pedregosa <[email protected]>
# Alexandre Gramfort <[email protected]>
# Sparseness support by Lars Buitinck <[email protected]>
# Multi-output support by Arnaud Joly <[email protected]>
#
# License: BSD 3 clause (C) INRIA, University of Amsterdam
import warnings
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.sparse import csr_matrix, issparse
from .ball_tree import BallTree
from .kd_tree import KDTree
from ..base import BaseEstimator
from ..metrics import pairwise_distances
from ..metrics.pairwise import PAIRWISE_DISTANCE_FUNCTIONS
from ..utils import check_X_y, check_array, _get_n_jobs, gen_even_slices
from ..utils.fixes import argpartition
from ..utils.validation import DataConversionWarning
from ..utils.validation import NotFittedError
from ..externals import six
from ..externals.joblib import Parallel, delayed
VALID_METRICS = dict(ball_tree=BallTree.valid_metrics,
kd_tree=KDTree.valid_metrics,
# The following list comes from the
# sklearn.metrics.pairwise doc string
brute=(list(PAIRWISE_DISTANCE_FUNCTIONS.keys()) +
['braycurtis', 'canberra', 'chebyshev',
'correlation', 'cosine', 'dice', 'hamming',
'jaccard', 'kulsinski', 'mahalanobis',
'matching', 'minkowski', 'rogerstanimoto',
'russellrao', 'seuclidean', 'sokalmichener',
'sokalsneath', 'sqeuclidean',
'yule', 'wminkowski']))
VALID_METRICS_SPARSE = dict(ball_tree=[],
kd_tree=[],
brute=PAIRWISE_DISTANCE_FUNCTIONS.keys())
class NeighborsWarning(UserWarning):
pass
# Make sure that NeighborsWarning is displayed more than once
warnings.simplefilter("always", NeighborsWarning)
def _check_weights(weights):
"""Check to make sure weights are valid"""
if weights in (None, 'uniform', 'distance'):
return weights
elif callable(weights):
return weights
else:
raise ValueError("weights not recognized: should be 'uniform', "
"'distance', or a callable function")
def _get_weights(dist, weights):
"""Get the weights from an array of distances and a parameter ``weights``
Parameters
----------
dist : ndarray
The input distances
weights : {'uniform', 'distance' or a callable}
The kind of weighting used
Returns
-------
weights_arr : array of the same shape as ``dist``
if ``weights == 'uniform'``, then returns None
"""
if weights in (None, 'uniform'):
return None
elif weights == 'distance':
# if user attempts to classify a point that was zero distance from one
# or more training points, those training points are weighted as 1.0
# and the other points as 0.0
if dist.dtype is np.dtype(object):
for point_dist_i, point_dist in enumerate(dist):
# check if point_dist is iterable
# (ex: RadiusNeighborClassifier.predict may set an element of
# dist to 1e-6 to represent an 'outlier')
if hasattr(point_dist, '__contains__') and 0. in point_dist:
dist[point_dist_i] = point_dist == 0.
else:
dist[point_dist_i] = 1. / point_dist
else:
with np.errstate(divide='ignore'):
dist = 1. / dist
inf_mask = np.isinf(dist)
inf_row = np.any(inf_mask, axis=1)
dist[inf_row] = inf_mask[inf_row]
return dist
elif callable(weights):
return weights(dist)
else:
raise ValueError("weights not recognized: should be 'uniform', "
"'distance', or a callable function")
class NeighborsBase(six.with_metaclass(ABCMeta, BaseEstimator)):
"""Base class for nearest neighbors estimators."""
@abstractmethod
def __init__(self):
pass
def _init_params(self, n_neighbors=None, radius=None,
algorithm='auto', leaf_size=30, metric='minkowski',
p=2, metric_params=None, n_jobs=1, **kwargs):
if kwargs:
warnings.warn("Passing additional arguments to the metric "
"function as **kwargs is deprecated "
"and will no longer be supported in 0.18. "
"Use metric_params instead.",
DeprecationWarning, stacklevel=3)
if metric_params is None:
metric_params = {}
metric_params.update(kwargs)
self.n_neighbors = n_neighbors
self.radius = radius
self.algorithm = algorithm
self.leaf_size = leaf_size
self.metric = metric
self.metric_params = metric_params
self.p = p
self.n_jobs = n_jobs
if algorithm not in ['auto', 'brute',
'kd_tree', 'ball_tree']:
raise ValueError("unrecognized algorithm: '%s'" % algorithm)
if algorithm == 'auto':
if metric == 'precomputed':
alg_check = 'brute'
else:
alg_check = 'ball_tree'
else:
alg_check = algorithm
if callable(metric):
if algorithm == 'kd_tree':
# callable metric is only valid for brute force and ball_tree
raise ValueError(
"kd_tree algorithm does not support callable metric '%s'"
% metric)
elif metric not in VALID_METRICS[alg_check]:
raise ValueError("Metric '%s' not valid for algorithm '%s'"
% (metric, algorithm))
if self.metric_params is not None and 'p' in self.metric_params:
warnings.warn("Parameter p is found in metric_params. "
"The corresponding parameter from __init__ "
"is ignored.", SyntaxWarning, stacklevel=3)
effective_p = metric_params['p']
else:
effective_p = self.p
if self.metric in ['wminkowski', 'minkowski'] and effective_p < 1:
raise ValueError("p must be greater than one for minkowski metric")
self._fit_X = None
self._tree = None
self._fit_method = None
def _fit(self, X):
if self.metric_params is None:
self.effective_metric_params_ = {}
else:
self.effective_metric_params_ = self.metric_params.copy()
effective_p = self.effective_metric_params_.get('p', self.p)
if self.metric in ['wminkowski', 'minkowski']:
self.effective_metric_params_['p'] = effective_p
self.effective_metric_ = self.metric
# For minkowski distance, use more efficient methods where available
if self.metric == 'minkowski':
p = self.effective_metric_params_.pop('p', 2)
if p < 1:
raise ValueError("p must be greater than one "
"for minkowski metric")
elif p == 1:
self.effective_metric_ = 'manhattan'
elif p == 2:
self.effective_metric_ = 'euclidean'
elif p == np.inf:
self.effective_metric_ = 'chebyshev'
else:
self.effective_metric_params_['p'] = p
if isinstance(X, NeighborsBase):
self._fit_X = X._fit_X
self._tree = X._tree
self._fit_method = X._fit_method
return self
elif isinstance(X, BallTree):
self._fit_X = X.data
self._tree = X
self._fit_method = 'ball_tree'
return self
elif isinstance(X, KDTree):
self._fit_X = X.data
self._tree = X
self._fit_method = 'kd_tree'
return self
X = check_array(X, accept_sparse='csr')
n_samples = X.shape[0]
if n_samples == 0:
raise ValueError("n_samples must be greater than 0")
if issparse(X):
if self.algorithm not in ('auto', 'brute'):
warnings.warn("cannot use tree with sparse input: "
"using brute force")
if self.effective_metric_ not in VALID_METRICS_SPARSE['brute']:
raise ValueError("metric '%s' not valid for sparse input"
% self.effective_metric_)
self._fit_X = X.copy()
self._tree = None
self._fit_method = 'brute'
return self
self._fit_method = self.algorithm
self._fit_X = X
if self._fit_method == 'auto':
# A tree approach is better for a small number of neighbors,
# and KDTree is generally faster when available
if ((self.n_neighbors is None or
self.n_neighbors < self._fit_X.shape[0] // 2) and
self.metric != 'precomputed'):
if self.effective_metric_ in VALID_METRICS['kd_tree']:
self._fit_method = 'kd_tree'
else:
self._fit_method = 'ball_tree'
else:
self._fit_method = 'brute'
if self._fit_method == 'ball_tree':
self._tree = BallTree(X, self.leaf_size,
metric=self.effective_metric_,
**self.effective_metric_params_)
elif self._fit_method == 'kd_tree':
self._tree = KDTree(X, self.leaf_size,
metric=self.effective_metric_,
**self.effective_metric_params_)
elif self._fit_method == 'brute':
self._tree = None
else:
raise ValueError("algorithm = '%s' not recognized"
% self.algorithm)
if self.n_neighbors is not None:
if self.n_neighbors <= 0:
raise ValueError(
"Expected n_neighbors > 0. Got %d" %
self.n_neighbors
)
return self
@property
def _pairwise(self):
# For cross-validation routines to split data correctly
return self.metric == 'precomputed'
class KNeighborsMixin(object):
"""Mixin for k-neighbors searches"""
def kneighbors(self, X=None, n_neighbors=None, return_distance=True):
"""Finds the K-neighbors of a point.
Returns indices of and distances to the neighbors of each point.
Parameters
----------
X : array-like, shape (n_query, n_features), \
or (n_query, n_indexed) if metric == 'precomputed'
The query point or points.
If not provided, neighbors of each indexed point are returned.
In this case, the query point is not considered its own neighbor.
n_neighbors : int
Number of neighbors to get (default is the value
passed to the constructor).
return_distance : boolean, optional. Defaults to True.
If False, distances will not be returned
Returns
-------
dist : array
Array representing the lengths to points, only present if
return_distance=True
ind : array
Indices of the nearest points in the population matrix.
Examples
--------
In the following example, we construct a NearestNeighbors
class from an array representing our data set and ask which is
the closest point to [1,1,1]
>>> samples = [[0., 0., 0.], [0., .5, 0.], [1., 1., .5]]
>>> from sklearn.neighbors import NearestNeighbors
>>> neigh = NearestNeighbors(n_neighbors=1)
>>> neigh.fit(samples) # doctest: +ELLIPSIS
NearestNeighbors(algorithm='auto', leaf_size=30, ...)
>>> print(neigh.kneighbors([[1., 1., 1.]])) # doctest: +ELLIPSIS
(array([[ 0.5]]), array([[2]]...))
As you can see, it returns [[0.5]], and [[2]], which means that the
element is at distance 0.5 and is the third element of samples
(indexes start at 0). You can also query for multiple points:
>>> X = [[0., 1., 0.], [1., 0., 1.]]
>>> neigh.kneighbors(X, return_distance=False) # doctest: +ELLIPSIS
array([[1],
[2]]...)
"""
if self._fit_method is None:
raise NotFittedError("Must fit neighbors before querying.")
if n_neighbors is None:
n_neighbors = self.n_neighbors
if X is not None:
query_is_train = False
X = check_array(X, accept_sparse='csr')
else:
query_is_train = True
X = self._fit_X
# Include an extra neighbor to account for the sample itself being
# returned, which is removed later
n_neighbors += 1
train_size = self._fit_X.shape[0]
if n_neighbors > train_size:
raise ValueError(
"Expected n_neighbors <= n_samples, "
" but n_samples = %d, n_neighbors = %d" %
(train_size, n_neighbors)
)
n_samples, _ = X.shape
sample_range = np.arange(n_samples)[:, None]
n_jobs = _get_n_jobs(self.n_jobs)
if self._fit_method == 'brute':
# for efficiency, use squared euclidean distances
if self.effective_metric_ == 'euclidean':
dist = pairwise_distances(X, self._fit_X, 'euclidean',
n_jobs=n_jobs, squared=True)
else:
dist = pairwise_distances(
X, self._fit_X, self.effective_metric_, n_jobs=n_jobs,
**self.effective_metric_params_)
neigh_ind = argpartition(dist, n_neighbors - 1, axis=1)
neigh_ind = neigh_ind[:, :n_neighbors]
# argpartition doesn't guarantee sorted order, so we sort again
neigh_ind = neigh_ind[
sample_range, np.argsort(dist[sample_range, neigh_ind])]
if return_distance:
if self.effective_metric_ == 'euclidean':
result = np.sqrt(dist[sample_range, neigh_ind]), neigh_ind
else:
result = dist[sample_range, neigh_ind], neigh_ind
else:
result = neigh_ind
elif self._fit_method in ['ball_tree', 'kd_tree']:
if issparse(X):
raise ValueError(
"%s does not work with sparse matrices. Densify the data, "
"or set algorithm='brute'" % self._fit_method)
result = Parallel(n_jobs, backend='threading')(
delayed(self._tree.query, check_pickle=False)(
X[s], n_neighbors, return_distance)
for s in gen_even_slices(X.shape[0], n_jobs)
)
if return_distance:
dist, neigh_ind = tuple(zip(*result))
result = np.vstack(dist), np.vstack(neigh_ind)
else:
result = np.vstack(result)
else:
raise ValueError("internal: _fit_method not recognized")
if not query_is_train:
return result
else:
# If the query data is the same as the indexed data, we would like
# to ignore the first nearest neighbor of every sample, i.e
# the sample itself.
if return_distance:
dist, neigh_ind = result
else:
neigh_ind = result
sample_mask = neigh_ind != sample_range
# Corner case: When the number of duplicates is more
# than the number of neighbors, the first NN will not
# be the sample, but a duplicate.
# In that case mask the first duplicate.
dup_gr_nbrs = np.all(sample_mask, axis=1)
sample_mask[:, 0][dup_gr_nbrs] = False
neigh_ind = np.reshape(
neigh_ind[sample_mask], (n_samples, n_neighbors - 1))
if return_distance:
dist = np.reshape(
dist[sample_mask], (n_samples, n_neighbors - 1))
return dist, neigh_ind
return neigh_ind
def kneighbors_graph(self, X=None, n_neighbors=None,
mode='connectivity'):
"""Computes the (weighted) graph of k-Neighbors for points in X
Parameters
----------
X : array-like, shape (n_query, n_features), \
or (n_query, n_indexed) if metric == 'precomputed'
The query point or points.
If not provided, neighbors of each indexed point are returned.
In this case, the query point is not considered its own neighbor.
n_neighbors : int
Number of neighbors for each sample.
(default is value passed to the constructor).
mode : {'connectivity', 'distance'}, optional
Type of returned matrix: 'connectivity' will return the
connectivity matrix with ones and zeros, in 'distance' the
edges are Euclidean distance between points.
Returns
-------
A : sparse matrix in CSR format, shape = [n_samples, n_samples_fit]
n_samples_fit is the number of samples in the fitted data
A[i, j] is assigned the weight of edge that connects i to j.
Examples
--------
>>> X = [[0], [3], [1]]
>>> from sklearn.neighbors import NearestNeighbors
>>> neigh = NearestNeighbors(n_neighbors=2)
>>> neigh.fit(X) # doctest: +ELLIPSIS
NearestNeighbors(algorithm='auto', leaf_size=30, ...)
>>> A = neigh.kneighbors_graph(X)
>>> A.toarray()
array([[ 1., 0., 1.],
[ 0., 1., 1.],
[ 1., 0., 1.]])
See also
--------
NearestNeighbors.radius_neighbors_graph
"""
if n_neighbors is None:
n_neighbors = self.n_neighbors
# kneighbors does the None handling.
if X is not None:
X = check_array(X, accept_sparse='csr')
n_samples1 = X.shape[0]
else:
n_samples1 = self._fit_X.shape[0]
n_samples2 = self._fit_X.shape[0]
n_nonzero = n_samples1 * n_neighbors
A_indptr = np.arange(0, n_nonzero + 1, n_neighbors)
# construct CSR matrix representation of the k-NN graph
if mode == 'connectivity':
A_data = np.ones(n_samples1 * n_neighbors)
A_ind = self.kneighbors(X, n_neighbors, return_distance=False)
elif mode == 'distance':
A_data, A_ind = self.kneighbors(
X, n_neighbors, return_distance=True)
A_data = np.ravel(A_data)
else:
raise ValueError(
'Unsupported mode, must be one of "connectivity" '
'or "distance" but got "%s" instead' % mode)
kneighbors_graph = csr_matrix((A_data, A_ind.ravel(), A_indptr),
shape=(n_samples1, n_samples2))
return kneighbors_graph
class RadiusNeighborsMixin(object):
"""Mixin for radius-based neighbors searches"""
def radius_neighbors(self, X=None, radius=None, return_distance=True):
"""Finds the neighbors within a given radius of a point or points.
Return the indices and distances of each point from the dataset
lying in a ball with size ``radius`` around the points of the query
array. Points lying on the boundary are included in the results.
The result points are *not* necessarily sorted by distance to their
query point.
Parameters
----------
X : array-like, (n_samples, n_features), optional
The query point or points.
If not provided, neighbors of each indexed point are returned.
In this case, the query point is not considered its own neighbor.
radius : float
Limiting distance of neighbors to return.
(default is the value passed to the constructor).
return_distance : boolean, optional. Defaults to True.
If False, distances will not be returned
Returns
-------
dist : array, shape (n_samples,) of arrays
Array representing the distances to each point, only present if
return_distance=True. The distance values are computed according
to the ``metric`` constructor parameter.
ind : array, shape (n_samples,) of arrays
An array of arrays of indices of the approximate nearest points
from the population matrix that lie within a ball of size
``radius`` around the query points.
Examples
--------
In the following example, we construct a NearestNeighbors
class from an array representing our data set and ask which is
the closest point to [1, 1, 1]:
>>> import numpy as np
>>> samples = [[0., 0., 0.], [0., .5, 0.], [1., 1., .5]]
>>> from sklearn.neighbors import NearestNeighbors
>>> neigh = NearestNeighbors(radius=1.6)
>>> neigh.fit(samples) # doctest: +ELLIPSIS
NearestNeighbors(algorithm='auto', leaf_size=30, ...)
>>> rng = neigh.radius_neighbors([[1., 1., 1.]])
>>> print(np.asarray(rng[0][0])) # doctest: +ELLIPSIS
[ 1.5 0.5]
>>> print(np.asarray(rng[1][0])) # doctest: +ELLIPSIS
[1 2]
The first array returned contains the distances to all points which
are closer than 1.6, while the second array returned contains their
indices. In general, multiple points can be queried at the same time.
Notes
-----
Because the number of neighbors of each point is not necessarily
equal, the results for multiple query points cannot be fit in a
standard data array.
For efficiency, `radius_neighbors` returns arrays of objects, where
each object is a 1D array of indices or distances.
"""
if self._fit_method is None:
raise NotFittedError("Must fit neighbors before querying.")
if X is not None:
query_is_train = False
X = check_array(X, accept_sparse='csr')
else:
query_is_train = True
X = self._fit_X
if radius is None:
radius = self.radius
n_samples = X.shape[0]
if self._fit_method == 'brute':
# for efficiency, use squared euclidean distances
if self.effective_metric_ == 'euclidean':
dist = pairwise_distances(X, self._fit_X, 'euclidean',
squared=True)
radius *= radius
else:
dist = pairwise_distances(X, self._fit_X,
self.effective_metric_,
**self.effective_metric_params_)
neigh_ind_list = [np.where(d <= radius)[0] for d in dist]
# See https://github.com/numpy/numpy/issues/5456
# if you want to understand why this is initialized this way.
neigh_ind = np.empty(n_samples, dtype='object')
neigh_ind[:] = neigh_ind_list
if return_distance:
dist_array = np.empty(n_samples, dtype='object')
if self.effective_metric_ == 'euclidean':
dist_list = [np.sqrt(d[neigh_ind[i]])
for i, d in enumerate(dist)]
else:
dist_list = [d[neigh_ind[i]]
for i, d in enumerate(dist)]
dist_array[:] = dist_list
results = dist_array, neigh_ind
else:
results = neigh_ind
elif self._fit_method in ['ball_tree', 'kd_tree']:
if issparse(X):
raise ValueError(
"%s does not work with sparse matrices. Densify the data, "
"or set algorithm='brute'" % self._fit_method)
results = self._tree.query_radius(X, radius,
return_distance=return_distance)
if return_distance:
results = results[::-1]
else:
raise ValueError("internal: _fit_method not recognized")
if not query_is_train:
return results
else:
# If the query data is the same as the indexed data, we would like
# to ignore the first nearest neighbor of every sample, i.e
# the sample itself.
if return_distance:
dist, neigh_ind = results
else:
neigh_ind = results
for ind, ind_neighbor in enumerate(neigh_ind):
mask = ind_neighbor != ind
neigh_ind[ind] = ind_neighbor[mask]
if return_distance:
dist[ind] = dist[ind][mask]
if return_distance:
return dist, neigh_ind
return neigh_ind
def radius_neighbors_graph(self, X=None, radius=None, mode='connectivity'):
"""Computes the (weighted) graph of Neighbors for points in X
Neighborhoods are restricted to points at a distance lower than
radius.
Parameters
----------
X : array-like, shape = [n_samples, n_features], optional
The query point or points.
If not provided, neighbors of each indexed point are returned.
In this case, the query point is not considered its own neighbor.
radius : float
Radius of neighborhoods.
(default is the value passed to the constructor).
mode : {'connectivity', 'distance'}, optional
Type of returned matrix: 'connectivity' will return the
connectivity matrix with ones and zeros, in 'distance' the
edges are Euclidean distance between points.
Returns
-------
A : sparse matrix in CSR format, shape = [n_samples, n_samples]
A[i, j] is assigned the weight of edge that connects i to j.
Examples
--------
>>> X = [[0], [3], [1]]
>>> from sklearn.neighbors import NearestNeighbors
>>> neigh = NearestNeighbors(radius=1.5)
>>> neigh.fit(X) # doctest: +ELLIPSIS
NearestNeighbors(algorithm='auto', leaf_size=30, ...)
>>> A = neigh.radius_neighbors_graph(X)
>>> A.toarray()
array([[ 1., 0., 1.],
[ 0., 1., 0.],
[ 1., 0., 1.]])
See also
--------
kneighbors_graph
"""
if X is not None:
X = check_array(X, accept_sparse=['csr', 'csc', 'coo'])
n_samples2 = self._fit_X.shape[0]
if radius is None:
radius = self.radius
# construct CSR matrix representation of the NN graph
if mode == 'connectivity':
A_ind = self.radius_neighbors(X, radius,
return_distance=False)
A_data = None
elif mode == 'distance':
dist, A_ind = self.radius_neighbors(X, radius,
return_distance=True)
A_data = np.concatenate(list(dist))
else:
raise ValueError(
'Unsupported mode, must be one of "connectivity", '
'or "distance" but got %s instead' % mode)
n_samples1 = A_ind.shape[0]
n_neighbors = np.array([len(a) for a in A_ind])
A_ind = np.concatenate(list(A_ind))
if A_data is None:
A_data = np.ones(len(A_ind))
A_indptr = np.concatenate((np.zeros(1, dtype=int),
np.cumsum(n_neighbors)))
return csr_matrix((A_data, A_ind, A_indptr),
shape=(n_samples1, n_samples2))
class SupervisedFloatMixin(object):
def fit(self, X, y):
"""Fit the model using X as training data and y as target values
Parameters
----------
X : {array-like, sparse matrix, BallTree, KDTree}
Training data. If array or matrix, shape [n_samples, n_features],
or [n_samples, n_samples] if metric='precomputed'.
y : {array-like, sparse matrix}
Target values, array of float values, shape = [n_samples]
or [n_samples, n_outputs]
"""
if not isinstance(X, (KDTree, BallTree)):
X, y = check_X_y(X, y, "csr", multi_output=True)
self._y = y
return self._fit(X)
class SupervisedIntegerMixin(object):
def fit(self, X, y):
"""Fit the model using X as training data and y as target values
Parameters
----------
X : {array-like, sparse matrix, BallTree, KDTree}
Training data. If array or matrix, shape [n_samples, n_features],
or [n_samples, n_samples] if metric='precomputed'.
y : {array-like, sparse matrix}
Target values of shape = [n_samples] or [n_samples, n_outputs]
"""
if not isinstance(X, (KDTree, BallTree)):
X, y = check_X_y(X, y, "csr", multi_output=True)
if y.ndim == 1 or y.ndim == 2 and y.shape[1] == 1:
if y.ndim != 1:
warnings.warn("A column-vector y was passed when a 1d array "
"was expected. Please change the shape of y to "
"(n_samples, ), for example using ravel().",
DataConversionWarning, stacklevel=2)
self.outputs_2d_ = False
y = y.reshape((-1, 1))
else:
self.outputs_2d_ = True
self.classes_ = []
self._y = np.empty(y.shape, dtype=np.int)
for k in range(self._y.shape[1]):
classes, self._y[:, k] = np.unique(y[:, k], return_inverse=True)
self.classes_.append(classes)
if not self.outputs_2d_:
self.classes_ = self.classes_[0]
self._y = self._y.ravel()
return self._fit(X)
class UnsupervisedMixin(object):
def fit(self, X, y=None):
"""Fit the model using X as training data
Parameters
----------
X : {array-like, sparse matrix, BallTree, KDTree}
Training data. If array or matrix, shape [n_samples, n_features],
or [n_samples, n_samples] if metric='precomputed'.
"""
return self._fit(X)
|
bsd-3-clause
|
zihua/scikit-learn
|
sklearn/ensemble/forest.py
|
1
|
66530
|
"""Forest of trees-based ensemble methods
These methods include random forests and extremely randomized trees.
The module structure is the following:
- The ``BaseForest`` base class implements a common ``fit`` method for all
the estimators in the module. The ``fit`` method of the base ``Forest``
class calls the ``fit`` method of each sub-estimator on random samples
(with replacement, a.k.a. bootstrap) of the training set.
The init of the sub-estimator is further delegated to the
``BaseEnsemble`` constructor.
- The ``ForestClassifier`` and ``ForestRegressor`` base classes further
implement the prediction logic by computing an average of the predicted
outcomes of the sub-estimators.
- The ``RandomForestClassifier`` and ``RandomForestRegressor`` derived
classes provide the user with concrete implementations of
the forest ensemble method using classical, deterministic
``DecisionTreeClassifier`` and ``DecisionTreeRegressor`` as
sub-estimator implementations.
- The ``ExtraTreesClassifier`` and ``ExtraTreesRegressor`` derived
classes provide the user with concrete implementations of the
forest ensemble method using the extremely randomized trees
``ExtraTreeClassifier`` and ``ExtraTreeRegressor`` as
sub-estimator implementations.
Single and multi-output problems are both handled.
"""
# Authors: Gilles Louppe <[email protected]>
# Brian Holt <[email protected]>
# Joly Arnaud <[email protected]>
# Fares Hedayati <[email protected]>
#
# License: BSD 3 clause
from __future__ import division
import warnings
from warnings import warn
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.sparse import issparse
from scipy.sparse import hstack as sparse_hstack
from ..base import ClassifierMixin, RegressorMixin
from ..externals.joblib import Parallel, delayed
from ..externals import six
from ..feature_selection.from_model import _LearntSelectorMixin
from ..metrics import r2_score
from ..preprocessing import OneHotEncoder
from ..tree import (DecisionTreeClassifier, DecisionTreeRegressor,
ExtraTreeClassifier, ExtraTreeRegressor)
from ..tree._tree import DTYPE, DOUBLE
from ..utils import check_random_state, check_array, compute_sample_weight
from ..exceptions import DataConversionWarning, NotFittedError
from .base import BaseEnsemble, _partition_estimators
from ..utils.fixes import bincount, parallel_helper
from ..utils.multiclass import check_classification_targets
__all__ = ["RandomForestClassifier",
"RandomForestRegressor",
"ExtraTreesClassifier",
"ExtraTreesRegressor",
"RandomTreesEmbedding"]
MAX_INT = np.iinfo(np.int32).max
def _generate_sample_indices(random_state, n_samples):
"""Private function used to _parallel_build_trees function."""
random_instance = check_random_state(random_state)
sample_indices = random_instance.randint(0, n_samples, n_samples)
return sample_indices
def _generate_unsampled_indices(random_state, n_samples):
"""Private function used to forest._set_oob_score function."""
sample_indices = _generate_sample_indices(random_state, n_samples)
sample_counts = bincount(sample_indices, minlength=n_samples)
unsampled_mask = sample_counts == 0
indices_range = np.arange(n_samples)
unsampled_indices = indices_range[unsampled_mask]
return unsampled_indices
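# --- Editor's illustrative sketch (not part of the original scikit-learn
# source). It shows how the two private helpers above split a toy training set
# into in-bag (bootstrap) and out-of-bag indices: sampling with replacement
# leaves roughly a third of the samples unsampled. The helper name
# _demo_bootstrap_split is an assumption made for illustration only.
def _demo_bootstrap_split(n_samples=10, seed=0):  # pragma: no cover
    in_bag = _generate_sample_indices(seed, n_samples)
    out_of_bag = _generate_unsampled_indices(seed, n_samples)
    # Every index is either drawn at least once or reported as out-of-bag.
    assert set(in_bag) | set(out_of_bag) == set(range(n_samples))
    return in_bag, out_of_bag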
def _parallel_build_trees(tree, forest, X, y, sample_weight, tree_idx, n_trees,
verbose=0, class_weight=None):
"""Private function used to fit a single tree in parallel."""
if verbose > 1:
print("building tree %d of %d" % (tree_idx + 1, n_trees))
if forest.bootstrap:
n_samples = X.shape[0]
if sample_weight is None:
curr_sample_weight = np.ones((n_samples,), dtype=np.float64)
else:
curr_sample_weight = sample_weight.copy()
indices = _generate_sample_indices(tree.random_state, n_samples)
sample_counts = bincount(indices, minlength=n_samples)
curr_sample_weight *= sample_counts
if class_weight == 'subsample':
with warnings.catch_warnings():
warnings.simplefilter('ignore', DeprecationWarning)
curr_sample_weight *= compute_sample_weight('auto', y, indices)
elif class_weight == 'balanced_subsample':
curr_sample_weight *= compute_sample_weight('balanced', y, indices)
tree.fit(X, y, sample_weight=curr_sample_weight, check_input=False)
else:
tree.fit(X, y, sample_weight=sample_weight, check_input=False)
return tree
class BaseForest(six.with_metaclass(ABCMeta, BaseEnsemble,
_LearntSelectorMixin)):
"""Base class for forests of trees.
Warning: This class should not be used directly. Use derived classes
instead.
"""
@abstractmethod
def __init__(self,
base_estimator,
n_estimators=10,
estimator_params=tuple(),
bootstrap=False,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False,
class_weight=None):
super(BaseForest, self).__init__(
base_estimator=base_estimator,
n_estimators=n_estimators,
estimator_params=estimator_params)
self.bootstrap = bootstrap
self.oob_score = oob_score
self.n_jobs = n_jobs
self.random_state = random_state
self.verbose = verbose
self.warm_start = warm_start
self.class_weight = class_weight
def apply(self, X):
"""Apply trees in the forest to X, return leaf indices.
Parameters
----------
X : array-like or sparse matrix, shape = [n_samples, n_features]
The input samples. Internally, its dtype will be converted to
``dtype=np.float32``. If a sparse matrix is provided, it will be
converted into a sparse ``csr_matrix``.
Returns
-------
X_leaves : array_like, shape = [n_samples, n_estimators]
For each datapoint x in X and for each tree in the forest,
return the index of the leaf x ends up in.
"""
X = self._validate_X_predict(X)
results = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,
backend="threading")(
delayed(parallel_helper)(tree, 'apply', X, check_input=False)
for tree in self.estimators_)
return np.array(results).T
def decision_path(self, X):
"""Return the decision path in the forest
Parameters
----------
X : array-like or sparse matrix, shape = [n_samples, n_features]
The input samples. Internally, its dtype will be converted to
``dtype=np.float32``. If a sparse matrix is provided, it will be
converted into a sparse ``csr_matrix``.
Returns
-------
indicator : sparse csr array, shape = [n_samples, n_nodes]
Return a node indicator matrix where non-zero elements
indicate that the samples go through the nodes.
n_nodes_ptr : array of size (n_estimators + 1, )
The columns from indicator[n_nodes_ptr[i]:n_nodes_ptr[i+1]]
give the indicator value for the i-th estimator.
"""
X = self._validate_X_predict(X)
indicators = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,
backend="threading")(
delayed(parallel_helper)(tree, 'decision_path', X,
check_input=False)
for tree in self.estimators_)
n_nodes = [0]
n_nodes.extend([i.shape[1] for i in indicators])
n_nodes_ptr = np.array(n_nodes).cumsum()
return sparse_hstack(indicators).tocsr(), n_nodes_ptr
def fit(self, X, y, sample_weight=None):
"""Build a forest of trees from the training set (X, y).
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The training input samples. Internally, its dtype will be converted to
``dtype=np.float32``. If a sparse matrix is provided, it will be
converted into a sparse ``csc_matrix``.
y : array-like, shape = [n_samples] or [n_samples, n_outputs]
The target values (class labels in classification, real numbers in
regression).
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted. Splits
that would create child nodes with net zero or negative weight are
ignored while searching for a split in each node. In the case of
classification, splits are also ignored if they would result in any
single class carrying a negative weight in either child node.
Returns
-------
self : object
Returns self.
"""
# Validate or convert input data
X = check_array(X, accept_sparse="csc", dtype=DTYPE)
y = check_array(y, accept_sparse='csc', ensure_2d=False, dtype=None)
if issparse(X):
# Pre-sort indices to avoid that each individual tree of the
# ensemble sorts the indices.
X.sort_indices()
# Remap output
n_samples, self.n_features_ = X.shape
y = np.atleast_1d(y)
if y.ndim == 2 and y.shape[1] == 1:
warn("A column-vector y was passed when a 1d array was"
" expected. Please change the shape of y to "
"(n_samples,), for example using ravel().",
DataConversionWarning, stacklevel=2)
if y.ndim == 1:
# reshape is necessary to preserve the data contiguity, unlike
# [:, np.newaxis], which does not.
y = np.reshape(y, (-1, 1))
self.n_outputs_ = y.shape[1]
y, expanded_class_weight = self._validate_y_class_weight(y)
if getattr(y, "dtype", None) != DOUBLE or not y.flags.contiguous:
y = np.ascontiguousarray(y, dtype=DOUBLE)
if expanded_class_weight is not None:
if sample_weight is not None:
sample_weight = sample_weight * expanded_class_weight
else:
sample_weight = expanded_class_weight
# Check parameters
self._validate_estimator()
if not self.bootstrap and self.oob_score:
raise ValueError("Out of bag estimation only available"
" if bootstrap=True")
random_state = check_random_state(self.random_state)
if not self.warm_start:
# Free allocated memory, if any
self.estimators_ = []
n_more_estimators = self.n_estimators - len(self.estimators_)
if n_more_estimators < 0:
raise ValueError('n_estimators=%d must be larger or equal to '
'len(estimators_)=%d when warm_start==True'
% (self.n_estimators, len(self.estimators_)))
elif n_more_estimators == 0:
warn("Warm-start fitting without increasing n_estimators does not "
"fit new trees.")
else:
if self.warm_start and len(self.estimators_) > 0:
# We draw from the random state to get the random state we
# would have got if we hadn't used a warm_start.
random_state.randint(MAX_INT, size=len(self.estimators_))
trees = []
for i in range(n_more_estimators):
tree = self._make_estimator(append=False,
random_state=random_state)
trees.append(tree)
# Parallel loop: we use the threading backend as the Cython code
# for fitting the trees is internally releasing the Python GIL
# making threading always more efficient than multiprocessing in
# that case.
trees = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,
backend="threading")(
delayed(_parallel_build_trees)(
t, self, X, y, sample_weight, i, len(trees),
verbose=self.verbose, class_weight=self.class_weight)
for i, t in enumerate(trees))
# Collect newly grown trees
self.estimators_.extend(trees)
if self.oob_score:
self._set_oob_score(X, y)
# Decapsulate classes_ attributes
if hasattr(self, "classes_") and self.n_outputs_ == 1:
self.n_classes_ = self.n_classes_[0]
self.classes_ = self.classes_[0]
return self
@abstractmethod
def _set_oob_score(self, X, y):
"""Calculate out of bag predictions and score."""
def _validate_y_class_weight(self, y):
# Default implementation
return y, None
def _validate_X_predict(self, X):
"""Validate X whenever one tries to predict, apply, predict_proba"""
if self.estimators_ is None or len(self.estimators_) == 0:
raise NotFittedError("Estimator not fitted, "
"call `fit` before exploiting the model.")
return self.estimators_[0]._validate_X_predict(X, check_input=True)
@property
def feature_importances_(self):
"""Return the feature importances (the higher, the more important the
feature).
Returns
-------
feature_importances_ : array, shape = [n_features]
"""
if self.estimators_ is None or len(self.estimators_) == 0:
raise NotFittedError("Estimator not fitted, "
"call `fit` before `feature_importances_`.")
all_importances = Parallel(n_jobs=self.n_jobs,
backend="threading")(
delayed(getattr)(tree, 'feature_importances_')
for tree in self.estimators_)
return sum(all_importances) / len(self.estimators_)
class ForestClassifier(six.with_metaclass(ABCMeta, BaseForest,
ClassifierMixin)):
"""Base class for forest of trees-based classifiers.
Warning: This class should not be used directly. Use derived classes
instead.
"""
@abstractmethod
def __init__(self,
base_estimator,
n_estimators=10,
estimator_params=tuple(),
bootstrap=False,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False,
class_weight=None):
super(ForestClassifier, self).__init__(
base_estimator,
n_estimators=n_estimators,
estimator_params=estimator_params,
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start,
class_weight=class_weight)
def _set_oob_score(self, X, y):
"""Compute out-of-bag score"""
X = check_array(X, dtype=DTYPE, accept_sparse='csr')
n_classes_ = self.n_classes_
n_samples = y.shape[0]
oob_decision_function = []
oob_score = 0.0
predictions = []
for k in range(self.n_outputs_):
predictions.append(np.zeros((n_samples, n_classes_[k])))
for estimator in self.estimators_:
unsampled_indices = _generate_unsampled_indices(
estimator.random_state, n_samples)
p_estimator = estimator.predict_proba(X[unsampled_indices, :],
check_input=False)
if self.n_outputs_ == 1:
p_estimator = [p_estimator]
for k in range(self.n_outputs_):
predictions[k][unsampled_indices, :] += p_estimator[k]
for k in range(self.n_outputs_):
if (predictions[k].sum(axis=1) == 0).any():
warn("Some inputs do not have OOB scores. "
"This probably means too few trees were used "
"to compute any reliable oob estimates.")
decision = (predictions[k] /
predictions[k].sum(axis=1)[:, np.newaxis])
oob_decision_function.append(decision)
oob_score += np.mean(y[:, k] ==
np.argmax(predictions[k], axis=1), axis=0)
if self.n_outputs_ == 1:
self.oob_decision_function_ = oob_decision_function[0]
else:
self.oob_decision_function_ = oob_decision_function
self.oob_score_ = oob_score / self.n_outputs_
def _validate_y_class_weight(self, y):
check_classification_targets(y)
y = np.copy(y)
expanded_class_weight = None
if self.class_weight is not None:
y_original = np.copy(y)
self.classes_ = []
self.n_classes_ = []
y_store_unique_indices = np.zeros(y.shape, dtype=np.int)
for k in range(self.n_outputs_):
classes_k, y_store_unique_indices[:, k] = np.unique(y[:, k], return_inverse=True)
self.classes_.append(classes_k)
self.n_classes_.append(classes_k.shape[0])
y = y_store_unique_indices
if self.class_weight is not None:
valid_presets = ('auto', 'balanced', 'subsample', 'balanced_subsample')
if isinstance(self.class_weight, six.string_types):
if self.class_weight not in valid_presets:
raise ValueError('Valid presets for class_weight include '
'"balanced" and "balanced_subsample". Given "%s".'
% self.class_weight)
if self.class_weight == "subsample":
warn("class_weight='subsample' is deprecated in 0.17 and"
"will be removed in 0.19. It was replaced by "
"class_weight='balanced_subsample' using the balanced"
"strategy.", DeprecationWarning)
if self.warm_start:
warn('class_weight presets "balanced" or "balanced_subsample" are '
'not recommended for warm_start if the fitted data '
'differs from the full dataset. In order to use '
'"balanced" weights, use compute_class_weight("balanced", '
'classes, y). In place of y you can use a large '
'enough sample of the full training set target to '
'properly estimate the class frequency '
'distributions. Pass the resulting weights as the '
'class_weight parameter.')
if (self.class_weight not in ['subsample', 'balanced_subsample'] or
not self.bootstrap):
if self.class_weight == 'subsample':
class_weight = 'auto'
elif self.class_weight == "balanced_subsample":
class_weight = "balanced"
else:
class_weight = self.class_weight
with warnings.catch_warnings():
if class_weight == "auto":
warnings.simplefilter('ignore', DeprecationWarning)
expanded_class_weight = compute_sample_weight(class_weight,
y_original)
return y, expanded_class_weight
def predict(self, X):
"""Predict class for X.
The predicted class of an input sample is a vote by the trees in
the forest, weighted by their probability estimates. That is,
the predicted class is the one with highest mean probability
estimate across the trees.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, its dtype will be converted to
``dtype=np.float32``. If a sparse matrix is provided, it will be
converted into a sparse ``csr_matrix``.
Returns
-------
y : array of shape = [n_samples] or [n_samples, n_outputs]
The predicted classes.
"""
proba = self.predict_proba(X)
if self.n_outputs_ == 1:
return self.classes_.take(np.argmax(proba, axis=1), axis=0)
else:
n_samples = proba[0].shape[0]
predictions = np.zeros((n_samples, self.n_outputs_))
for k in range(self.n_outputs_):
predictions[:, k] = self.classes_[k].take(np.argmax(proba[k],
axis=1),
axis=0)
return predictions
def predict_proba(self, X):
"""Predict class probabilities for X.
The predicted class probabilities of an input sample are computed as
the mean predicted class probabilities of the trees in the forest. The
class probability of a single tree is the fraction of samples of the same
class in a leaf.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, its dtype will be converted to
``dtype=np.float32``. If a sparse matrix is provided, it will be
converted into a sparse ``csr_matrix``.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
such arrays if n_outputs > 1.
The class probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
# Check data
X = self._validate_X_predict(X)
# Assign chunk of trees to jobs
n_jobs, _, _ = _partition_estimators(self.n_estimators, self.n_jobs)
# Parallel loop
all_proba = Parallel(n_jobs=n_jobs, verbose=self.verbose,
backend="threading")(
delayed(parallel_helper)(e, 'predict_proba', X,
check_input=False)
for e in self.estimators_)
# Reduce
proba = all_proba[0]
if self.n_outputs_ == 1:
for j in range(1, len(all_proba)):
proba += all_proba[j]
proba /= len(self.estimators_)
else:
for j in range(1, len(all_proba)):
for k in range(self.n_outputs_):
proba[k] += all_proba[j][k]
for k in range(self.n_outputs_):
proba[k] /= self.n_estimators
return proba
def predict_log_proba(self, X):
"""Predict class log-probabilities for X.
The predicted class log-probabilities of an input sample are computed as
the log of the mean predicted class probabilities of the trees in the
forest.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, its dtype will be converted to
``dtype=np.float32``. If a sparse matrix is provided, it will be
converted into a sparse ``csr_matrix``.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
such arrays if n_outputs > 1.
The class log-probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
proba = self.predict_proba(X)
if self.n_outputs_ == 1:
return np.log(proba)
else:
for k in range(self.n_outputs_):
proba[k] = np.log(proba[k])
return proba
class ForestRegressor(six.with_metaclass(ABCMeta, BaseForest, RegressorMixin)):
"""Base class for forest of trees-based regressors.
Warning: This class should not be used directly. Use derived classes
instead.
"""
@abstractmethod
def __init__(self,
base_estimator,
n_estimators=10,
estimator_params=tuple(),
bootstrap=False,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False):
super(ForestRegressor, self).__init__(
base_estimator,
n_estimators=n_estimators,
estimator_params=estimator_params,
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start)
def predict(self, X):
"""Predict regression target for X.
The predicted regression target of an input sample is computed as the
mean predicted regression targets of the trees in the forest.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, its dtype will be converted to
``dtype=np.float32``. If a sparse matrix is provided, it will be
converted into a sparse ``csr_matrix``.
Returns
-------
y : array of shape = [n_samples] or [n_samples, n_outputs]
The predicted values.
"""
# Check data
X = self._validate_X_predict(X)
# Assign chunk of trees to jobs
n_jobs, _, _ = _partition_estimators(self.n_estimators, self.n_jobs)
# Parallel loop
all_y_hat = Parallel(n_jobs=n_jobs, verbose=self.verbose,
backend="threading")(
delayed(parallel_helper)(e, 'predict', X, check_input=False)
for e in self.estimators_)
# Reduce
y_hat = sum(all_y_hat) / len(self.estimators_)
return y_hat
def _set_oob_score(self, X, y):
"""Compute out-of-bag scores"""
X = check_array(X, dtype=DTYPE, accept_sparse='csr')
n_samples = y.shape[0]
predictions = np.zeros((n_samples, self.n_outputs_))
n_predictions = np.zeros((n_samples, self.n_outputs_))
for estimator in self.estimators_:
unsampled_indices = _generate_unsampled_indices(
estimator.random_state, n_samples)
p_estimator = estimator.predict(
X[unsampled_indices, :], check_input=False)
if self.n_outputs_ == 1:
p_estimator = p_estimator[:, np.newaxis]
predictions[unsampled_indices, :] += p_estimator
n_predictions[unsampled_indices, :] += 1
if (n_predictions == 0).any():
warn("Some inputs do not have OOB scores. "
"This probably means too few trees were used "
"to compute any reliable oob estimates.")
n_predictions[n_predictions == 0] = 1
predictions /= n_predictions
self.oob_prediction_ = predictions
if self.n_outputs_ == 1:
self.oob_prediction_ = \
self.oob_prediction_.reshape((n_samples, ))
self.oob_score_ = 0.0
for k in range(self.n_outputs_):
self.oob_score_ += r2_score(y[:, k],
predictions[:, k])
self.oob_score_ /= self.n_outputs_
class RandomForestClassifier(ForestClassifier):
"""A random forest classifier.
A random forest is a meta estimator that fits a number of decision tree
classifiers on various sub-samples of the dataset and uses averaging to
improve the predictive accuracy and control over-fitting.
The sub-sample size is always the same as the original
input sample size but the samples are drawn with replacement if
`bootstrap=True` (default).
Read more in the :ref:`User Guide <forest>`.
Parameters
----------
n_estimators : integer, optional (default=10)
The number of trees in the forest.
criterion : string, optional (default="gini")
The function to measure the quality of a split. Supported criteria are
"gini" for the Gini impurity and "entropy" for the information gain.
Note: this parameter is tree-specific.
max_features : int, float, string or None, optional (default="auto")
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=sqrt(n_features)`.
- If "sqrt", then `max_features=sqrt(n_features)` (same as "auto").
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires
effectively inspecting more than ``max_features`` features.
max_depth : integer or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
min_samples_split : int, float, optional (default=2)
The minimum number of samples required to split an internal node:
- If int, then consider `min_samples_split` as the minimum number.
- If float, then `min_samples_split` is a percentage and
`ceil(min_samples_split * n_samples)` are the minimum
number of samples for each split.
min_samples_leaf : int, float, optional (default=1)
The minimum number of samples required to be at a leaf node:
- If int, then consider `min_samples_leaf` as the minimum number.
- If float, then `min_samples_leaf` is a percentage and
`ceil(min_samples_leaf * n_samples)` are the minimum
number of samples for each node.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
min_impurity_split : float, optional (default=1e-7)
Threshold for early stopping in tree growth. A node will split
if its impurity is above the threshold, otherwise it is a leaf.
.. versionadded:: 0.18
bootstrap : boolean, optional (default=True)
Whether bootstrap samples are used when building trees.
oob_score : bool (default=False)
Whether to use out-of-bag samples to estimate
the generalization accuracy.
n_jobs : integer, optional (default=1)
The number of jobs to run in parallel for both `fit` and `predict`.
If -1, then the number of jobs is set to the number of cores.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
Controls the verbosity of the tree building process.
warm_start : bool, optional (default=False)
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just fit a whole
new forest.
class_weight : dict, list of dicts, "balanced",
"balanced_subsample" or None, optional (default=None)
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one. For
multi-output problems, a list of dicts can be provided in the same
order as the columns of y.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
The "balanced_subsample" mode is the same as "balanced" except that
weights are computed based on the bootstrap sample for every tree
grown.
For multi-output, the weights of each column of y will be multiplied.
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
Attributes
----------
estimators_ : list of DecisionTreeClassifier
The collection of fitted sub-estimators.
classes_ : array of shape = [n_classes] or a list of such arrays
The classes labels (single output problem), or a list of arrays of
class labels (multi-output problem).
n_classes_ : int or list
The number of classes (single output problem), or a list containing the
number of classes for each output (multi-output problem).
n_features_ : int
The number of features when ``fit`` is performed.
n_outputs_ : int
The number of outputs when ``fit`` is performed.
feature_importances_ : array of shape = [n_features]
The feature importances (the higher, the more important the feature).
oob_score_ : float
Score of the training dataset obtained using an out-of-bag estimate.
oob_decision_function_ : array of shape = [n_samples, n_classes]
Decision function computed with out-of-bag estimate on the training
set. If n_estimators is small it might be possible that a data point
was never left out during the bootstrap. In this case,
`oob_decision_function_` might contain NaN.
References
----------
.. [1] L. Breiman, "Random Forests", Machine Learning, 45(1), 5-32, 2001.
See also
--------
DecisionTreeClassifier, ExtraTreesClassifier
"""
def __init__(self,
n_estimators=10,
criterion="gini",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
max_leaf_nodes=None,
min_impurity_split=1e-7,
bootstrap=True,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False,
class_weight=None):
super(RandomForestClassifier, self).__init__(
base_estimator=DecisionTreeClassifier(),
n_estimators=n_estimators,
estimator_params=("criterion", "max_depth", "min_samples_split",
"min_samples_leaf", "min_weight_fraction_leaf",
"max_features", "max_leaf_nodes", "min_impurity_split",
"random_state"),
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start,
class_weight=class_weight)
self.criterion = criterion
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = max_features
self.max_leaf_nodes = max_leaf_nodes
self.min_impurity_split = min_impurity_split
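# --- Editor's illustrative sketch (not part of the original scikit-learn
# source). It demonstrates the oob_score_ and feature_importances_ attributes
# documented in the RandomForestClassifier docstring above on synthetic data;
# the helper name and all parameter values are assumptions for illustration.
def _demo_random_forest_classifier():  # pragma: no cover
    from sklearn.datasets import make_classification
    from sklearn.ensemble import RandomForestClassifier

    X, y = make_classification(n_samples=300, n_features=6, n_informative=3,
                               random_state=0)
    clf = RandomForestClassifier(n_estimators=50, bootstrap=True,
                                 oob_score=True, random_state=0).fit(X, y)
    print("OOB accuracy: %.3f" % clf.oob_score_)
    print("feature importances: %s" % clf.feature_importances_)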
class RandomForestRegressor(ForestRegressor):
"""A random forest regressor.
A random forest is a meta estimator that fits a number of decision tree
regressors on various sub-samples of the dataset and uses averaging
to improve the predictive accuracy and control over-fitting.
The sub-sample size is always the same as the original
input sample size but the samples are drawn with replacement if
`bootstrap=True` (default).
Read more in the :ref:`User Guide <forest>`.
Parameters
----------
n_estimators : integer, optional (default=10)
The number of trees in the forest.
criterion : string, optional (default="mse")
The function to measure the quality of a split. Supported criteria
are "mse" for the mean squared error, which is equal to variance
reduction as feature selection criterion, and "mae" for the mean
absolute error.
.. versionadded:: 0.18
Mean Absolute Error (MAE) criterion.
max_features : int, float, string or None, optional (default="auto")
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=n_features`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires
effectively inspecting more than ``max_features`` features.
max_depth : integer or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
min_samples_split : int, float, optional (default=2)
The minimum number of samples required to split an internal node:
- If int, then consider `min_samples_split` as the minimum number.
- If float, then `min_samples_split` is a percentage and
`ceil(min_samples_split * n_samples)` are the minimum
number of samples for each split.
min_samples_leaf : int, float, optional (default=1)
The minimum number of samples required to be at a leaf node:
- If int, then consider `min_samples_leaf` as the minimum number.
- If float, then `min_samples_leaf` is a percentage and
`ceil(min_samples_leaf * n_samples)` are the minimum
number of samples for each node.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
min_impurity_split : float, optional (default=1e-7)
Threshold for early stopping in tree growth. A node will split
if its impurity is above the threshold, otherwise it is a leaf.
.. versionadded:: 0.18
bootstrap : boolean, optional (default=True)
Whether bootstrap samples are used when building trees.
oob_score : bool, optional (default=False)
Whether to use out-of-bag samples to estimate
the R^2 on unseen data.
n_jobs : integer, optional (default=1)
The number of jobs to run in parallel for both `fit` and `predict`.
If -1, then the number of jobs is set to the number of cores.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
Controls the verbosity of the tree building process.
warm_start : bool, optional (default=False)
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just fit a whole
new forest.
Attributes
----------
estimators_ : list of DecisionTreeRegressor
The collection of fitted sub-estimators.
feature_importances_ : array of shape = [n_features]
The feature importances (the higher, the more important the feature).
n_features_ : int
The number of features when ``fit`` is performed.
n_outputs_ : int
The number of outputs when ``fit`` is performed.
oob_score_ : float
Score of the training dataset obtained using an out-of-bag estimate.
oob_prediction_ : array of shape = [n_samples]
Prediction computed with out-of-bag estimate on the training set.
References
----------
.. [1] L. Breiman, "Random Forests", Machine Learning, 45(1), 5-32, 2001.
See also
--------
DecisionTreeRegressor, ExtraTreesRegressor
"""
def __init__(self,
n_estimators=10,
criterion="mse",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
max_leaf_nodes=None,
min_impurity_split=1e-7,
bootstrap=True,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False):
super(RandomForestRegressor, self).__init__(
base_estimator=DecisionTreeRegressor(),
n_estimators=n_estimators,
estimator_params=("criterion", "max_depth", "min_samples_split",
"min_samples_leaf", "min_weight_fraction_leaf",
"max_features", "max_leaf_nodes", "min_impurity_split",
"random_state"),
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start)
self.criterion = criterion
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = max_features
self.max_leaf_nodes = max_leaf_nodes
self.min_impurity_split = min_impurity_split
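# --- Editor's illustrative sketch (not part of the original scikit-learn
# source). It shows the regressor analogue of the classifier example above,
# where oob_score_ holds an out-of-bag R^2 estimate as described in the
# docstring; the helper name and parameter values are assumptions for
# illustration.
def _demo_random_forest_regressor():  # pragma: no cover
    from sklearn.datasets import make_regression
    from sklearn.ensemble import RandomForestRegressor

    X, y = make_regression(n_samples=300, n_features=6, noise=0.5,
                           random_state=0)
    reg = RandomForestRegressor(n_estimators=50, oob_score=True,
                                random_state=0).fit(X, y)
    print("OOB R^2: %.3f" % reg.oob_score_)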
class ExtraTreesClassifier(ForestClassifier):
"""An extra-trees classifier.
This class implements a meta estimator that fits a number of
randomized decision trees (a.k.a. extra-trees) on various sub-samples
of the dataset and uses averaging to improve the predictive accuracy
and control over-fitting.
Read more in the :ref:`User Guide <forest>`.
Parameters
----------
n_estimators : integer, optional (default=10)
The number of trees in the forest.
criterion : string, optional (default="gini")
The function to measure the quality of a split. Supported criteria are
"gini" for the Gini impurity and "entropy" for the information gain.
max_features : int, float, string or None, optional (default="auto")
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=sqrt(n_features)`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires
effectively inspecting more than ``max_features`` features.
max_depth : integer or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
min_samples_split : int, float, optional (default=2)
The minimum number of samples required to split an internal node:
- If int, then consider `min_samples_split` as the minimum number.
- If float, then `min_samples_split` is a percentage and
`ceil(min_samples_split * n_samples)` are the minimum
number of samples for each split.
min_samples_leaf : int, float, optional (default=1)
The minimum number of samples required to be at a leaf node:
- If int, then consider `min_samples_leaf` as the minimum number.
- If float, then `min_samples_leaf` is a percentage and
`ceil(min_samples_leaf * n_samples)` are the minimum
number of samples for each node.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
min_impurity_split : float, optional (default=1e-7)
Threshold for early stopping in tree growth. A node will split
if its impurity is above the threshold, otherwise it is a leaf.
.. versionadded:: 0.18
bootstrap : boolean, optional (default=False)
Whether bootstrap samples are used when building trees.
oob_score : bool, optional (default=False)
Whether to use out-of-bag samples to estimate
the generalization accuracy.
n_jobs : integer, optional (default=1)
The number of jobs to run in parallel for both `fit` and `predict`.
If -1, then the number of jobs is set to the number of cores.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
Controls the verbosity of the tree building process.
warm_start : bool, optional (default=False)
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just fit a whole
new forest.
class_weight : dict, list of dicts, "balanced", "balanced_subsample" or None, optional (default=None)
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one. For
multi-output problems, a list of dicts can be provided in the same
order as the columns of y.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
The "balanced_subsample" mode is the same as "balanced" except that weights are
computed based on the bootstrap sample for every tree grown.
For multi-output, the weights of each column of y will be multiplied.
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
Attributes
----------
estimators_ : list of DecisionTreeClassifier
The collection of fitted sub-estimators.
classes_ : array of shape = [n_classes] or a list of such arrays
The classes labels (single output problem), or a list of arrays of
class labels (multi-output problem).
n_classes_ : int or list
The number of classes (single output problem), or a list containing the
number of classes for each output (multi-output problem).
feature_importances_ : array of shape = [n_features]
The feature importances (the higher, the more important the feature).
n_features_ : int
The number of features when ``fit`` is performed.
n_outputs_ : int
The number of outputs when ``fit`` is performed.
oob_score_ : float
Score of the training dataset obtained using an out-of-bag estimate.
oob_decision_function_ : array of shape = [n_samples, n_classes]
Decision function computed with out-of-bag estimate on the training
set. If n_estimators is small it might be possible that a data point
was never left out during the bootstrap. In this case,
`oob_decision_function_` might contain NaN.
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
See also
--------
sklearn.tree.ExtraTreeClassifier : Base classifier for this ensemble.
RandomForestClassifier : Ensemble Classifier based on trees with optimal
splits.
"""
def __init__(self,
n_estimators=10,
criterion="gini",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
max_leaf_nodes=None,
min_impurity_split=1e-7,
bootstrap=False,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False,
class_weight=None):
super(ExtraTreesClassifier, self).__init__(
base_estimator=ExtraTreeClassifier(),
n_estimators=n_estimators,
estimator_params=("criterion", "max_depth", "min_samples_split",
"min_samples_leaf", "min_weight_fraction_leaf",
"max_features", "max_leaf_nodes", "min_impurity_split",
"random_state"),
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start,
class_weight=class_weight)
self.criterion = criterion
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = max_features
self.max_leaf_nodes = max_leaf_nodes
self.min_impurity_split = min_impurity_split
class ExtraTreesRegressor(ForestRegressor):
"""An extra-trees regressor.
This class implements a meta estimator that fits a number of
randomized decision trees (a.k.a. extra-trees) on various sub-samples
of the dataset and uses averaging to improve the predictive accuracy
and control over-fitting.
Read more in the :ref:`User Guide <forest>`.
Parameters
----------
n_estimators : integer, optional (default=10)
The number of trees in the forest.
criterion : string, optional (default="mse")
The function to measure the quality of a split. Supported criteria
are "mse" for the mean squared error, which is equal to variance
reduction as feature selection criterion, and "mae" for the mean
absolute error.
.. versionadded:: 0.18
Mean Absolute Error (MAE) criterion.
max_features : int, float, string or None, optional (default="auto")
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=n_features`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires
effectively inspecting more than ``max_features`` features.
max_depth : integer or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
min_samples_split : int, float, optional (default=2)
The minimum number of samples required to split an internal node:
- If int, then consider `min_samples_split` as the minimum number.
- If float, then `min_samples_split` is a percentage and
`ceil(min_samples_split * n_samples)` are the minimum
number of samples for each split.
min_samples_leaf : int, float, optional (default=1)
The minimum number of samples required to be at a leaf node:
- If int, then consider `min_samples_leaf` as the minimum number.
- If float, then `min_samples_leaf` is a percentage and
`ceil(min_samples_leaf * n_samples)` are the minimum
number of samples for each node.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
min_impurity_split : float, optional (default=1e-7)
Threshold for early stopping in tree growth. A node will split
if its impurity is above the threshold, otherwise it is a leaf.
.. versionadded:: 0.18
bootstrap : boolean, optional (default=False)
Whether bootstrap samples are used when building trees.
oob_score : bool, optional (default=False)
Whether to use out-of-bag samples to estimate the R^2 on unseen data.
n_jobs : integer, optional (default=1)
The number of jobs to run in parallel for both `fit` and `predict`.
If -1, then the number of jobs is set to the number of cores.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
Controls the verbosity of the tree building process.
warm_start : bool, optional (default=False)
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just fit a whole
new forest.
Attributes
----------
estimators_ : list of DecisionTreeRegressor
The collection of fitted sub-estimators.
feature_importances_ : array of shape = [n_features]
The feature importances (the higher, the more important the feature).
n_features_ : int
The number of features.
n_outputs_ : int
The number of outputs.
oob_score_ : float
Score of the training dataset obtained using an out-of-bag estimate.
oob_prediction_ : array of shape = [n_samples]
Prediction computed with out-of-bag estimate on the training set.
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
See also
--------
sklearn.tree.ExtraTreeRegressor: Base estimator for this ensemble.
RandomForestRegressor: Ensemble regressor using trees with optimal splits.
"""
def __init__(self,
n_estimators=10,
criterion="mse",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
max_leaf_nodes=None,
min_impurity_split=1e-7,
bootstrap=False,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False):
super(ExtraTreesRegressor, self).__init__(
base_estimator=ExtraTreeRegressor(),
n_estimators=n_estimators,
estimator_params=("criterion", "max_depth", "min_samples_split",
"min_samples_leaf", "min_weight_fraction_leaf",
"max_features", "max_leaf_nodes", "min_impurity_split",
"random_state"),
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start)
self.criterion = criterion
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = max_features
self.max_leaf_nodes = max_leaf_nodes
self.min_impurity_split = min_impurity_split
class RandomTreesEmbedding(BaseForest):
"""An ensemble of totally random trees.
An unsupervised transformation of a dataset to a high-dimensional
sparse representation. A datapoint is coded according to which leaf of
each tree it is sorted into. Using a one-hot encoding of the leaves,
this leads to a binary coding with as many ones as there are trees in
the forest.
The dimensionality of the resulting representation is
``n_out <= n_estimators * max_leaf_nodes``. If ``max_leaf_nodes == None``,
the number of leaf nodes is at most ``n_estimators * 2 ** max_depth``.
Read more in the :ref:`User Guide <random_trees_embedding>`.
Parameters
----------
n_estimators : integer, optional (default=10)
Number of trees in the forest.
max_depth : integer, optional (default=5)
The maximum depth of each tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
min_samples_split : int, float, optional (default=2)
The minimum number of samples required to split an internal node:
- If int, then consider `min_samples_split` as the minimum number.
- If float, then `min_samples_split` is a percentage and
`ceil(min_samples_split * n_samples)` is the minimum
number of samples for each split.
min_samples_leaf : int, float, optional (default=1)
The minimum number of samples required to be at a leaf node:
- If int, then consider `min_samples_leaf` as the minimum number.
- If float, then `min_samples_leaf` is a percentage and
`ceil(min_samples_leaf * n_samples)` is the minimum
number of samples for each node.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
min_impurity_split : float, optional (default=1e-7)
Threshold for early stopping in tree growth. A node will split
if its impurity is above the threshold, otherwise it is a leaf.
.. versionadded:: 0.18
sparse_output : bool, optional (default=True)
Whether or not to return a sparse CSR matrix, as default behavior,
or to return a dense array compatible with dense pipeline operators.
n_jobs : integer, optional (default=1)
The number of jobs to run in parallel for both `fit` and `predict`.
If -1, then the number of jobs is set to the number of cores.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
Controls the verbosity of the tree building process.
warm_start : bool, optional (default=False)
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just fit a whole
new forest.
Attributes
----------
estimators_ : list of DecisionTreeClassifier
The collection of fitted sub-estimators.
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
.. [2] Moosmann, F. and Triggs, B. and Jurie, F. "Fast discriminative
visual codebooks using randomized clustering forests"
NIPS 2007
"""
def __init__(self,
n_estimators=10,
max_depth=5,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_leaf_nodes=None,
min_impurity_split=1e-7,
sparse_output=True,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False):
super(RandomTreesEmbedding, self).__init__(
base_estimator=ExtraTreeRegressor(),
n_estimators=n_estimators,
estimator_params=("criterion", "max_depth", "min_samples_split",
"min_samples_leaf", "min_weight_fraction_leaf",
"max_features", "max_leaf_nodes", "min_impurity_split",
"random_state"),
bootstrap=False,
oob_score=False,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start)
self.criterion = 'mse'
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = 1
self.max_leaf_nodes = max_leaf_nodes
self.min_impurity_split = min_impurity_split
self.sparse_output = sparse_output
def _set_oob_score(self, X, y):
raise NotImplementedError("OOB score not supported by tree embedding")
def fit(self, X, y=None, sample_weight=None):
"""Fit estimator.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
The input samples. Use ``dtype=np.float32`` for maximum
efficiency. Sparse matrices are also supported, use sparse
``csc_matrix`` for maximum efficiency.
Returns
-------
self : object
Returns self.
"""
self.fit_transform(X, y, sample_weight=sample_weight)
return self
def fit_transform(self, X, y=None, sample_weight=None):
"""Fit estimator and transform dataset.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Input data used to build forests. Use ``dtype=np.float32`` for
maximum efficiency.
Returns
-------
X_transformed : sparse matrix, shape=(n_samples, n_out)
Transformed dataset.
"""
# ensure_2d=False because there are actually unit tests checking that we
# fail for 1d input.
X = check_array(X, accept_sparse=['csc'], ensure_2d=False)
if issparse(X):
# Pre-sort indices to avoid that each individual tree of the
# ensemble sorts the indices.
X.sort_indices()
rnd = check_random_state(self.random_state)
y = rnd.uniform(size=X.shape[0])
super(RandomTreesEmbedding, self).fit(X, y,
sample_weight=sample_weight)
self.one_hot_encoder_ = OneHotEncoder(sparse=self.sparse_output)
return self.one_hot_encoder_.fit_transform(self.apply(X))
def transform(self, X):
"""Transform dataset.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Input data to be transformed. Use ``dtype=np.float32`` for maximum
efficiency. Sparse matrices are also supported, use sparse
``csr_matrix`` for maximum efficiency.
Returns
-------
X_transformed : sparse matrix, shape=(n_samples, n_out)
Transformed dataset.
"""
return self.one_hot_encoder_.transform(self.apply(X))
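# --- Editor's illustrative sketch (not part of the original scikit-learn
# source). It shows the sparse one-hot leaf coding produced by
# RandomTreesEmbedding.fit_transform as described in the class docstring:
# every sample activates exactly one leaf per tree. The helper name and the
# toy data sizes are assumptions made for illustration only.
def _demo_random_trees_embedding():  # pragma: no cover
    import numpy as np
    from sklearn.ensemble import RandomTreesEmbedding

    X = np.random.RandomState(0).rand(50, 4)
    embedder = RandomTreesEmbedding(n_estimators=5, max_depth=3,
                                    random_state=0)
    X_sparse = embedder.fit_transform(X)
    # Expect one non-zero entry per tree for each of the 50 samples.
    print("shape %s, one leaf per tree: %s"
          % (str(X_sparse.shape), X_sparse.getnnz() == 50 * 5))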
|
bsd-3-clause
|
rvraghav93/scikit-learn
|
sklearn/manifold/setup.py
|
43
|
1283
|
import os
from os.path import join
import numpy
from numpy.distutils.misc_util import Configuration
from sklearn._build_utils import get_blas_info
def configuration(parent_package="", top_path=None):
config = Configuration("manifold", parent_package, top_path)
libraries = []
if os.name == 'posix':
libraries.append('m')
config.add_extension("_utils",
sources=["_utils.pyx"],
include_dirs=[numpy.get_include()],
libraries=libraries,
extra_compile_args=["-O3"])
cblas_libs, blas_info = get_blas_info()
eca = blas_info.pop('extra_compile_args', [])
eca.append("-O4")
config.add_extension("_barnes_hut_tsne",
libraries=cblas_libs,
sources=["_barnes_hut_tsne.pyx"],
include_dirs=[join('..', 'src', 'cblas'),
numpy.get_include(),
blas_info.pop('include_dirs', [])],
extra_compile_args=eca, **blas_info)
config.add_subpackage('tests')
return config
if __name__ == "__main__":
from numpy.distutils.core import setup
setup(**configuration().todict())
|
bsd-3-clause
|
cdegroc/scikit-learn
|
sklearn/linear_model/tests/test_perceptron.py
|
4
|
1559
|
import numpy as np
import scipy.sparse as sp
from numpy.testing import assert_array_almost_equal
from nose.tools import assert_true
from sklearn.utils import check_random_state
from sklearn.datasets import load_iris
from sklearn.linear_model import Perceptron
iris = load_iris()
random_state = check_random_state(12)
indices = np.arange(iris.data.shape[0])
random_state.shuffle(indices)
X = iris.data[indices]
y = iris.target[indices]
X_csr = sp.csr_matrix(X)
X_csr.sort_indices()
class MyPerceptron(object):
def __init__(self, n_iter=1):
self.n_iter = n_iter
def fit(self, X, y):
n_samples, n_features = X.shape
self.w = np.zeros(n_features, dtype=np.float64)
self.b = 0.0
for t in range(self.n_iter):
for i in range(n_samples):
if self.predict(X[i])[0] != y[i]:
self.w += y[i] * X[i]
self.b += y[i]
def project(self, X):
return np.dot(X, self.w) + self.b
def predict(self, X):
X = np.atleast_2d(X)
return np.sign(self.project(X))
def test_perceptron_accuracy():
for data in (X, X_csr):
clf = Perceptron(n_iter=30, shuffle=False, seed=0)
clf.fit(data, y)
score = clf.score(data, y)
assert_true(score >= 0.7)
def test_perceptron_correctness():
y_bin = y.copy()
y_bin[y != 1] = -1
clf1 = MyPerceptron(n_iter=2)
clf1.fit(X, y_bin)
clf2 = Perceptron(n_iter=2)
clf2.fit(X, y_bin)
assert_array_almost_equal(clf1.w, clf2.coef_.ravel())
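# --- Editor's illustrative sketch (not part of the original test module). It
# applies the reference MyPerceptron defined above to a tiny linearly
# separable problem to make the update rule (w += y * x, b += y) concrete;
# the function name and the toy data are assumptions made for illustration.
def _demo_myperceptron_toy():  # pragma: no cover
    X_toy = np.array([[2.0, 1.0], [1.5, 2.0], [-1.0, -2.0], [-2.0, -0.5]])
    y_toy = np.array([1, 1, -1, -1])
    clf = MyPerceptron(n_iter=5)
    clf.fit(X_toy, y_toy)
    # After a few passes the learned hyperplane classifies all points.
    assert_true((clf.predict(X_toy) == y_toy).all())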
|
bsd-3-clause
|
ratschlab/RNA-geeq
|
eval/gen_alignment_statistics.py
|
1
|
18372
|
"""This script generates statistical overviews for a given alignment. """
import sys
import os
import re
import subprocess
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import scipy as sp
import numpy.random as npr
import h5py
import time
import pdb
from modules.utils import *
from modules.plotting import *
from optparse import OptionParser, OptionGroup
def parse_options(argv, parser):
"""Parses options from the command line """
optional = OptionGroup(parser, 'OPTIONAL')
optional.add_option('-g', '--genome', dest='genome', metavar='FILE', help='genome in fasta or hdf5 format (needs ending .hdf5 for latter)', default='-')
optional.add_option('-I', '--ignore_missing_chr', dest='ignore_missing_chr', action='store_true', help='ignore chromosomes missing in the annotation', default=False)
optional.add_option('-s', '--shift_start', dest='shift_start', action='store_false', help='turn shifting start of softclips to accommodate an old bug OFF - it is usually ON!', default=True)
optional.add_option('-b', '--bam_input', dest='bam_input', action='store_true', help='input has BAM format - does not work for STDIN', default=False)
optional.add_option('-S', '--samtools', dest='samtools', metavar='PATH', help='if SAMtools is not in your PATH, provide the right path here (only necessary for BAM input)', default='samtools')
optional.add_option('-o', '--outfile_base', dest='outfile_base', metavar='PATH', help='basename for outfiles written [align_stats]', default='align_stats')
optional.add_option('-L', '--legend', dest='legend', action='store_true', help='put legend into plots [off]', default=False)
optional.add_option('-l', '--lines', dest='lines', metavar='INT', type='int', help='maximal number of alignment lines to read [-]', default=None)
optional.add_option('-r', '--random', dest='random', metavar='FLOAT', type='float', help='probability to accept an input line -- effective subsampling [1.0]', default=1.0)
optional.add_option('-m', '--max_readlength', dest='max_readlen', metavar='INT', type='int', help='maximal read length to be considered [200]', default=200)
optional.add_option('-v', '--verbose', dest='verbose', action='store_true', help='verbosity', default=False)
optional.add_option('-d', '--debug', dest='debug', action='store_true', help='print debugging output', default=False)
parser.add_option_group(optional)
return parser.parse_args()
def get_tags(sl):
"""Extract tags from SAM line and return as dict"""
#return dict(z for z in [(x[0], int(x[2])) if x[1] == 'i' else (x[0], float(x[2])) if x[1] == 'f' else (x[0], x[2]) for x in [y.split(':') for y in sl]])
tags = dict()
for s in sl:
ssl = s.split(':')
#if ssl[1] == 'i':
# tags[ssl[0]] = int(ssl[2])
#elif ssl[1] == 'f':
# tags[ssl[0]] = float(ssl[2])
#else:
tags[ssl[0]] = ssl[2]
return tags
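# Editor's note (illustrative, not part of the original script): with the type
# conversion above left commented out, get_tags returns every tag value as a
# string, e.g. get_tags(['NH:i:2', 'NM:i:1']) -> {'NH': '2', 'NM': '1'}, which
# is why callers below cast values such as tags['NH'] back to int explicitly.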
def main():
"""Main function generating the alignment statistics."""
### get command line arguments
parser = OptionParser(usage="%prog [options] LIST OF ALIGNMENT FILES")
(options, args) = parse_options(sys.argv, parser)
if len(args) == 0:
parser.print_help()
sys.exit(1)
### load genome
if options.genome != '-':
if options.genome.split('.')[-1] == 'hdf5':
genome = hdf52dict(options.genome)
for g in genome:
genome[g] = str(genome[g])
else:
genome = read_fasta(options.genome)
infiles = args
### check, if infile is hdf5, in this case only do the plotting
if infiles[0].endswith('hdf5'):
for i, fname in enumerate(infiles):
print >> sys.stdout, 'Loading counts from hdf5 %s' % fname
h5_in = h5py.File(fname)
if i == 0:
plot_info = h5_in['plot_info'][:]
counts = dict()
filelist = h5_in['files'][:]
for key in h5_in:
if key in ['files', 'plot_info']:
continue
counts[key] = h5_in[key][:]
else:
filelist = sp.r_[filelist, h5_in['files'][:]]
for key in h5_in:
if key in ['files', 'plot_info']:
continue
if len(h5_in[key].shape) > 1 and h5_in[key].shape[1] > counts[key].shape[1]:
counts[key] = sp.c_[counts[key], sp.zeros((counts[key].shape[0], h5_in[key].shape[1] - counts[key].shape[1]))]
counts[key] = sp.r_[counts[key], h5_in[key][:]]
elif len(h5_in[key].shape) > 1 and h5_in[key].shape[1] < counts[key].shape[1]:
tmp = h5_in[key][:]
tmp = sp.c_[tmp, sp.zeros((tmp.shape[0], counts[key].shape[1] - h5_in[key].shape[1]))]
counts[key] = sp.r_[counts[key], tmp]
else:
counts[key] = sp.r_[counts[key], h5_in[key][:]]
h5_in.close()
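            ### note: count matrices from different hdf5 files may differ in column
            ### number; the branches above zero-pad the narrower matrix so the rows
            ### of all files can be stacked into one array per category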
else:
### initializations
filter_counter = 0
unspliced = 0
readlen = 0
max_readlen = 30
counts = dict()
for category in ['mismatches', 'deletions', 'insertions', 'qualities_per_pos', 'intron_pos', 'min_seg_len']:
counts[category] = sp.zeros((len(infiles), options.max_readlen), dtype='int')
counts['qualities'] = sp.zeros((len(infiles), 80), dtype='int')
counts['number_of_segments'] = sp.zeros((len(infiles), 10), dtype='int')
counts['deletion_lens'] = sp.zeros((len(infiles), 500), dtype='int')
counts['insertion_lens'] = sp.zeros((len(infiles), 500), dtype='int')
counts['multimappers'] = sp.zeros((len(infiles), 1000), dtype='int')
for category in ['unaligned_reads', 'primary_alignments', 'secondary_alignments', 'unique_alignments', 'non_unique_alignments']:
counts[category] = sp.zeros((len(infiles), ), dtype='int')
t0 = time.time()
### iterate over infiles
for f, fname in enumerate(infiles):
### open infile handle
if fname == '-':
infile = sys.stdin
elif options.bam_input:
fh = subprocess.Popen([options.samtools, 'view', fname], stdout=subprocess.PIPE)
infile = fh.stdout
else:
infile = open(fname, 'r')
taken_ids = set()
if options.verbose:
print >> sys.stdout, 'Parsing alignments from %s' % fname
for counter, line in enumerate(infile):
if line[0] in ['@', '#' ] or line[:2] == 'SQ':
continue
if options.lines is not None and counter > options.lines:
break
if options.verbose and counter > 0 and counter % 100000 == 0:
t1 = time.time()
print 'lines read: [ %s (taken: %s / filtered: %s)] ... took %i sec' % (counter, counter - filter_counter, filter_counter, t1 - t0)
t0 = t1
sl = line.strip().split('\t')
if options.random < 1.0:
if npr.rand() > options.random and not sl[0] in taken_ids:
continue
else:
taken_ids.add(sl[0])
if len(sl) < 11:
filter_counter += 1
continue
### check if unmapped
if ((int(sl[1]) & 4) == 4):
counts['unaligned_reads'][f] +=1
continue
if sl[9] != '*':
readlen = len(sl[9])
read = sl[9].upper()
max_readlen = max(readlen, max_readlen)
else:
print >> sys.stderr, 'No read sequence given in SAM'
sys.exit(-1)
is_secondary = ((int(sl[1]) & 256) == 256)
if is_secondary:
counts['secondary_alignments'][f] += 1
else:
counts['primary_alignments'][f] += 1
tags = get_tags(sl[11:])
if 'NH' in tags:
if int(tags['NH']) == 1:
counts['unique_alignments'][f] += 1
else:
counts['non_unique_alignments'][f] += 1
counts['multimappers'][f, int(tags['NH'])] += 1
is_reversed = ((int(sl[1]) & 16) == 16)
### check, if read is reversed -> must change coordinates
if is_reversed:
_reversed = readlen - 1
else:
_reversed = 0
### record min segment length for spliced alignments
if 'N' in sl[5]:
__cig = sl[5]
__cig = re.sub('[0-9]*[IHS]', '', __cig)
min_sl = min([sum([int('0'+i) for i in re.split('[^0-9]', '0' + _cig + 'Z0')][:-2]) for _cig in __cig.strip().split('N')])
counts['min_seg_len'][f, min_sl] += 1
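                    ### note: after stripping insertions and clips, the CIGAR is split at 'N'
                    ### (introns) and the numeric lengths of each piece are summed ([:-2] drops
                    ### the trailing intron length and the appended sentinel); the smallest sum
                    ### is the length of the shortest aligned segment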
### count exons / segments in read
counts['number_of_segments'][f, sl[5].count('N') + 1] += 1
### count intron distribution for spliced reads
### the intron position is measured as the length of the first exon/segment (0-based position counting)
### handle deletions - they do not affect block length
rl = sl[5]
rl = re.sub('[0-9]*D', '', rl)
rl = re.sub('[MISH]', 'M', rl) ### for this analysis softclips and hardclips are counted as positions in the original read
segm_len = sp.cumsum([sp.array(x.split('M')[:-1], dtype='int').sum() for x in ('%s0' % rl).split('N')])
### in case of alignment to minus strand position is reversed
for s in segm_len[:-1]:
counts['intron_pos'][f, abs(_reversed - s)] += 1
else:
unspliced += 1
### count exons / segments in read
counts['number_of_segments'][f, 1] += 1
### build up mismatch-statistics from genome if MD tag is not available
(size, op) = (re.split('[^0-9]', sl[5])[:-1], re.split('[0-9]*', sl[5])[1:])
size = [int(i) for i in size]
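                ### e.g. a CIGAR of '10M2I38M' yields size = [10, 2, 38] and op = ['M', 'I', 'M']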
chrm_pos = 0 # position in chrm
read_pos = 0 # actual position in the read
clipped_read_pos = 0
for pos in range(len(size)):
if op[pos] == 'M' and options.genome != '-':
gen_start = int(sl[3]) - 1
try:
gen = genome[sl[2]][gen_start + chrm_pos : gen_start + chrm_pos + size[pos]].upper()
except:
if options.ignore_missing_chr:
continue
else:
print >> sys.stderr, 'Chromosome name %s could not be found in %s' % (sl[2], options.genome)
sys.exit(1)
for p in range(size[pos]):
try:
if gen[p] != read[read_pos + p]:
counts['mismatches'][f, abs(_reversed - (clipped_read_pos + read_pos + p))] += 1
except IndexError:
if options.debug:
print >> sys.stderr, 'gen: %s' % gen
print >> sys.stderr, 'read: %s' % read
print >> sys.stderr, 'pos in gen: %i' % p
print >> sys.stderr, 'pos in read: %i' % (read_pos + p)
pdb.set_trace()
else:
print >> sys.stderr, 'Index Error in line:\n %s' % line
sys.exit(1)
chrm_pos += size[pos]
read_pos += size[pos]
elif op[pos] == 'I': # insertions
counts['insertion_lens'][f, size[pos]] += 1
_p = abs(_reversed - (read_pos + clipped_read_pos))
counts['insertions'][f, _p:_p + size[pos]] += 1
# for _p in range(size[pos]):
# counts['insertions'][f, abs(_reversed - (read_pos + _p + clipped_read_pos))] += 1
read_pos += size[pos]
elif op[pos] == 'D': # deletions
counts['deletion_lens'][f, size[pos]] += 1
counts['deletions'][f, abs(_reversed - read_pos - clipped_read_pos)] += 1 # count only one deletion, not depending on number of positions deleted. ...size[pos]
chrm_pos += size[pos]
elif op[pos] == 'N': # introns
chrm_pos += size[pos]
elif op[pos] == 'S': # softclips
read_pos += size[pos]
if options.shift_start:
chrm_pos += size[pos]
elif op[pos] == 'H': # hardclips
clipped_read_pos += size[pos]
### build up quality distribution (only for primary alignments as this is a property of the key)
### do it only for 1% of the reads as it is too costly otherwise
if not is_secondary and npr.random() < 0.01:
if len(sl) > 10 and sl[10] != '*':
if is_reversed:
quality_string = sl[10][::-1]
else:
quality_string = sl[10]
for _pidx, _p in enumerate(quality_string):
counts['qualities'][f, ord(_p)] += 1
counts['qualities_per_pos'][f, _pidx] += ord(_p)
### clean up
if fname != '-':
infile.close()
del taken_ids
### truncate counts to max non-zero x
for c in counts:
if len(counts[c].shape) > 1:
max_idx = 0
for i in range(counts[c].shape[0]):
idx = sp.where(counts[c][i, :] > 0)[0]
if idx.shape[0] > 0:
max_idx = max(max_idx, min(idx[-1] + 1, counts[c].shape[1]))
else:
max_idx = counts[c].shape[1]
counts[c] = counts[c][:, :max_idx]
else:
idx = sp.where(counts[c] > 0)[0]
if idx.shape[0] > 0:
max_idx = min(idx[-1] + 1, counts[c].shape[0])
counts[c] = counts[c][:max_idx]
### collect plot_info
### [data_field, plot_type, transformation, x-label, y-label, title']
plot_info = [
['intron_pos', 'plot', '', 'read position', 'frequency', 'Split Position Distribution'],
['number_of_segments', 'bar', 'log10', 'number of segments', 'frequency', 'Number of Segments'],
['mismatches', 'plot', '', 'read position', 'mismatches', 'Mismatch Distribution'],
['insertions', 'plot', '', 'read position', 'insertions', 'Insertion Distribution'],
['deletions', 'plot', '', 'read position', 'deletions', 'Deletion Distribution'],
            ['qualities', 'plot', '', 'phred score', 'frequency', 'Quality Value Distribution'],
['qualities_per_pos', 'plot', '', 'read position', 'avg. quality', 'Position-wise Quality Distribution'],
['deletion_lens', 'plot', '', 'deletion length', 'frequency', 'Deletion Length Distribution'],
            ['insertion_lens', 'plot', '', 'insertion length', 'frequency', 'Insertion Length Distribution'],
['min_seg_len', 'plot', '', 'shortest segment length', 'frequency', 'Shortest Segment Length Distribution'],
['multimappers', 'plot', '', 'number of hits', 'frequency', 'Distribution of Alignment Ambiguity'],
['primary_alignments', 'bar', '', 'sample', 'number of alignments', 'Number of Primary Alignments'],
['secondary_alignments', 'bar', '', 'sample', 'number of alignments', 'Number of Secondary Alignments'],
['unaligned_reads', 'bar', '', 'sample', 'number of unaligned reads', 'Number of Unaligned Reads'],
['unique_alignments', 'bar', '', 'sample', 'number of unique alignments', 'Number of Unique Alignments'],
['non_unique_alignments', 'bar', '', 'sample', 'number of non-unique alignments', 'Number of Non-unique Alignments'],
]
plot_info = sp.array(plot_info, dtype='str')
### store output as HDF5 file
h5_out = h5py.File('%s.hdf5' % options.outfile_base, 'w')
h5_out.create_dataset(name='files', data=sp.array(infiles, dtype='str'))
h5_out.create_dataset(name='plot_info', data=plot_info)
for key in counts:
h5_out.create_dataset(name=key, data=counts[key], dtype='int')
h5_out.close()
filelist = infiles
### plotting
fig = plt.figure(figsize=(15, 2*plot_info.shape[0]), dpi=300)
gs = gridspec.GridSpec((plot_info.shape[0] + 1) / 2, 2)
cmap = plt.get_cmap('jet')
norm = plt.Normalize(0, len(infiles))
axes = []
label_list = ['...' + x[-12:] if len(x) > 12 else x for x in filelist]
for i in range(plot_info.shape[0]):
axes.append(plt.subplot(gs[i / 2, i % 2]))
if options.legend:
plot(counts[plot_info[i, 0]], plot_info[i, :], ax=axes[-1], labels=label_list)
else:
plot(counts[plot_info[i, 0]], plot_info[i, :], ax=axes[-1])
plt.tight_layout()
### plot data
plt.savefig(options.outfile_base + '.overview.pdf', format='pdf')
if __name__ == '__main__':
main()
|
mit
|
areeda/gwpy
|
gwpy/plot/tests/test_segments.py
|
3
|
5755
|
# -*- coding: utf-8 -*-
# Copyright (C) Duncan Macleod (2018-2020)
#
# This file is part of GWpy.
#
# GWpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# GWpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GWpy. If not, see <http://www.gnu.org/licenses/>.
"""Tests for `gwpy.plot.segments`
"""
import pytest
import numpy
from matplotlib import rcParams
from matplotlib.colors import ColorConverter
from matplotlib.collections import PatchCollection
from ...segments import (Segment, SegmentList, SegmentListDict,
DataQualityFlag, DataQualityDict)
from ...time import to_gps
from .. import SegmentAxes
from ..segments import SegmentRectangle
from .test_axes import TestAxes as _TestAxes
# extract color cycle
COLOR_CONVERTER = ColorConverter()
COLOR_CYCLE = rcParams['axes.prop_cycle'].by_key()['color']
COLOR0 = COLOR_CONVERTER.to_rgba(COLOR_CYCLE[0])
class TestSegmentAxes(_TestAxes):
AXES_CLASS = SegmentAxes
@staticmethod
@pytest.fixture()
def segments():
return SegmentList([Segment(0, 3), Segment(6, 7)])
@staticmethod
@pytest.fixture()
def flag():
known = SegmentList([Segment(0, 3), Segment(6, 7)])
active = SegmentList([Segment(1, 2), Segment(3, 4), Segment(5, 7)])
return DataQualityFlag(name='Test segments', known=known,
active=active)
def test_plot_flag(self, ax, flag):
c = ax.plot_flag(flag)
assert c.get_label() == flag.texname
assert len(ax.collections) == 2
assert ax.collections[0] is c
flag.isgood = False
c = ax.plot_flag(flag)
assert tuple(c.get_facecolors()[0]) == (1., 0., 0., 1.)
c = ax.plot_flag(flag, known={'facecolor': 'black'})
c = ax.plot_flag(flag, known='fancy')
def test_plot_dqflag(self, ax, flag):
with pytest.deprecated_call():
ax.plot_dqflag(flag)
assert ax.collections # make sure it plotted something
def test_plot_dict(self, ax, flag):
dqd = DataQualityDict()
dqd['a'] = flag
dqd['b'] = flag
colls = ax.plot_dict(dqd)
assert len(colls) == len(dqd)
assert all(isinstance(c, PatchCollection) for c in colls)
assert colls[0].get_label() == 'a'
assert colls[1].get_label() == 'b'
colls = ax.plot_dict(dqd, label='name')
assert colls[0].get_label() == 'Test segments'
colls = ax.plot_dict(dqd, label='anything')
assert colls[0].get_label() == 'anything'
def test_plot_dqdict(self, ax, flag):
with pytest.deprecated_call():
ax.plot_dqdict(DataQualityDict(a=flag))
def test_plot_segmentlist(self, ax, segments):
c = ax.plot_segmentlist(segments)
assert isinstance(c, PatchCollection)
assert numpy.isclose(ax.dataLim.x0, 0.)
assert numpy.isclose(ax.dataLim.x1, 7.)
assert len(c.get_paths()) == len(segments)
assert ax.get_epoch() == segments[0][0]
# test y
p = ax.plot_segmentlist(segments).get_paths()[0].get_extents()
assert p.y0 + p.height/2. == 1.
p = ax.plot_segmentlist(segments, y=8).get_paths()[0].get_extents()
assert p.y0 + p.height/2. == 8.
# test kwargs
c = ax.plot_segmentlist(segments, label='My segments',
rasterized=True)
assert c.get_label() == 'My segments'
assert c.get_rasterized() is True
# test collection=False
c = ax.plot_segmentlist(segments, collection=False, label='test')
assert isinstance(c, list)
assert not isinstance(c, PatchCollection)
assert c[0].get_label() == 'test'
assert c[1].get_label() == ''
assert len(ax.patches) == len(segments)
# test empty
c = ax.plot_segmentlist(type(segments)())
def test_plot_segmentlistdict(self, ax, segments):
sld = SegmentListDict()
sld['TEST'] = segments
ax.plot(sld)
def test_plot(self, ax, segments, flag):
dqd = DataQualityDict(a=flag)
ax.plot(segments)
ax.plot(flag)
ax.plot(dqd)
ax.plot(flag, segments, dqd)
def test_insetlabels(self, ax, segments):
ax.plot(segments)
ax.set_insetlabels(True)
def test_fmt_data(self, ax):
# just check that the LIGOTimeGPS repr is in place
value = 1234567890.123
assert ax.format_xdata(value) == str(to_gps(value))
# -- disable tests from upstream
def test_imshow(self):
return NotImplemented
def test_segmentrectangle():
patch = SegmentRectangle((1.1, 2.4), 10)
    assert patch.get_xy() == (1.1, 9.6)
assert numpy.isclose(patch.get_height(), 0.8)
assert numpy.isclose(patch.get_width(), 1.3)
assert patch.get_facecolor() == COLOR0
# check kwarg passing
patch = SegmentRectangle((1.1, 2.4), 10, facecolor='red')
assert patch.get_facecolor() == COLOR_CONVERTER.to_rgba('red')
# check valign
patch = SegmentRectangle((1.1, 2.4), 10, valign='top')
assert patch.get_xy() == (1.1, 9.2)
patch = SegmentRectangle((1.1, 2.4), 10, valign='bottom')
assert patch.get_xy() == (1.1, 10.0)
with pytest.raises(ValueError):
patch = SegmentRectangle((0, 1), 0, valign='blah')
|
gpl-3.0
|
OshynSong/scikit-learn
|
examples/decomposition/plot_pca_3d.py
|
354
|
2432
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Principal components analysis (PCA)
=========================================================
These figures aid in illustrating how a point cloud
can be very flat in one direction--which is where PCA
comes in to choose a direction that is not flat.
"""
print(__doc__)
# Authors: Gael Varoquaux
# Jaques Grobler
# Kevin Hughes
# License: BSD 3 clause
from sklearn.decomposition import PCA
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
###############################################################################
# Create the data
e = np.exp(1)
np.random.seed(4)
def pdf(x):
return 0.5 * (stats.norm(scale=0.25 / e).pdf(x)
+ stats.norm(scale=4 / e).pdf(x))
y = np.random.normal(scale=0.5, size=(30000))
x = np.random.normal(scale=0.5, size=(30000))
z = np.random.normal(scale=0.1, size=len(x))
density = pdf(x) * pdf(y)
pdf_z = pdf(5 * z)
density *= pdf_z
a = x + y
b = 2 * y
c = a - b + z
norm = np.sqrt(a.var() + b.var())
a /= norm
b /= norm
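# Note (added comment): c = a - b + z is, up to the small-scale noise z, a linear
# combination of a and b, so the point cloud is nearly planar -- this is the flat
# direction PCA is expected to recover in the figures below.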
###############################################################################
# Plot the figures
def plot_figs(fig_num, elev, azim):
fig = plt.figure(fig_num, figsize=(4, 3))
plt.clf()
ax = Axes3D(fig, rect=[0, 0, .95, 1], elev=elev, azim=azim)
ax.scatter(a[::10], b[::10], c[::10], c=density[::10], marker='+', alpha=.4)
Y = np.c_[a, b, c]
# Using SciPy's SVD, this would be:
# _, pca_score, V = scipy.linalg.svd(Y, full_matrices=False)
pca = PCA(n_components=3)
pca.fit(Y)
pca_score = pca.explained_variance_ratio_
V = pca.components_
x_pca_axis, y_pca_axis, z_pca_axis = V.T * pca_score / pca_score.min()
x_pca_axis, y_pca_axis, z_pca_axis = 3 * V.T
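    # Note (added comment): the line above replaces the score-scaled axes from the
    # previous line; only the fixed 3 * V.T scaling is used for the plotted plane.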
x_pca_plane = np.r_[x_pca_axis[:2], - x_pca_axis[1::-1]]
y_pca_plane = np.r_[y_pca_axis[:2], - y_pca_axis[1::-1]]
z_pca_plane = np.r_[z_pca_axis[:2], - z_pca_axis[1::-1]]
x_pca_plane.shape = (2, 2)
y_pca_plane.shape = (2, 2)
z_pca_plane.shape = (2, 2)
ax.plot_surface(x_pca_plane, y_pca_plane, z_pca_plane)
ax.w_xaxis.set_ticklabels([])
ax.w_yaxis.set_ticklabels([])
ax.w_zaxis.set_ticklabels([])
elev = -40
azim = -80
plot_figs(1, elev, azim)
elev = 30
azim = 20
plot_figs(2, elev, azim)
plt.show()
|
bsd-3-clause
|
Srisai85/numpy
|
numpy/lib/twodim_base.py
|
83
|
26903
|
""" Basic functions for manipulating 2d arrays
"""
from __future__ import division, absolute_import, print_function
from numpy.core.numeric import (
asanyarray, arange, zeros, greater_equal, multiply, ones, asarray,
where, int8, int16, int32, int64, empty, promote_types, diagonal,
)
from numpy.core import iinfo
__all__ = [
'diag', 'diagflat', 'eye', 'fliplr', 'flipud', 'rot90', 'tri', 'triu',
'tril', 'vander', 'histogram2d', 'mask_indices', 'tril_indices',
'tril_indices_from', 'triu_indices', 'triu_indices_from', ]
i1 = iinfo(int8)
i2 = iinfo(int16)
i4 = iinfo(int32)
def _min_int(low, high):
""" get small int that fits the range """
if high <= i1.max and low >= i1.min:
return int8
if high <= i2.max and low >= i2.min:
return int16
if high <= i4.max and low >= i4.min:
return int32
return int64
def fliplr(m):
"""
Flip array in the left/right direction.
Flip the entries in each row in the left/right direction.
Columns are preserved, but appear in a different order than before.
Parameters
----------
m : array_like
Input array, must be at least 2-D.
Returns
-------
f : ndarray
A view of `m` with the columns reversed. Since a view
is returned, this operation is :math:`\\mathcal O(1)`.
See Also
--------
flipud : Flip array in the up/down direction.
rot90 : Rotate array counterclockwise.
Notes
-----
Equivalent to A[:,::-1]. Requires the array to be at least 2-D.
Examples
--------
>>> A = np.diag([1.,2.,3.])
>>> A
array([[ 1., 0., 0.],
[ 0., 2., 0.],
[ 0., 0., 3.]])
>>> np.fliplr(A)
array([[ 0., 0., 1.],
[ 0., 2., 0.],
[ 3., 0., 0.]])
>>> A = np.random.randn(2,3,5)
>>> np.all(np.fliplr(A)==A[:,::-1,...])
True
"""
m = asanyarray(m)
if m.ndim < 2:
raise ValueError("Input must be >= 2-d.")
return m[:, ::-1]
def flipud(m):
"""
Flip array in the up/down direction.
Flip the entries in each column in the up/down direction.
Rows are preserved, but appear in a different order than before.
Parameters
----------
m : array_like
Input array.
Returns
-------
out : array_like
A view of `m` with the rows reversed. Since a view is
returned, this operation is :math:`\\mathcal O(1)`.
See Also
--------
fliplr : Flip array in the left/right direction.
rot90 : Rotate array counterclockwise.
Notes
-----
Equivalent to ``A[::-1,...]``.
Does not require the array to be two-dimensional.
Examples
--------
>>> A = np.diag([1.0, 2, 3])
>>> A
array([[ 1., 0., 0.],
[ 0., 2., 0.],
[ 0., 0., 3.]])
>>> np.flipud(A)
array([[ 0., 0., 3.],
[ 0., 2., 0.],
[ 1., 0., 0.]])
>>> A = np.random.randn(2,3,5)
>>> np.all(np.flipud(A)==A[::-1,...])
True
>>> np.flipud([1,2])
array([2, 1])
"""
m = asanyarray(m)
if m.ndim < 1:
raise ValueError("Input must be >= 1-d.")
return m[::-1, ...]
def rot90(m, k=1):
"""
Rotate an array by 90 degrees in the counter-clockwise direction.
The first two dimensions are rotated; therefore, the array must be at
least 2-D.
Parameters
----------
m : array_like
Array of two or more dimensions.
k : integer
Number of times the array is rotated by 90 degrees.
Returns
-------
y : ndarray
Rotated array.
See Also
--------
fliplr : Flip an array horizontally.
flipud : Flip an array vertically.
Examples
--------
>>> m = np.array([[1,2],[3,4]], int)
>>> m
array([[1, 2],
[3, 4]])
>>> np.rot90(m)
array([[2, 4],
[1, 3]])
>>> np.rot90(m, 2)
array([[4, 3],
[2, 1]])
"""
m = asanyarray(m)
if m.ndim < 2:
raise ValueError("Input must >= 2-d.")
k = k % 4
if k == 0:
return m
elif k == 1:
return fliplr(m).swapaxes(0, 1)
elif k == 2:
return fliplr(flipud(m))
else:
# k == 3
return fliplr(m.swapaxes(0, 1))
def eye(N, M=None, k=0, dtype=float):
"""
Return a 2-D array with ones on the diagonal and zeros elsewhere.
Parameters
----------
N : int
Number of rows in the output.
M : int, optional
Number of columns in the output. If None, defaults to `N`.
k : int, optional
Index of the diagonal: 0 (the default) refers to the main diagonal,
a positive value refers to an upper diagonal, and a negative value
to a lower diagonal.
dtype : data-type, optional
Data-type of the returned array.
Returns
-------
I : ndarray of shape (N,M)
An array where all elements are equal to zero, except for the `k`-th
diagonal, whose values are equal to one.
See Also
--------
identity : (almost) equivalent function
diag : diagonal 2-D array from a 1-D array specified by the user.
Examples
--------
>>> np.eye(2, dtype=int)
array([[1, 0],
[0, 1]])
>>> np.eye(3, k=1)
array([[ 0., 1., 0.],
[ 0., 0., 1.],
[ 0., 0., 0.]])
"""
if M is None:
M = N
m = zeros((N, M), dtype=dtype)
if k >= M:
return m
if k >= 0:
i = k
else:
i = (-k) * M
m[:M-k].flat[i::M+1] = 1
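    # (added note) stepping by M+1 through the flattened row-major array moves one
    # row down and one column right per step, i.e. along the k-th diagonal; the row
    # slice m[:M-k] prevents the strided assignment from wrapping past the last column.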
return m
def diag(v, k=0):
"""
Extract a diagonal or construct a diagonal array.
See the more detailed documentation for ``numpy.diagonal`` if you use this
function to extract a diagonal and wish to write to the resulting array;
whether it returns a copy or a view depends on what version of numpy you
are using.
Parameters
----------
v : array_like
If `v` is a 2-D array, return a copy of its `k`-th diagonal.
If `v` is a 1-D array, return a 2-D array with `v` on the `k`-th
diagonal.
k : int, optional
Diagonal in question. The default is 0. Use `k>0` for diagonals
above the main diagonal, and `k<0` for diagonals below the main
diagonal.
Returns
-------
out : ndarray
The extracted diagonal or constructed diagonal array.
See Also
--------
diagonal : Return specified diagonals.
diagflat : Create a 2-D array with the flattened input as a diagonal.
trace : Sum along diagonals.
triu : Upper triangle of an array.
tril : Lower triangle of an array.
Examples
--------
>>> x = np.arange(9).reshape((3,3))
>>> x
array([[0, 1, 2],
[3, 4, 5],
[6, 7, 8]])
>>> np.diag(x)
array([0, 4, 8])
>>> np.diag(x, k=1)
array([1, 5])
>>> np.diag(x, k=-1)
array([3, 7])
>>> np.diag(np.diag(x))
array([[0, 0, 0],
[0, 4, 0],
[0, 0, 8]])
"""
v = asanyarray(v)
s = v.shape
if len(s) == 1:
n = s[0]+abs(k)
res = zeros((n, n), v.dtype)
if k >= 0:
i = k
else:
i = (-k) * n
res[:n-k].flat[i::n+1] = v
return res
elif len(s) == 2:
return diagonal(v, k)
else:
raise ValueError("Input must be 1- or 2-d.")
def diagflat(v, k=0):
"""
Create a two-dimensional array with the flattened input as a diagonal.
Parameters
----------
v : array_like
Input data, which is flattened and set as the `k`-th
diagonal of the output.
k : int, optional
Diagonal to set; 0, the default, corresponds to the "main" diagonal,
a positive (negative) `k` giving the number of the diagonal above
(below) the main.
Returns
-------
out : ndarray
The 2-D output array.
See Also
--------
diag : MATLAB work-alike for 1-D and 2-D arrays.
diagonal : Return specified diagonals.
trace : Sum along diagonals.
Examples
--------
>>> np.diagflat([[1,2], [3,4]])
array([[1, 0, 0, 0],
[0, 2, 0, 0],
[0, 0, 3, 0],
[0, 0, 0, 4]])
>>> np.diagflat([1,2], 1)
array([[0, 1, 0],
[0, 0, 2],
[0, 0, 0]])
"""
try:
wrap = v.__array_wrap__
except AttributeError:
wrap = None
v = asarray(v).ravel()
s = len(v)
n = s + abs(k)
res = zeros((n, n), v.dtype)
if (k >= 0):
i = arange(0, n-k)
fi = i+k+i*n
else:
i = arange(0, n+k)
fi = i+(i-k)*n
res.flat[fi] = v
if not wrap:
return res
return wrap(res)
def tri(N, M=None, k=0, dtype=float):
"""
An array with ones at and below the given diagonal and zeros elsewhere.
Parameters
----------
N : int
Number of rows in the array.
M : int, optional
Number of columns in the array.
By default, `M` is taken equal to `N`.
k : int, optional
The sub-diagonal at and below which the array is filled.
`k` = 0 is the main diagonal, while `k` < 0 is below it,
and `k` > 0 is above. The default is 0.
dtype : dtype, optional
Data type of the returned array. The default is float.
Returns
-------
tri : ndarray of shape (N, M)
Array with its lower triangle filled with ones and zero elsewhere;
        in other words ``T[i,j] == 1`` for ``j <= i + k``, 0 otherwise.
Examples
--------
>>> np.tri(3, 5, 2, dtype=int)
array([[1, 1, 1, 0, 0],
[1, 1, 1, 1, 0],
[1, 1, 1, 1, 1]])
>>> np.tri(3, 5, -1)
array([[ 0., 0., 0., 0., 0.],
[ 1., 0., 0., 0., 0.],
[ 1., 1., 0., 0., 0.]])
"""
if M is None:
M = N
m = greater_equal.outer(arange(N, dtype=_min_int(0, N)),
arange(-k, M-k, dtype=_min_int(-k, M - k)))
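    # (added note) the outer comparison gives m[i, j] = (i >= j - k), i.e. ones
    # exactly where j <= i + k, without building intermediate index grids.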
# Avoid making a copy if the requested type is already bool
m = m.astype(dtype, copy=False)
return m
def tril(m, k=0):
"""
Lower triangle of an array.
Return a copy of an array with elements above the `k`-th diagonal zeroed.
Parameters
----------
m : array_like, shape (M, N)
Input array.
k : int, optional
Diagonal above which to zero elements. `k = 0` (the default) is the
main diagonal, `k < 0` is below it and `k > 0` is above.
Returns
-------
tril : ndarray, shape (M, N)
Lower triangle of `m`, of same shape and data-type as `m`.
See Also
--------
triu : same thing, only for the upper triangle
Examples
--------
>>> np.tril([[1,2,3],[4,5,6],[7,8,9],[10,11,12]], -1)
array([[ 0, 0, 0],
[ 4, 0, 0],
[ 7, 8, 0],
[10, 11, 12]])
"""
m = asanyarray(m)
mask = tri(*m.shape[-2:], k=k, dtype=bool)
return where(mask, m, zeros(1, m.dtype))
def triu(m, k=0):
"""
Upper triangle of an array.
Return a copy of a matrix with the elements below the `k`-th diagonal
zeroed.
Please refer to the documentation for `tril` for further details.
See Also
--------
tril : lower triangle of an array
Examples
--------
>>> np.triu([[1,2,3],[4,5,6],[7,8,9],[10,11,12]], -1)
array([[ 1, 2, 3],
[ 4, 5, 6],
[ 0, 8, 9],
[ 0, 0, 12]])
"""
m = asanyarray(m)
mask = tri(*m.shape[-2:], k=k-1, dtype=bool)
return where(mask, zeros(1, m.dtype), m)
# Originally borrowed from John Hunter and matplotlib
def vander(x, N=None, increasing=False):
"""
Generate a Vandermonde matrix.
The columns of the output matrix are powers of the input vector. The
order of the powers is determined by the `increasing` boolean argument.
Specifically, when `increasing` is False, the `i`-th output column is
the input vector raised element-wise to the power of ``N - i - 1``. Such
a matrix with a geometric progression in each row is named for Alexandre-
Theophile Vandermonde.
Parameters
----------
x : array_like
1-D input array.
N : int, optional
Number of columns in the output. If `N` is not specified, a square
array is returned (``N = len(x)``).
increasing : bool, optional
Order of the powers of the columns. If True, the powers increase
from left to right, if False (the default) they are reversed.
.. versionadded:: 1.9.0
Returns
-------
out : ndarray
Vandermonde matrix. If `increasing` is False, the first column is
``x^(N-1)``, the second ``x^(N-2)`` and so forth. If `increasing` is
True, the columns are ``x^0, x^1, ..., x^(N-1)``.
See Also
--------
polynomial.polynomial.polyvander
Examples
--------
>>> x = np.array([1, 2, 3, 5])
>>> N = 3
>>> np.vander(x, N)
array([[ 1, 1, 1],
[ 4, 2, 1],
[ 9, 3, 1],
[25, 5, 1]])
>>> np.column_stack([x**(N-1-i) for i in range(N)])
array([[ 1, 1, 1],
[ 4, 2, 1],
[ 9, 3, 1],
[25, 5, 1]])
>>> x = np.array([1, 2, 3, 5])
>>> np.vander(x)
array([[ 1, 1, 1, 1],
[ 8, 4, 2, 1],
[ 27, 9, 3, 1],
[125, 25, 5, 1]])
>>> np.vander(x, increasing=True)
array([[ 1, 1, 1, 1],
[ 1, 2, 4, 8],
[ 1, 3, 9, 27],
[ 1, 5, 25, 125]])
The determinant of a square Vandermonde matrix is the product
of the differences between the values of the input vector:
>>> np.linalg.det(np.vander(x))
48.000000000000043
>>> (5-3)*(5-2)*(5-1)*(3-2)*(3-1)*(2-1)
48
"""
x = asarray(x)
if x.ndim != 1:
raise ValueError("x must be a one-dimensional array or sequence.")
if N is None:
N = len(x)
v = empty((len(x), N), dtype=promote_types(x.dtype, int))
tmp = v[:, ::-1] if not increasing else v
if N > 0:
tmp[:, 0] = 1
if N > 1:
tmp[:, 1:] = x[:, None]
multiply.accumulate(tmp[:, 1:], out=tmp[:, 1:], axis=1)
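        # (added note) the columns were filled with x above; the running product
        # along each row turns column j of the view into x**j, and writing through
        # the reversed view yields decreasing powers when `increasing` is False.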
return v
def histogram2d(x, y, bins=10, range=None, normed=False, weights=None):
"""
Compute the bi-dimensional histogram of two data samples.
Parameters
----------
x : array_like, shape (N,)
An array containing the x coordinates of the points to be
histogrammed.
y : array_like, shape (N,)
An array containing the y coordinates of the points to be
histogrammed.
bins : int or array_like or [int, int] or [array, array], optional
The bin specification:
* If int, the number of bins for the two dimensions (nx=ny=bins).
* If array_like, the bin edges for the two dimensions
(x_edges=y_edges=bins).
* If [int, int], the number of bins in each dimension
(nx, ny = bins).
* If [array, array], the bin edges in each dimension
(x_edges, y_edges = bins).
* A combination [int, array] or [array, int], where int
is the number of bins and array is the bin edges.
range : array_like, shape(2,2), optional
The leftmost and rightmost edges of the bins along each dimension
(if not specified explicitly in the `bins` parameters):
``[[xmin, xmax], [ymin, ymax]]``. All values outside of this range
will be considered outliers and not tallied in the histogram.
normed : bool, optional
If False, returns the number of samples in each bin. If True,
returns the bin density ``bin_count / sample_count / bin_area``.
weights : array_like, shape(N,), optional
An array of values ``w_i`` weighing each sample ``(x_i, y_i)``.
Weights are normalized to 1 if `normed` is True. If `normed` is
False, the values of the returned histogram are equal to the sum of
the weights belonging to the samples falling into each bin.
Returns
-------
H : ndarray, shape(nx, ny)
The bi-dimensional histogram of samples `x` and `y`. Values in `x`
are histogrammed along the first dimension and values in `y` are
histogrammed along the second dimension.
xedges : ndarray, shape(nx,)
The bin edges along the first dimension.
yedges : ndarray, shape(ny,)
The bin edges along the second dimension.
See Also
--------
histogram : 1D histogram
histogramdd : Multidimensional histogram
Notes
-----
When `normed` is True, then the returned histogram is the sample
density, defined such that the sum over bins of the product
``bin_value * bin_area`` is 1.
Please note that the histogram does not follow the Cartesian convention
where `x` values are on the abscissa and `y` values on the ordinate
axis. Rather, `x` is histogrammed along the first dimension of the
array (vertical), and `y` along the second dimension of the array
(horizontal). This ensures compatibility with `histogramdd`.
Examples
--------
>>> import matplotlib as mpl
>>> import matplotlib.pyplot as plt
Construct a 2D-histogram with variable bin width. First define the bin
edges:
>>> xedges = [0, 1, 1.5, 3, 5]
>>> yedges = [0, 2, 3, 4, 6]
Next we create a histogram H with random bin content:
>>> x = np.random.normal(3, 1, 100)
>>> y = np.random.normal(1, 1, 100)
>>> H, xedges, yedges = np.histogram2d(y, x, bins=(xedges, yedges))
Or we fill the histogram H with a determined bin content:
>>> H = np.ones((4, 4)).cumsum().reshape(4, 4)
>>> print H[::-1] # This shows the bin content in the order as plotted
[[ 13. 14. 15. 16.]
[ 9. 10. 11. 12.]
[ 5. 6. 7. 8.]
[ 1. 2. 3. 4.]]
Imshow can only do an equidistant representation of bins:
>>> fig = plt.figure(figsize=(7, 3))
>>> ax = fig.add_subplot(131)
>>> ax.set_title('imshow: equidistant')
>>> im = plt.imshow(H, interpolation='nearest', origin='low',
extent=[xedges[0], xedges[-1], yedges[0], yedges[-1]])
pcolormesh can display exact bin edges:
>>> ax = fig.add_subplot(132)
>>> ax.set_title('pcolormesh: exact bin edges')
>>> X, Y = np.meshgrid(xedges, yedges)
>>> ax.pcolormesh(X, Y, H)
>>> ax.set_aspect('equal')
NonUniformImage displays exact bin edges with interpolation:
>>> ax = fig.add_subplot(133)
>>> ax.set_title('NonUniformImage: interpolated')
>>> im = mpl.image.NonUniformImage(ax, interpolation='bilinear')
>>> xcenters = xedges[:-1] + 0.5 * (xedges[1:] - xedges[:-1])
>>> ycenters = yedges[:-1] + 0.5 * (yedges[1:] - yedges[:-1])
>>> im.set_data(xcenters, ycenters, H)
>>> ax.images.append(im)
>>> ax.set_xlim(xedges[0], xedges[-1])
>>> ax.set_ylim(yedges[0], yedges[-1])
>>> ax.set_aspect('equal')
>>> plt.show()
"""
from numpy import histogramdd
try:
N = len(bins)
except TypeError:
N = 1
if N != 1 and N != 2:
xedges = yedges = asarray(bins, float)
bins = [xedges, yedges]
hist, edges = histogramdd([x, y], bins, range, normed, weights)
return hist, edges[0], edges[1]
def mask_indices(n, mask_func, k=0):
"""
Return the indices to access (n, n) arrays, given a masking function.
Assume `mask_func` is a function that, for a square array a of size
``(n, n)`` with a possible offset argument `k`, when called as
``mask_func(a, k)`` returns a new array with zeros in certain locations
(functions like `triu` or `tril` do precisely this). Then this function
returns the indices where the non-zero values would be located.
Parameters
----------
n : int
The returned indices will be valid to access arrays of shape (n, n).
mask_func : callable
A function whose call signature is similar to that of `triu`, `tril`.
That is, ``mask_func(x, k)`` returns a boolean array, shaped like `x`.
`k` is an optional argument to the function.
k : scalar
An optional argument which is passed through to `mask_func`. Functions
like `triu`, `tril` take a second argument that is interpreted as an
offset.
Returns
-------
indices : tuple of arrays.
The `n` arrays of indices corresponding to the locations where
``mask_func(np.ones((n, n)), k)`` is True.
See Also
--------
triu, tril, triu_indices, tril_indices
Notes
-----
.. versionadded:: 1.4.0
Examples
--------
These are the indices that would allow you to access the upper triangular
part of any 3x3 array:
>>> iu = np.mask_indices(3, np.triu)
For example, if `a` is a 3x3 array:
>>> a = np.arange(9).reshape(3, 3)
>>> a
array([[0, 1, 2],
[3, 4, 5],
[6, 7, 8]])
>>> a[iu]
array([0, 1, 2, 4, 5, 8])
An offset can be passed also to the masking function. This gets us the
indices starting on the first diagonal right of the main one:
>>> iu1 = np.mask_indices(3, np.triu, 1)
with which we now extract only three elements:
>>> a[iu1]
array([1, 2, 5])
"""
m = ones((n, n), int)
a = mask_func(m, k)
return where(a != 0)
def tril_indices(n, k=0, m=None):
"""
Return the indices for the lower-triangle of an (n, m) array.
Parameters
----------
n : int
The row dimension of the arrays for which the returned
indices will be valid.
k : int, optional
Diagonal offset (see `tril` for details).
m : int, optional
.. versionadded:: 1.9.0
The column dimension of the arrays for which the returned
arrays will be valid.
By default `m` is taken equal to `n`.
Returns
-------
inds : tuple of arrays
The indices for the triangle. The returned tuple contains two arrays,
each with the indices along one dimension of the array.
See also
--------
triu_indices : similar function, for upper-triangular.
mask_indices : generic function accepting an arbitrary mask function.
tril, triu
Notes
-----
.. versionadded:: 1.4.0
Examples
--------
Compute two different sets of indices to access 4x4 arrays, one for the
lower triangular part starting at the main diagonal, and one starting two
diagonals further right:
>>> il1 = np.tril_indices(4)
>>> il2 = np.tril_indices(4, 2)
Here is how they can be used with a sample array:
>>> a = np.arange(16).reshape(4, 4)
>>> a
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11],
[12, 13, 14, 15]])
Both for indexing:
>>> a[il1]
array([ 0, 4, 5, 8, 9, 10, 12, 13, 14, 15])
And for assigning values:
>>> a[il1] = -1
>>> a
array([[-1, 1, 2, 3],
[-1, -1, 6, 7],
[-1, -1, -1, 11],
[-1, -1, -1, -1]])
These cover almost the whole array (two diagonals right of the main one):
>>> a[il2] = -10
>>> a
array([[-10, -10, -10, 3],
[-10, -10, -10, -10],
[-10, -10, -10, -10],
[-10, -10, -10, -10]])
"""
return where(tri(n, m, k=k, dtype=bool))
def tril_indices_from(arr, k=0):
"""
Return the indices for the lower-triangle of arr.
See `tril_indices` for full details.
Parameters
----------
arr : array_like
The indices will be valid for square arrays whose dimensions are
the same as arr.
k : int, optional
Diagonal offset (see `tril` for details).
See Also
--------
tril_indices, tril
Notes
-----
.. versionadded:: 1.4.0
"""
if arr.ndim != 2:
raise ValueError("input array must be 2-d")
return tril_indices(arr.shape[-2], k=k, m=arr.shape[-1])
def triu_indices(n, k=0, m=None):
"""
Return the indices for the upper-triangle of an (n, m) array.
Parameters
----------
n : int
The size of the arrays for which the returned indices will
be valid.
k : int, optional
Diagonal offset (see `triu` for details).
m : int, optional
.. versionadded:: 1.9.0
The column dimension of the arrays for which the returned
arrays will be valid.
By default `m` is taken equal to `n`.
Returns
-------
inds : tuple, shape(2) of ndarrays, shape(`n`)
The indices for the triangle. The returned tuple contains two arrays,
each with the indices along one dimension of the array. Can be used
to slice a ndarray of shape(`n`, `n`).
See also
--------
tril_indices : similar function, for lower-triangular.
mask_indices : generic function accepting an arbitrary mask function.
triu, tril
Notes
-----
.. versionadded:: 1.4.0
Examples
--------
Compute two different sets of indices to access 4x4 arrays, one for the
upper triangular part starting at the main diagonal, and one starting two
diagonals further right:
>>> iu1 = np.triu_indices(4)
>>> iu2 = np.triu_indices(4, 2)
Here is how they can be used with a sample array:
>>> a = np.arange(16).reshape(4, 4)
>>> a
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11],
[12, 13, 14, 15]])
Both for indexing:
>>> a[iu1]
array([ 0, 1, 2, 3, 5, 6, 7, 10, 11, 15])
And for assigning values:
>>> a[iu1] = -1
>>> a
array([[-1, -1, -1, -1],
[ 4, -1, -1, -1],
[ 8, 9, -1, -1],
[12, 13, 14, -1]])
These cover only a small part of the whole array (two diagonals right
of the main one):
>>> a[iu2] = -10
>>> a
array([[ -1, -1, -10, -10],
[ 4, -1, -1, -10],
[ 8, 9, -1, -1],
[ 12, 13, 14, -1]])
"""
return where(~tri(n, m, k=k-1, dtype=bool))
def triu_indices_from(arr, k=0):
"""
Return the indices for the upper-triangle of arr.
See `triu_indices` for full details.
Parameters
----------
arr : ndarray, shape(N, N)
The indices will be valid for square arrays.
k : int, optional
Diagonal offset (see `triu` for details).
Returns
-------
triu_indices_from : tuple, shape(2) of ndarray, shape(N)
Indices for the upper-triangle of `arr`.
See Also
--------
triu_indices, triu
Notes
-----
.. versionadded:: 1.4.0
"""
if arr.ndim != 2:
raise ValueError("input array must be 2-d")
return triu_indices(arr.shape[-2], k=k, m=arr.shape[-1])
|
bsd-3-clause
|
jsouza/pamtl
|
src/mtl/omtl.py
|
1
|
12248
|
import glob
from itertools import izip
import os
import sys
from sklearn.base import BaseEstimator, RegressorMixin, ClassifierMixin
import numpy as np
from sklearn.grid_search import GridSearchCV
from sklearn.metrics import mean_absolute_error
from sklearn.preprocessing import StandardScaler
from sklearn.utils import check_array, check_X_y, extmath
__author__ = 'desouza'
def compound_descriptor_online(features, task_id, task_num, feats_num):
num_samples, num_feats = features.shape
if num_feats != feats_num:
raise ValueError(
"number of features is different than the one declared.")
base = task_id * num_feats
offset = base + num_feats
task_block = np.zeros((num_samples, num_feats * task_num))
task_block[:, base:offset] = features
return task_block
def compound_descriptor(task_features, task_id, task_num):
task_features = check_array(task_features)
num_samples, num_feats = task_features.shape
base = task_id * num_feats
offset = base + num_feats
task_block = np.zeros((num_samples, num_feats * task_num))
task_block[:, base:offset] = task_features
return task_block
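# Illustration (added comment): compound_descriptor places a task's features in its
# own block of a long zero vector, e.g. with 3 tasks of 2 features each, a sample
# [x1, x2] for task_id=1 becomes [0, 0, x1, x2, 0, 0].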
def compound_descriptor_at_once(tasks_features, tasks_labels):
"""
Prepares the input for the MTL Perceptron.
:param tasks_features list containing the feature vectors of the tasks (
each task is one item in the list)
:param tasks_labels list containing the labels of the vectors for each task
"""
task_num_feats = len(tasks_features)
task_num_labels = len(tasks_labels)
if task_num_feats != task_num_labels:
raise ValueError("number of tasks differ for features and labels.")
# checks if all tasks have the same number of features
num_feats = 0
for i, task_feats in enumerate(tasks_features):
num_feats = task_feats.shape[1]
if i > 0:
if task_feats.shape[1] != num_feats:
raise ValueError(
"number of features is different among the tasks.")
tasks_labels = [np.atleast_2d(labels).T for labels in tasks_labels]
compound_feats = []
for task_id, task_feats in enumerate(tasks_features):
base = task_id * num_feats
offset = base + num_feats
num_samples, num_feats = task_feats.shape
task_block = np.zeros((num_samples, num_feats * task_num_feats))
task_block[:, base:offset] = task_feats
compound_feats.append(task_block)
feats_arr = np.row_stack(tuple(compound_feats))
labels_arr = np.row_stack(tuple(tasks_labels))
return feats_arr, labels_arr
class OMTLClassifier(BaseEstimator, ClassifierMixin):
"""
Linear Perceptron Classifier proposed by Cavallanti et al.
"""
def __init__(self, task_num, feats_num, interaction="half"):
self.task_num = task_num
self.feats_num = feats_num
# inits coeficients to the number of tasks * features
self.coef_ = np.zeros(self.feats_num * self.task_num)
# inits interaction matrix
# by default, half update
self.A = (1.0 / (task_num + 1)) * ((np.identity(task_num) + 1) * 2)
if interaction == "ipl":
self.A = np.identity(self.task_num)
# self.A = (1.0 / task_num) * np.identity(self.task_num)
# self.A = np.linalg.inv(self.A)
# number of instances seen
self.t = 0
# number of instances discarded for being all zeroes
self.discarded = 0
# number of updates made to the coeficients
self.s = 0
def _get_task_id(self, X_inst):
a = np.nonzero(X_inst != 0)
first_element = a[0][0]
task_num = first_element / self.feats_num
return task_num
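    # Note (added comment): _get_task_id inverts the compound encoding above -- the
    # index of the first non-zero feature, integer-divided by feats_num, identifies
    # the task block that the sample occupies.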
def fit(self, X, y):
# y = np.atleast_2d(y).T
        X, y = check_X_y(X, y)
for x_i, y_i in izip(X, y):
self.partial_fit(x_i, y_i)
return self
def partial_fit(self, X_t, y_t):
# checks if features are non zero
if np.sum(X_t) == 0:
self.discarded += 1
return self
# updates the number of instances seen
self.t += 1
task_id_t = self._get_task_id(X_t)
y_pred_t = np.sign(np.dot(self.coef_, X_t))
if y_pred_t != y_t:
kron = np.dot(np.kron(self.A, np.identity(self.feats_num)), X_t)
# print kron.shape
self.coef_ = self.coef_ + y_t * kron
self.s += 1
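        # Note (added comment): on a mistake, the product kron(A, I) . X_t copies the
        # example's feature block into every task block scaled by the corresponding
        # entry of the interaction matrix A (the multitask update of Cavallanti et al.).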
# def partial_fit(self, X_t, y_t):
# # checks if features are non zero
# if np.sum(X_t) == 0:
# self.discarded += 1
# return self
#
# # updates the number of instances seen
# self.t += 1
#
# task_id_t = self._get_task_id(X_t)
#
# y_pred_t = np.sign(np.dot(self.coef_, X_t))
# if y_pred_t != y_t:
# tbegin = task_id_t * self.feats_num
# tend = tbegin + self.feats_num
#
# for task in xrange(self.task_num):
# begin = task * self.feats_num
# end = begin + self.feats_num
# self.coef_[begin:end] = self.coef_[begin:end] + y_t *
# self.A[task, task_id_t] * X_t[tbegin:tend]
#
# self.s += 1
def predict(self, X):
X = check_array(X)
y_preds = np.sign(np.dot(self.coef_, X.T))
return y_preds
def main_mtl():
input_dir = "/home/desouza/Projects/qe_da/domains_large_bb17/"
domains_names = ["it", "ted", "wsd"]
tasks_features = []
tasks_labels = []
est = PAMTLRegressor(3, 17)
for task_id, domain_name in enumerate(domains_names):
# tasks_features.append(
# np.loadtxt(glob.glob(input_dir + os.sep + "features/" + domain_name
# + "*.tsv")[0]))
X_train = np.loadtxt(
glob.glob(input_dir + os.sep + "features/" + domain_name + "*.tsv")[
0])
X_train[np.isnan(X_train)] = 0
task_block = compound_descriptor(X_train, task_id, len(domains_names))
# tasks_labels.append(
# np.loadtxt(glob.glob(input_dir + os.sep + "labels/" +
# domain_name + "*.hter")[0], ndmin=2))
y_train = np.loadtxt(
glob.glob(input_dir + os.sep + "labels/" + domain_name + "*.hter")[
0], ndmin=2)
print task_block.shape
print y_train.shape
est.fit(task_block, y_train)
print est.coef_
def main_mtl_test():
shuffles = int(sys.argv[1])
work_dir = sys.argv[2]
domains = ["it", "wsd", "ted"]
for seed in range(40, 40 + shuffles, 1):
print "### ", seed
input_dir = work_dir + os.sep + "stl_" + str(seed) + os.sep
src_feats_list = []
src_labels_list = []
for task_id, src_domain in enumerate(domains):
src_feat_paths = glob.glob(
input_dir + "features/*" + src_domain + "*src.csv")
src_label_paths = glob.glob(
input_dir + "labels/*" + src_domain + "*src.hter")
if len(src_feat_paths) != len(src_label_paths):
print "number of source feature files and label files " \
"differs: %d " \
"and %d" % (
len(src_feat_paths), len(src_label_paths))
sys.exit(1)
src_feat_paths_sorted = sorted(src_feat_paths)
src_label_paths_sorted = sorted(src_label_paths)
# for i in range(len(src_feat_paths_sorted)):
# only 100%
for i in [9]:
X_src = np.nan_to_num(
np.loadtxt(src_feat_paths_sorted[i], delimiter=","))
y_src = np.clip(np.loadtxt(src_label_paths_sorted[i], ndmin=2),
0, 1)
X_src_comp = compound_descriptor(X_src, task_id, len(domains))
# print X_src_comp.shape
src_feats_list.append(X_src_comp)
src_labels_list.append(y_src)
X_multiple_src = np.row_stack(tuple(src_feats_list))
y_multiple_src = np.row_stack(tuple(src_labels_list))
print X_multiple_src.shape
print y_multiple_src.shape
# shuffled_rows = np.arange(X_multiple_src.shape[0])
# np.random.shuffle(shuffled_rows)
# X_src_shuffled = X_multiple_src[shuffled_rows,:]
# y_src_shuffled = y_multiple_src[shuffled_rows,:]
# print X_src_shuffled.shape
# print y_src_shuffled.shape
param_grid = {
"C": [0.001, 0.01, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9,
1.0],
"epsilon": [0.0001, 0.001, 0.01, 0.1],
"loss": ["pa", "pai", "paii"]}
search = GridSearchCV(PAMTLRegressor(3, 17), param_grid,
scoring='mean_absolute_error', n_jobs=8,
iid=False, refit=True, cv=5, verbose=1)
# est.fit(X_src_shuffled, y_src_shuffled)
# est.fit(X_multiple_src, y_multiple_src)
search.fit(X_multiple_src, y_multiple_src)
for task_id, tgt_domain in enumerate(domains):
tgt_feat_paths = glob.glob(
input_dir + "features/*" + tgt_domain + "*tgt.csv")
tgt_label_paths = glob.glob(
input_dir + "labels/*" + tgt_domain + "*tgt.hter")
tgt_feat_paths_sorted = sorted(tgt_feat_paths)
tgt_label_paths_sorted = sorted(tgt_label_paths)
X_tgt = np.nan_to_num(
np.loadtxt(tgt_feat_paths_sorted[0], delimiter=","))
# now there is only one label file for each target proportion
y_tgt = np.clip(np.loadtxt(tgt_label_paths_sorted[0]), 0, 1)
X_tgt_comp = compound_descriptor(X_tgt, task_id, len(domains))
# y_preds = est.predict(X_tgt_comp)
y_preds = search.predict(X_tgt_comp)
mae = mean_absolute_error(y_tgt, y_preds)
print "Domain %s\tMAE = %2.4f" % (domains[task_id], mae)
def main_mtl_test_pooling():
seed = sys.argv[1]
dir = sys.argv[2]
input_dir = "/home/desouza/Projects/qe_da/domains_large_bb17/" + dir + \
"/stl_" + seed + "/"
patt = "dom"
src_feat_paths = glob.glob(input_dir + "features/*" + patt + "*.csv")
src_label_paths = glob.glob(input_dir + "labels/*" + patt + "*.hter")
if len(src_feat_paths) != len(src_label_paths):
print "number of source feature files and label files differs: %d and " \
"" \
"" \
"" \
"%d" % (
len(src_feat_paths), len(src_label_paths))
sys.exit(1)
src_feat_paths_sorted = sorted(src_feat_paths)
src_label_paths_sorted = sorted(src_label_paths)
domains = ["it", "ted", "wsd"]
# for proportion in range(len(src_feat_paths_sorted)):
for proportion in [9]:
print "# Proportion %s" % proportion
X_src = np.loadtxt(src_feat_paths_sorted[proportion], delimiter=",")
y_src = np.loadtxt(src_label_paths_sorted[proportion])
# scales according to proportion
src_scaler = StandardScaler().fit(X_src)
X_src = src_scaler.transform(X_src)
X_src_comp = compound_descriptor_at_once(X_src, y_src)
est = PAMTLRegressor(17, 3)
est.partial_fit(X_src, y_src)
for task_id, domain in enumerate(domains):
print "## Domain %s" % domain
tgt_feat_paths = glob.glob(
input_dir + "features/" + domain + "*.tgt.csv")
tgt_label_paths = glob.glob(
input_dir + "labels/" + domain + "*.tgt.hter")
tgt_feat_paths_sorted = sorted(tgt_feat_paths)
tgt_label_paths_sorted = sorted(tgt_label_paths)
X_tgt = np.loadtxt(tgt_feat_paths_sorted[0], delimiter=",")
y_tgt = np.loadtxt(tgt_label_paths_sorted[0])
print "### Test on %s" % os.path.basename(tgt_feat_paths_sorted[0])
X_tgt_comp = compound_descriptor(X_tgt, task_id, len(domains))
y_preds = est.predict(X_tgt_comp)
mae = mean_absolute_error(y_tgt, y_preds)
print "MAE = %2.4f" % mae
if __name__ == "__main__":
main_mtl_test()
# main_mtl()
|
mit
|
fire-rs-laas/fire-rs-saop
|
python/fire_rs/demo_seganosa.py
|
1
|
9606
|
# Copyright (c) 2019, CNRS-LAAS
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Demo script showcasing propagation and planning in Galicia near SEGANOSA facilities"""
import os
import datetime
import logging
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.cm
import fire_rs.geodata.environment as g_environment
import fire_rs.geodata.display as display
from fire_rs.geodata.geo_data import GeoData
from fire_rs.firemodel.propagation import Environment, FirePropagation, TimedPoint
import fire_rs.planning.new_planning as planning
from fire_rs.planning.display import TrajectoryDisplayExtension, plot_plan_trajectories
# Set logger
FORMAT = '%(asctime)-23s %(levelname)-8s [%(name)s]: %(message)s'
logging.basicConfig(format=FORMAT, level=logging.INFO)
logger = logging.getLogger(__name__)
logger.setLevel(logging.NOTSET)
planning.up.set_logger(logger)
def plot_sr_with_background(sr: 'planning.up.SearchResult', geodatadisplay, time_range,
output_options_plot,
plan: 'Union[str, int]' = 'final'):
"""Plot a plan trajectories with background geographic information in a geodata display."""
# Draw background layers
for layer in output_options_plot['background']:
if layer == 'elevation_shade':
geodatadisplay.draw_elevation_shade(
with_colorbar=output_options_plot.get('colorbar', True), layer='elevation')
if layer == 'elevation_planning_shade':
geodatadisplay.draw_elevation_shade(
with_colorbar=output_options_plot.get('colorbar', True), layer='elevation_planning')
elif layer == 'ignition_shade':
geodatadisplay.draw_ignition_shade(
with_colorbar=output_options_plot.get('colorbar', True))
# elif layer == 'observedcells':
# geodatadisplay.TrajectoryDisplayExtension.draw_observation_map(
# planner.expected_observed_map(layer_name="expected_observed"),
# layer='expected_observed', color='green', alpha=0.9)
# geodatadisplay.TrajectoryDisplayExtension.draw_observation_map(
# planner.expected_ignited_map(layer_name="expected_ignited"),
# layer='expected_ignited', color='red', alpha=0.9)
elif layer == 'ignition_contour':
try:
geodatadisplay.draw_ignition_contour(with_labels=True)
except ValueError as e:
logger.exception("ValueError while drawing ignition contour")
elif layer == 'wind_quiver':
geodatadisplay.draw_wind_quiver()
elif layer == 'utilitymap':
geodatadisplay.TrajectoryDisplayExtension.draw_utility_shade(
geodata=GeoData.from_cpp_raster(sr.final_plan().utility_map(), 'utility'),
layer='utility', vmin=0., vmax=1.)
plot_plan_trajectories(sr.plan(plan), geodatadisplay,
layers=output_options_plot["foreground"], time_range=time_range)
if __name__ == "__main__":
    # WORLD SETTINGS
FIRERS_DATA_FOLDER = os.environ['FIRERS_DATA']
FIRERS_DEM_DATA = os.path.join(FIRERS_DATA_FOLDER,
'dem')
FIRERS_WIND_DATA = os.path.join(FIRERS_DATA_FOLDER,
'wind')
FIRERS_LANDCOVER_DATA = os.path.join(FIRERS_DATA_FOLDER,
'landcover')
the_world = g_environment.World(elevation_path=FIRERS_DEM_DATA,
wind_path=FIRERS_WIND_DATA,
landcover_path=FIRERS_LANDCOVER_DATA)
seganosa_fire_env = Environment([[2799134.0, 2805134.0], [2296388.0, 2302388.0]],
3, 0, the_world)
# 6km by 6km around seganosa. Wind: 3m/s W->E
# FIRE PROPAGATION
## Fire started 6 hours ago. Predict until 2 hours in the future
fire_prop = FirePropagation(seganosa_fire_env)
four_hours_ago = (datetime.datetime.now() - datetime.timedelta(hours=6)).timestamp()
now = datetime.datetime.now().timestamp()
four_hours_from_now = (datetime.datetime.now() + datetime.timedelta(hours=2)).timestamp()
fire_start = TimedPoint(2802134.0 - 1500.0, 2299388.0, four_hours_ago)
fire_prop.set_ignition_point(fire_start)
fire_prop.propagate(until=four_hours_from_now)
## Figure terrain + ignition contour + ignition point
gdd = display.GeoDataDisplay.pyplot_figure(
seganosa_fire_env.raster.combine(fire_prop.ignitions().slice(["ignition"])),
frame=(0., 0.))
gdd.draw_elevation_shade(with_colorbar=False, cmap=matplotlib.cm.terrain)
gdd.draw_wind_quiver()
gdd.draw_ignition_contour(with_labels=True, cmap=matplotlib.cm.plasma)
gdd.draw_ignition_points(fire_start)
# gdd.figure.show()
gdd.figure.savefig(".".join(("demo_seganosa_propagation", "svg")), dpi=150, bbox_inches='tight')
# PLANNING
## Define the initial plan
    ## Take off and landing at the same location: maximum flight time 45 minutes
    ## Utility map depends on the predicted fire arrival times and the flight time window
f_data = planning.make_fire_data(fire_prop.ignitions(), seganosa_fire_env.raster)
tw = planning.TimeWindow(0, np.inf)
utility = planning.make_utility_map(fire_prop.ignitions(), flight_window=tw,
output_layer="utility")
trajectory_config = [planning.Trajectory(planning.TrajectoryConfig("Trajectory",
planning.UAVModels.x8("06"),
planning.Waypoint(
2801900.0 - 1500,
2298900.0 - 1500, 300.0,
0.0),
planning.Waypoint(
2801900.0 - 1500,
2298900.0 - 1500, 300.0,
0.0),
now,
2700.0,
planning.WindVector(3.0,
0.0)))]
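    # Note (added comment): take-off and landing share the same waypoint, the plan
    # starts at `now`, flight time is capped at 2700 s (45 min), and the 3 m/s
    # west-to-east wind matches the propagation environment defined above.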
the_plan = planning.Plan("observation_plan", trajectory_config, f_data, tw,
utility.as_cpp_raster("utility"))
initial_u_map = the_plan.utility_map()
planner = planning.Planner(the_plan, planning.VNSConfDB.demo_db()["demo"])
sr_1 = planner.compute_plan(10.0)
print("Flight mission length: {} minutes".format((sr_1.final_plan().trajectories()[0].start_times[-1] - sr_1.final_plan().trajectories()[0].start_times[0])/60.0))
## Display results
final_u_map = the_plan.utility_map()
gdd = display.GeoDataDisplay(
*display.get_pyplot_figure_and_axis(),
seganosa_fire_env.raster.combine(fire_prop.ignitions()),
frame=(0., 0.))
gdd.add_extension(TrajectoryDisplayExtension, (None,), {})
plot_sr_with_background(sr_1, gdd, (four_hours_ago, four_hours_from_now),
{"background": ['elevation_shade', 'ignition_contour', 'wind_quiver'],
"foreground": ['trajectory_solid', 'arrows', 'bases']})
gdd.figure.savefig(".".join(("demo_seganosa_plan", "svg")), dpi=150, bbox_inches='tight')
gdd = display.GeoDataDisplay(
*display.get_pyplot_figure_and_axis(),
seganosa_fire_env.raster.combine(fire_prop.ignitions()),
frame=(0., 0.))
gdd.add_extension(TrajectoryDisplayExtension, (None,), {})
plot_sr_with_background(sr_1, gdd, (four_hours_ago, four_hours_from_now),
{"background": ['utilitymap'],
"foreground": ['trajectory_solid', 'arrows', 'bases']})
gdd.figure.savefig(".".join(("demo_seganosa_utility", "svg")), dpi=150, bbox_inches='tight')
print("end")
|
bsd-2-clause
|
ai-se/george
|
testRig.py
|
1
|
51380
|
from __future__ import division,print_function
import sys, random, math
import numpy as np
from sklearn.tree import DecisionTreeClassifier , DecisionTreeRegressor
from sklearn.linear_model import LinearRegression
from lib import *
from where2 import *
import Technix.sk as sk
import Technix.CoCoMo as CoCoMo
import Technix.sdivUtil as sdivUtil
from Technix.smote import smote
from Technix.batman import smotify
from Technix.TEAK import teak, leafTeak, teakImproved
from Technix.atlm import lin_reg
from Technix.atlm_pruned import lin_reg_pruned
from Models import *
MODEL = nasa93.nasa93
"""
Creates a generator that yields each record once as the test record,
with the remaining records as the training set (leave-one-out)
"""
def loo(dataset):
for index,item in enumerate(dataset):
yield item, dataset[:index]+dataset[index+1:]
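# Illustrative sketch (hypothetical data, not part of the original file):
#   list(loo([1, 2, 3]))  ->  [(1, [2, 3]), (2, [1, 3]), (3, [1, 2])]
# i.e. every record is used exactly once as the test item while the
# remaining records form the training set.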
"""
### Printing Stuff
Print without newline:
Courtesy @timm
"""
def say(*lst):
print(*lst,end="")
sys.stdout.flush()
def formatForCART(dataset,test,trains):
def indep(x):
rets=[]
indeps = x.cells[:len(dataset.indep)]
for i,val in enumerate(indeps):
if i not in dataset.ignores:
rets.append(val)
return rets
dep = lambda x: x.cells[len(dataset.indep)]
trainInputSet = []
trainOutputSet = []
for train in trains:
trainInputSet+=[indep(train)]
trainOutputSet+=[dep(train)]
return trainInputSet, trainOutputSet, indep(test), dep(test)
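# Descriptive note (added): formatForCART returns (train inputs, train outputs,
# test input, test output). Inputs are the independent columns of each row with
# any column listed in dataset.ignores dropped; the output is the effort column,
# i.e. the cell immediately after the independents.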
"""
Selecting the closest cluster and the closest row
"""
def clusterk1(score, duplicatedModel, tree, test, desired_effort, leafFunc):
test_leaf = leafFunc(duplicatedModel, test, tree)
nearest_row = closest(duplicatedModel, test, test_leaf.val)
test_effort = effort(duplicatedModel, nearest_row)
error = abs(desired_effort - test_effort)/desired_effort
#print("clusterk1", test_effort, desired_effort, error)
score += error
def cluster_nearest(model, tree, test, leaf_func):
test_leaf = leaf_func(model, test, tree)
nearest_row = closest(model, test, test_leaf.val)
return effort(model, nearest_row)
def clustermean2(score, duplicatedModel, tree, test, desired_effort, leafFunc):
test_leaf = leafFunc(duplicatedModel, test, tree)
nearestN = closestN(duplicatedModel, 2, test, test_leaf.val)
if (len(nearestN)==1) :
nearest_row = nearestN[0][1]
test_effort = effort(duplicatedModel, nearest_row)
error = abs(desired_effort - test_effort)/desired_effort
else :
test_effort = sum(map(lambda x:effort(duplicatedModel, x[1]), nearestN[:2]))/2
error = abs(desired_effort - test_effort)/desired_effort
score += error
def cluster_weighted_mean2(model, tree, test, leaf_func):
test_leaf = leaf_func(model, test, tree)
nearest_rows = closestN(model, 2, test, test_leaf.val)
wt_0 = nearest_rows[1][0]/(nearest_rows[0][0] + nearest_rows[1][0] + 0.000001)
wt_1 = nearest_rows[0][0]/(nearest_rows[0][0] + nearest_rows[1][0] + 0.000001)
return effort(model, nearest_rows[0][1]) * wt_0 + effort(model, nearest_rows[1][1]) * wt_1
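# Worked sketch (hypothetical distances, not from the original file): with the two
# nearest rows at distances d0=1 and d1=3, the closer row gets weight
# d1/(d0+d1) = 0.75 and the farther one d0/(d0+d1) = 0.25 (ignoring the small
# smoothing constant), so the estimate leans towards the nearer neighbour's effort.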
def clusterWeightedMean2(score, duplicatedModel, tree, test, desired_effort, leafFunc):
test_leaf = leafFunc(duplicatedModel, test, tree)
nearestN = closestN(duplicatedModel, 2, test, test_leaf.val)
if (len(nearestN)==1) :
nearest_row = nearestN[0][1]
test_effort = effort(duplicatedModel, nearest_row)
error = abs(desired_effort - test_effort)/desired_effort
else :
nearest2 = nearestN[:2]
wt_0, wt_1 = nearest2[1][0]/(nearest2[0][0]+nearest2[1][0]+0.00001) , nearest2[0][0]/(nearest2[0][0]+nearest2[1][0]+0.00001)
test_effort = effort(duplicatedModel, nearest2[0][1])*wt_0 + effort(duplicatedModel, nearest2[1][1])*wt_1
#test_effort = sum(map(lambda x:effort(duplicatedModel, x[1]), nearestN[:2]))/2
error = abs(desired_effort - test_effort)/desired_effort
score += error
def clusterVasil(score, duplicatedModel, tree, test, desired_effort, leafFunc, k):
test_leaf = leafFunc(duplicatedModel, test, tree)
if k > len(test_leaf.val):
k = len(test_leaf.val)
nearestN = closestN(duplicatedModel, k, test, test_leaf.val)
if (len(nearestN)==1) :
nearest_row = nearestN[0][1]
test_effort = effort(duplicatedModel, nearest_row)
error = abs(desired_effort - test_effort)/desired_effort
else :
nearestk = nearestN[:k]
test_effort, sum_wt = 0,0
for dist, row in nearestk:
test_effort += (1/(dist+0.000001))*effort(duplicatedModel,row)
sum_wt += (1/(dist+0.000001))
test_effort = test_effort / sum_wt
error = abs(desired_effort - test_effort)/desired_effort
score += error
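# Worked sketch (hypothetical numbers): for k=2 neighbours at distances (1, 3) with
# efforts (10, 20), the inverse-distance weights are ~1 and ~1/3, giving
# (10*1 + 20/3) / (1 + 1/3) = 12.5 as the estimated effort (the epsilon term
# only guards against division by zero).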
"""
Performing LinearRegression inside a cluster
to estimate effort
"""
def linRegressCluster(score, duplicatedModel, tree, test, desired_effort, leafFunc=leaf, doSmote=False):
def getTrainData(rows):
trainIPs, trainOPs = [], []
for row in rows:
#trainIPs.append(row.cells[:len(duplicatedModel.indep)])
trainIPs.append([row.cosine])
trainOPs.append(effort(duplicatedModel, row))
return trainIPs, trainOPs
def fastMapper(test_leaf, what = lambda duplicatedModel: duplicatedModel.decisions):
data = test_leaf.val
#data = smotify(duplicatedModel, test_leaf.val,k=5, factor=100)
one = any(data)
west = furthest(duplicatedModel,one,data, what = what)
east = furthest(duplicatedModel,west,data, what = what)
c = dist(duplicatedModel,west,east, what = what)
test_leaf.west, test_leaf.east, test_leaf.c = west, east, c
for one in data:
if c == 0:
one.cosine = 0
continue
a = dist(duplicatedModel,one,west, what = what)
b = dist(duplicatedModel,one,east, what = what)
x = (a*a + c*c - b*b)/(2*c) # cosine rule
one.cosine = x
def getCosine(test_leaf, what = lambda duplicatedModel: duplicatedModel.decisions):
if (test_leaf.c == 0):
return 0
a = dist(duplicatedModel,test,test_leaf.west, what = what)
b = dist(duplicatedModel,test,test_leaf.east, what = what)
return (a*a + test_leaf.c**2 - b*b)/(2*test_leaf.c) # cosine rule
test_leaf = leafFunc(duplicatedModel, test, tree)
#if (len(test_leaf.val) < 4) :
# test_leaf = test_leaf._up
if (len(test_leaf.val)>1) and doSmote:
data = smote(duplicatedModel, test_leaf.val,k=5, N=100)
linearRegression(score, duplicatedModel, data, test, desired_effort)
else :
fastMapper(test_leaf)
trainIPs, trainOPs = getTrainData(test_leaf.val)
clf = LinearRegression()
clf.fit(trainIPs, trainOPs)
test_effort = clf.predict(getCosine(test_leaf))
error = abs(desired_effort - test_effort)/desired_effort
score += error
"""
Performing LinearRegression over entire dataset
"""
def linearRegression(score, model, train, test, desired_effort):
def getTrainData(rows):
trainIPs, trainOPs = [], []
for row in rows:
trainRow=[]
for i,val in enumerate(row.cells[:len(model.indep)]):
if i not in model.ignores:
trainRow.append(val)
trainIPs.append(trainRow)
trainOPs.append(effort(model, row))
return trainIPs, trainOPs
trainIPs, trainOPs = getTrainData(train)
clf = LinearRegression()
clf.fit(trainIPs, trainOPs)
testIP=[]
for i,val in enumerate(test.cells[:len(model.indep)]):
if i not in model.ignores:
testIP.append(val)
test_effort = clf.predict(testIP)
error = abs(desired_effort - test_effort)/desired_effort
score += error
"""
Selecting K-nearest neighbors and finding the mean
expected effort
"""
def kNearestNeighbor(score, duplicatedModel, test, desired_effort, k=1, rows = None):
if rows == None:
rows = duplicatedModel._rows
nearestN = closestN(duplicatedModel, k, test, rows)
test_effort = sorted(map(lambda x:effort(duplicatedModel, x[1]), nearestN))[k//2]
score += abs(desired_effort - test_effort)/desired_effort
def knn_1(model, row, rest):
closest_1 = closestN(model, 1, row, rest)[0][1]
return effort(model, closest_1)
def knn_3(model, row, rest):
closest_3 = closestN(model, 3, row, rest)
a = effort(model, closest_3[0][1])
b = effort(model, closest_3[1][1])
c = effort(model, closest_3[2][1])
return (50*a + 33*b + 17*c)/100
"""
Classification and Regression Trees from sk-learn
"""
def CART(dataset, score, cartIP, test, desired_effort):
trainIp, trainOp, testIp, testOp = formatForCART(dataset, test,cartIP);
decTree = DecisionTreeRegressor(criterion="mse", random_state=1)
decTree.fit(trainIp,trainOp)
test_effort = decTree.predict(testIp)[0]
score += abs(desired_effort - test_effort)/desired_effort
def cart(model, row, rest):
train_ip, train_op, test_ip, test_op = formatForCART(model, row, rest)
dec_tree = DecisionTreeRegressor(criterion="mse", random_state=1)
dec_tree.fit(train_ip,train_op)
return dec_tree.predict([test_ip])[0]
def showWeights(model):
outputStr=""
i=0
for wt, att in sorted(zip(model.weights, model.indep)):
outputStr += att + " : " + str(round(wt,2))
i+=1
if i%5==0:
outputStr += "\n"
else:
outputStr += "\t"
return outputStr.strip()
def loc_dist(m,i,j,
what = lambda m: m.decisions):
"Euclidean distance 0 <= d <= 1 between decisions"
dec_index = what(m)[-1]
n1 = norm(m, dec_index, i.cells[dec_index])
n2 = norm(m, dec_index, j.cells[dec_index])
return abs(n1-n2)
def loc_closest_n(model, n, row, other_rows):
tmp = []
for other_row in other_rows:
if id(row) == id(other_row): continue
d = loc_dist(model, row, other_row)
tmp += [(d, other_row)]
return sorted(tmp)[:n]
def loc_1(model, row, rows):
closest_1 = loc_closest_n(model, 1, row, rows)[0][1]
return effort(model, closest_1)
def loc_3(model, row, rows):
closest_3 = loc_closest_n(model, 3, row, rows)
a = effort(model, closest_3[0][1])
b = effort(model, closest_3[1][1])
c = effort(model, closest_3[2][1])
return (50*a + 33*b + 17*c)/100
def productivity(model, row, rows):
loc_index = model.decisions[-1]
productivities = [effort(model, one)/one.cells[loc_index] for one in rows]
avg_productivity = sum(productivities)/len(productivities)
return avg_productivity*row.cells[loc_index]
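# Worked sketch (hypothetical numbers): if the training rows have productivities
# (effort/LOC) of 2.0 and 4.0, avg_productivity is 3.0, and a test row with
# 10 units of LOC is estimated at 3.0 * 10 = 30 units of effort.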
def testRig(dataset=MODEL(),
doCART = False,doKNN = False, doLinRg = False):
scores=dict(clstr=N(), lRgCl=N())
if doCART:
scores['CARTT']=N();
if doKNN:
scores['knn_1'],scores['knn_3'],scores['knn_5'] = N(), N(), N()
if doLinRg:
scores['linRg'] = N()
for score in scores.values():
score.go=True
for test, train in loo(dataset._rows):
say(".")
desired_effort = effort(dataset, test)
tree = launchWhere2(dataset, rows=train, verbose=False)
n = scores["clstr"]
n.go and clusterk1(n, dataset, tree, test, desired_effort, leaf)
n = scores["lRgCl"]
n.go and linRegressCluster(n, dataset, tree, test, desired_effort)
if doCART:
CART(dataset, scores["CARTT"], train, test, desired_effort)
if doKNN:
n = scores["knn_1"]
n.go and kNearestNeighbor(n, dataset, test, desired_effort, k=1, rows=train)
n = scores["knn_3"]
n.go and kNearestNeighbor(n, dataset, test, desired_effort, k=3, rows=train)
n = scores["knn_5"]
n.go and kNearestNeighbor(n, dataset, test, desired_effort, k=5, rows=train)
if doLinRg:
n = scores["linRg"]
n.go and linearRegression(n, dataset, train, test, desired_effort)
return scores
def average_effort(model, rows):
efforts = []
for row in rows:
efforts.append(effort(model, row))
return sum(efforts)/len(efforts)
def effort_error(actual, computed, average):
return abs((actual**2 - computed**2)/(actual**2 - average**2))
#return actual - computed
#return abs((actual**2 - computed**2)/(actual**2))
#return abs(actual - computed)/actual
#return ((actual - computed)/(actual - average))**2
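# Worked sketch (hypothetical numbers): effort_error(10, 12, 20)
#   = |10**2 - 12**2| / |10**2 - 20**2| = 44 / 300 ~= 0.147,
# i.e. the squared error of the estimate measured against the squared error of
# simply guessing the average effort.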
def sa(*args):
"""
Shepperd and MacDonell's standardized error ratio.
Returns MAR/MARP0 (MARP0 estimated from 1000 random guesses);
SA itself would be 1 - MAR/MARP0.
:param args: [actual, predicted, vector of all efforts]
:return: MAR/MARP0
"""
mar = abs(args[0] - args[1])
mar_p0 = sum([abs(random.choice(args[2])-args[0]) for _ in range(1000)])/1000
return mar/mar_p0
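# Worked sketch (hypothetical numbers): with actual=100 and predicted=120,
# MAR = 20; if random guesses drawn from the effort pool miss by ~80 on average
# (MARP0), the returned ratio is ~0.25, and Shepperd & MacDonell's SA would be
# 1 - 0.25 = 0.75.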
def msa(*args):
"""
Median of the per-instance standardized error ratios (see sa above)
:param args: [[actual vals], [predicted vals], [all efforts]]
:return: median standardized error ratio
"""
errors = []
for actual, predicted in zip(args[0], args[1]):
errors.append(sa(actual, predicted, args[2]))
return np.median(errors)
"""
Test Rig to test CoCoMo for
a particular dataset
"""
def testCoCoMo(dataset=MODEL(), a=2.94, b=0.91):
coc_scores = dict(COCOMO2 = N(), COCONUT= N())
tuned_a, tuned_b = CoCoMo.coconut(dataset, dataset._rows)
for score in coc_scores.values():
score.go=True
for row, rest in loo(dataset._rows):
#say('.')
desired_effort = effort(dataset, row)
avg_effort = average_effort(dataset, rest)
test_effort = CoCoMo.cocomo2(dataset, row.cells, a, b)
test_effort_tuned = CoCoMo.cocomo2(dataset, row.cells, tuned_a, tuned_b)
#coc_scores["COCOMO2"] += ((desired_effort - test_effort) / (desired_effort - avg_effort))**2
coc_scores["COCOMO2"] += effort_error(desired_effort, test_effort, avg_effort)
#coc_scores["COCONUT"] += ((desired_effort - test_effort_tuned) / (desired_effort - avg_effort))**2
coc_scores["COCONUT"] += effort_error(desired_effort, test_effort_tuned, avg_effort)
return coc_scores
def pruned_coconut(model, row, rows, row_count, column_ratio, noise=None):
pruned_rows, columns = CoCoMo.prune_cocomo(model, rows, row_count, column_ratio)
a_tuned, b_tuned = CoCoMo.coconut(model, pruned_rows, decisions=columns, noise=noise)
return CoCoMo.cocomo2(model, row.cells, a=a_tuned, b=b_tuned, decisions=columns, noise=noise), pruned_rows
def testDriver():
seed(0)
skData = []
split = "median"
dataset=MODEL(split=split)
if dataset._isCocomo:
scores = testCoCoMo(dataset)
for key, n in scores.items():
skData.append([key+". ."] + n.cache.all)
scores = testRig(dataset=MODEL(split=split),doCART = True, doKNN=True, doLinRg=True)
for key,n in scores.items():
if (key == "clstr" or key == "lRgCl"):
skData.append([key+"(no tuning)"] + n.cache.all)
else:
skData.append([key+". ."] + n.cache.all)
scores = testRig(dataset=MODEL(split=split, weighFeature = True), doKNN=True)
for key,n in scores.items():
skData.append([key+"(sdiv_wt^1)"] + n.cache.all)
scores = dict(TEAK=N())
for score in scores.values():
score.go=True
dataset=MODEL(split=split)
for test, train in loo(dataset._rows):
say(".")
desired_effort = effort(dataset, test)
tree = teak(dataset, rows = train)
n = scores["TEAK"]
n.go and clusterk1(n, dataset, tree, test, desired_effort, leafTeak)
for key,n in scores.items():
skData.append([key+". ."] + n.cache.all)
print("")
print(str(len(dataset._rows)) + " data points, " + str(len(dataset.indep)) + " attributes")
print("")
sk.rdivDemo(skData)
#launchWhere2(MODEL())
#testDriver()
def testKLOCWeighDriver():
dataset = MODEL(doTune=False, weighKLOC=True)
tuneRatio = 0.9
skData = [];
while(tuneRatio <= 1.2):
dataset.tuneRatio = tuneRatio
scores = testRig(dataset=dataset)
for key,n in scores.items():
skData.append([key+"( "+str(tuneRatio)+" )"] + n.cache.all)
tuneRatio += 0.01
print("")
sk.rdivDemo(skData)
#testKLOCWeighDriver()
def testKLOCTuneDriver():
tuneRatio = 0.9
skData = [];
while(tuneRatio <= 1.2):
dataset = MODEL(doTune=True, weighKLOC=False, klocWt=tuneRatio)
scores = testRig(dataset=dataset)
for key,n in scores.items():
skData.append([key+"( "+str(tuneRatio)+" )"] + n.cache.all)
tuneRatio += 0.01
print("")
sk.rdivDemo(skData)
#testKLOCTuneDriver()
#testRig(dataset=MODEL(doTune=False, weighKLOC=False), duplicator=interpolateNTimes)
def testOverfit(dataset= MODEL(split="median")):
skData = [];
scores= dict(splitSize_2=N(),splitSize_4=N(),splitSize_8=N())
for score in scores.values():
score.go=True
for test, train in loo(dataset._rows):
say(".")
desired_effort = effort(dataset, test)
tree = launchWhere2(dataset, rows=train, verbose=False, minSize=2)
n = scores["splitSize_2"]
n.go and linRegressCluster(n, dataset, tree, test, desired_effort)
tree = launchWhere2(dataset, rows=train, verbose=False, minSize=4)
n = scores["splitSize_4"]
n.go and linRegressCluster(n, dataset, tree, test, desired_effort)
tree = launchWhere2(dataset, rows=train, verbose=False, minSize=8)
n = scores["splitSize_8"]
n.go and linRegressCluster(n, dataset, tree, test, desired_effort)
for key,n in scores.items():
skData.append([key] + n.cache.all)
print("")
sk.rdivDemo(skData)
#testOverfit()
def testSmote():
dataset=MODEL(split="variance", weighFeature=True)
launchWhere2(dataset, verbose=False)
skData = [];
scores= dict(sm_knn_1_w=N(), sm_knn_3_w=N(), CART=N())
for score in scores.values():
score.go=True
for test, train in loo(dataset._rows):
say(".")
desired_effort = effort(dataset, test)
clones = smotify(dataset, train,k=5, factor=100)
n = scores["CART"]
n.go and CART(dataset, scores["CART"], train, test, desired_effort)
n = scores["sm_knn_1_w"]
n.go and kNearestNeighbor(n, dataset, test, desired_effort, 1, clones)
n = scores["sm_knn_3_w"]
n.go and kNearestNeighbor(n, dataset, test, desired_effort, 3, clones)
for key,n in scores.items():
skData.append([key] + n.cache.all)
if dataset._isCocomo:
for key,n in testCoCoMo(dataset).items():
skData.append([key] + n.cache.all)
scores= dict(knn_1_w=N(), knn_3_w=N())
dataset=MODEL(split="variance", weighFeature=True)
for test, train in loo(dataset._rows):
say(".")
desired_effort = effort(dataset, test)
n = scores["knn_1_w"]
kNearestNeighbor(n, dataset, test, desired_effort, 1, train)
n = scores["knn_3_w"]
kNearestNeighbor(n, dataset, test, desired_effort, 3, train)
for key,n in scores.items():
skData.append([key] + n.cache.all)
scores= dict(knn_1=N(), knn_3=N())
dataset=MODEL(split="variance")
for test, train in loo(dataset._rows):
say(".")
desired_effort = effort(dataset, test)
n = scores["knn_1"]
kNearestNeighbor(n, dataset, test, desired_effort, 1, train)
n = scores["knn_3"]
kNearestNeighbor(n, dataset, test, desired_effort, 3, train)
for key,n in scores.items():
skData.append([key] + n.cache.all)
print("")
sk.rdivDemo(skData)
def testForPaper(model=MODEL):
split="median"
print(model.__name__.upper())
dataset=model(split=split, weighFeature=False)
print(str(len(dataset._rows)) + " data points, " + str(len(dataset.indep)) + " attributes")
dataset_weighted = model(split=split, weighFeature=True)
launchWhere2(dataset, verbose=False)
skData = []
if dataset._isCocomo:
for key,n in testCoCoMo(dataset).items():
skData.append([key] + n.cache.all)
scores = dict(CART = N(), knn_1 = N(),
knn_3 = N(), TEAK = N(),
vasil_2=N(), vasil_3=N(),
vasil_4=N(), vasil_5=N(),)
for score in scores.values():
score.go=True
for test, train in loo(dataset._rows):
desired_effort = effort(dataset, test)
tree = launchWhere2(dataset, rows=train, verbose=False)
tree_teak = teak(dataset, rows = train)
#n = scores["LSR"]
#n.go and linearRegression(n, dataset, train, test, desired_effort)
n = scores["TEAK"]
n.go and clusterk1(n, dataset, tree_teak, test, desired_effort, leafTeak)
n = scores["CART"]
n.go and CART(dataset, scores["CART"], train, test, desired_effort)
n = scores["knn_1"]
n.go and kNearestNeighbor(n, dataset, test, desired_effort, 1, train)
n = scores["knn_3"]
n.go and kNearestNeighbor(n, dataset, test, desired_effort, 3, train)
for test, train in loo(dataset_weighted._rows):
desired_effort = effort(dataset, test)
tree_weighted, leafFunc = launchWhere2(dataset_weighted, rows=train, verbose=False), leaf
n = scores["vasil_2"]
n.go and clusterVasil(n, dataset_weighted, tree_weighted, test, desired_effort,leafFunc,2)
n = scores["vasil_3"]
n.go and clusterVasil(n, dataset_weighted, tree_weighted, test, desired_effort,leafFunc,3)
n = scores["vasil_4"]
n.go and clusterVasil(n, dataset_weighted, tree_weighted, test, desired_effort,leafFunc,4)
n = scores["vasil_5"]
n.go and clusterVasil(n, dataset_weighted, tree_weighted, test, desired_effort,leafFunc,5)
for key,n in scores.items():
skData.append([key] + n.cache.all)
print("")
sk.rdivDemo(skData)
print("");print("")
def testEverything(model = MODEL):
split="median"
print('###'+model.__name__.upper())
dataset=model(split=split, weighFeature=False)
print('####'+str(len(dataset._rows)) + " data points, " + str(len(dataset.indep)) + " attributes")
dataset_weighted = model(split=split, weighFeature=True)
launchWhere2(dataset, verbose=False)
skData = [];
scores= dict(TEAK=N(), linear_reg=N(), CART=N(),
wt_linRgCl=N(), wt_clstr_whr=N(),
linRgCl=N(), clstr_whr=N(),
t_wt_linRgCl=N(), t_wt_clstr_whr=N(),
knn_1=N(), wt_knn_1=N(),
clstrMn2=N(), wt_clstrMn2=N(), t_wt_clstrMn2=N(),
PEEKING2=N(), wt_PEEKING2=N(), t_wt_PEEKING2=N(),
t_clstr_whr=N(), t_linRgCl=N(), t_clstrMn2=N(),t_PEEKING2=N())
#scores= dict(TEAK=N(), linear_reg=N(), linRgCl=N())
for score in scores.values():
score.go=True
for test, train in loo(dataset._rows):
#say(".")
desired_effort = effort(dataset, test)
tree = launchWhere2(dataset, rows=train, verbose=False)
tree_teak = teakImproved(dataset, rows = train)
n = scores["TEAK"]
n.go and clusterk1(n, dataset, tree_teak, test, desired_effort, leaf)
n = scores["linear_reg"]
n.go and linearRegression(n, dataset, train, test, desired_effort)
n = scores["clstr_whr"]
n.go and clusterk1(n, dataset, tree, test, desired_effort, leaf)
n = scores["linRgCl"]
n.go and linRegressCluster(n, dataset, tree, test, desired_effort, leaf)
n = scores["knn_1"]
n.go and kNearestNeighbor(n, dataset, test, desired_effort, 1, train)
n = scores["clstrMn2"]
n.go and clustermean2(n, dataset, tree, test, desired_effort, leaf)
n = scores["PEEKING2"]
n.go and clusterWeightedMean2(n, dataset, tree, test, desired_effort, leaf)
n = scores["CART"]
n.go and CART(dataset, scores["CART"], train, test, desired_effort)
tree, leafFunc = teakImproved(dataset, rows=train, verbose=False),leaf
n = scores["t_clstr_whr"]
n.go and clusterk1(n, dataset, tree, test, desired_effort, leafFunc)
n = scores["t_linRgCl"]
n.go and linRegressCluster(n, dataset, tree, test, desired_effort, leafFunc=leafFunc)
n = scores["t_clstrMn2"]
n.go and clustermean2(n, dataset, tree, test, desired_effort, leafFunc)
n = scores["t_PEEKING2"]
n.go and clusterWeightedMean2(n, dataset, tree, test, desired_effort, leafFunc)
for test, train in loo(dataset_weighted._rows):
#say(".")
desired_effort = effort(dataset_weighted, test)
tree_weighted, leafFunc = launchWhere2(dataset_weighted, rows=train, verbose=False), leaf
n = scores["wt_clstr_whr"]
n.go and clusterk1(n, dataset_weighted, tree_weighted, test, desired_effort, leafFunc)
n = scores["wt_linRgCl"]
n.go and linRegressCluster(n, dataset_weighted, tree_weighted, test, desired_effort, leafFunc=leafFunc)
n = scores["wt_clstrMn2"]
n.go and clustermean2(n, dataset_weighted, tree_weighted, test, desired_effort, leafFunc)
n = scores["wt_PEEKING2"]
n.go and clusterWeightedMean2(n, dataset_weighted, tree_weighted, test, desired_effort, leafFunc)
tree_weighted, leafFunc = teakImproved(dataset_weighted, rows=train, verbose=False),leaf
n = scores["t_wt_clstr_whr"]
n.go and clusterk1(n, dataset_weighted, tree_weighted, test, desired_effort, leafFunc)
n = scores["t_wt_linRgCl"]
n.go and linRegressCluster(n, dataset_weighted, tree_weighted, test, desired_effort, leafFunc=leafFunc)
n = scores["wt_knn_1"]
n.go and kNearestNeighbor(n, dataset_weighted, test, desired_effort, 1, train)
n = scores["t_wt_clstrMn2"]
n.go and clustermean2(n, dataset_weighted, tree_weighted, test, desired_effort, leafFunc)
n = scores["t_wt_PEEKING2"]
n.go and clusterWeightedMean2(n, dataset_weighted, tree_weighted, test, desired_effort, leafFunc)
for key,n in scores.items():
skData.append([key] + n.cache.all)
if dataset._isCocomo:
for key,n in testCoCoMo(dataset).items():
skData.append([key] + n.cache.all)
print("\n####Attributes")
print("```")
print(showWeights(dataset_weighted))
print("```\n")
print("```")
sk.rdivDemo(skData)
print("```");print("")
def testTeakified(model = MODEL):
split="median"
print('###'+model.__name__.upper())
dataset=model(split=split, weighFeature=False)
print('####'+str(len(dataset._rows)) + " data points, " + str(len(dataset.indep)) + " attributes")
dataset_weighted = model(split=split, weighFeature=True)
launchWhere2(dataset, verbose=False)
skData = [];
scores= dict(linear_reg=N(), CART=N(),
linRgCl_wt=N(), clstr_whr_wt=N(),
linRgCl=N(), clstr_whr=N(),
knn_1=N(), knn_1_wt=N(),
clstrMn2=N(), clstrMn2_wt=N(),
PEEKING2=N(), PEEKING2_wt=N())
#scores= dict(TEAK=N(), linear_reg=N(), linRgCl=N())
for score in scores.values():
score.go=True
for test, train in loo(dataset._rows):
#say(".")
desired_effort = effort(dataset, test)
tree = teakImproved(dataset, rows=train, verbose=False)
n = scores["linear_reg"]
n.go and linearRegression(n, dataset, train, test, desired_effort)
n = scores["clstr_whr"]
n.go and clusterk1(n, dataset, tree, test, desired_effort, leaf)
n = scores["linRgCl"]
n.go and linRegressCluster(n, dataset, tree, test, desired_effort, leaf)
n = scores["knn_1"]
n.go and kNearestNeighbor(n, dataset, test, desired_effort, 1, train)
n = scores["clstrMn2"]
n.go and clustermean2(n, dataset, tree, test, desired_effort, leaf)
n = scores["PEEKING2"]
n.go and clusterWeightedMean2(n, dataset, tree, test, desired_effort, leaf)
n = scores["CART"]
n.go and CART(dataset, scores["CART"], train, test, desired_effort)
for test, train in loo(dataset_weighted._rows):
#say(".")
desired_effort = effort(dataset_weighted, test)
tree_weighted, leafFunc = teakImproved(dataset_weighted, rows=train, verbose=False), leaf
n = scores["clstr_whr_wt"]
n.go and clusterk1(n, dataset_weighted, tree_weighted, test, desired_effort, leafFunc)
n = scores["linRgCl_wt"]
n.go and linRegressCluster(n, dataset_weighted, tree_weighted, test, desired_effort, leafFunc=leafFunc)
n = scores["clstrMn2_wt"]
n.go and clustermean2(n, dataset_weighted, tree_weighted, test, desired_effort, leafFunc)
n = scores["PEEKING2_wt"]
n.go and clusterWeightedMean2(n, dataset_weighted, tree_weighted, test, desired_effort, leafFunc)
n = scores["knn_1_wt"]
n.go and kNearestNeighbor(n, dataset_weighted, test, desired_effort, 1, train)
for key,n in scores.items():
skData.append([key] + n.cache.all)
if dataset._isCocomo:
for key,n in testCoCoMo(dataset).items():
skData.append([key] + n.cache.all)
print("```")
sk.rdivDemo(skData)
print("```");print("")
def runAllModels(test_name):
models = [nasa93.nasa93, coc81.coc81, Mystery1.Mystery1, Mystery2.Mystery2,
albrecht.albrecht, kemerer.kemerer, kitchenham.kitchenham,
maxwell.maxwell, miyazaki.miyazaki, telecom.telecom, usp05.usp05,
china.china, cosmic.cosmic, isbsg10.isbsg10]
for model in models:
test_name(model)
def printAttributes(model):
dataset_weighted = model(split="median", weighFeature=True)
print('###'+model.__name__.upper())
print("\n####Attributes")
print("```")
print(showWeights(dataset_weighted))
print("```\n")
def testNoth(model=MODEL):
dataset_weighted = model(split="median", weighFeature=True)
launchWhere2(dataset_weighted, verbose=False)
skData = [];
scores= dict(t_wt_linRgCl_sm=N(), CART=N())
#scores= dict(TEAK=N(), linear_reg=N(), linRgCl=N())
for score in scores.values():
score.go=True
for test, train in loo(dataset_weighted._rows):
desired_effort = effort(dataset_weighted, test)
tree_weighted, leafFunc = launchWhere2(dataset_weighted, rows=train, verbose=False), leaf
n = scores["t_wt_linRgCl_sm"]
n.go and linRegressCluster(n, dataset_weighted, tree_weighted, test, desired_effort, leafFunc, doSmote=True)
n = scores["CART"]
n.go and CART(dataset_weighted, scores["CART"], train, test, desired_effort)
for key,n in scores.items():
skData.append([key] + n.cache.all)
print("```")
sk.rdivDemo(skData)
print("```");print("")
def test_sec4_1():
"""
Section 4.1
Cocomo vs LOC
:param model:
:return:
"""
models = [Mystery1.Mystery1, Mystery2.Mystery2, nasa93.nasa93, coc81.coc81]
for model_fn in models:
model = model_fn()
model_scores = dict(COCOMO2 = N(), COCONUT = N(), LOC1 = N(), LOC3 = N())
tuned_a, tuned_b = CoCoMo.coconut(model, model._rows)
for score in model_scores.values():
score.go=True
for row, rest in loo(model._rows):
#say('.')
desired_effort = effort(model, row)
avg_effort = average_effort(model, rest)
cocomo_effort = CoCoMo.cocomo2(model, row.cells)
coconut_effort = CoCoMo.cocomo2(model, row.cells, tuned_a, tuned_b)
loc1_effort = loc_1(model, row, rest)
loc3_effort = loc_3(model, row, rest)
model_scores["COCOMO2"] += effort_error(desired_effort, cocomo_effort, avg_effort)
model_scores["COCONUT"] += effort_error(desired_effort, coconut_effort, avg_effort)
model_scores["LOC1"] += effort_error(desired_effort, loc1_effort, avg_effort)
model_scores["LOC3"] += effort_error(desired_effort, loc3_effort, avg_effort)
sk_data = []
for key, n in model_scores.items():
sk_data.append([key] + n.cache.all)
print("### %s"%model_fn.__name__)
print("```")
sk.rdivDemo(sk_data)
print("```");print("")
def test_sec4_2_standard():
"""
Section 4.2
Cocomo vs TEAK vs PEEKING2
:param model:
:return:
"""
models = [Mystery1.Mystery1, Mystery2.Mystery2, nasa93.nasa93, coc81.coc81]
for model_fn in models:
model = model_fn()
model_scores = dict(COCOMO2 = N(), COCONUT = N(), KNN1 = N(), KNN3 = N(), CART = N(), ATLM = N())
tuned_a, tuned_b = CoCoMo.coconut(model, model._rows)
for score in model_scores.values():
score.go=True
for row, rest in loo(model._rows):
#say('.')
desired_effort = effort(model, row)
avg_effort = average_effort(model, rest)
cocomo_effort = CoCoMo.cocomo2(model, row.cells)
coconut_effort = CoCoMo.cocomo2(model, row.cells, tuned_a, tuned_b)
knn1_effort = loc_1(model, row, rest)
knn3_effort = loc_3(model, row, rest)
cart_effort = cart(model, row, rest)
baseline_effort = lin_reg(model, row, rest)
model_scores["COCOMO2"] += effort_error(desired_effort, cocomo_effort, avg_effort)
model_scores["COCONUT"] += effort_error(desired_effort, coconut_effort, avg_effort)
model_scores["KNN1"] += effort_error(desired_effort, knn1_effort, avg_effort)
model_scores["KNN3"] += effort_error(desired_effort, knn3_effort, avg_effort)
model_scores["CART"]+= effort_error(desired_effort, cart_effort, avg_effort)
model_scores["ATLM"]+= effort_error(desired_effort, baseline_effort, avg_effort)
sk_data = []
for key, n in model_scores.items():
sk_data.append([key] + n.cache.all)
print("### %s"%model_fn.__name__)
print("```")
sk.rdivDemo(sk_data)
print("```");print("")
def test_sec4_2_newer():
"""
Section 4.3
Choice of Statistical Ranking Methods
:param model:
:return:
"""
models = [Mystery1.Mystery1, Mystery2.Mystery2, nasa93.nasa93, coc81.coc81]
for model_fn in models:
model = model_fn()
model_scores = dict(COCOMO2 = N(), COCONUT = N(), TEAK = N(), PEEKING2 = N())
tuned_a, tuned_b = CoCoMo.coconut(model, model._rows)
for score in model_scores.values():
score.go=True
for row, rest in loo(model._rows):
#say('.')
desired_effort = effort(model, row)
avg_effort = average_effort(model, rest)
cocomo_effort = CoCoMo.cocomo2(model, row.cells)
coconut_effort = CoCoMo.cocomo2(model, row.cells, tuned_a, tuned_b)
tree_teak = teakImproved(model, rows=rest, verbose=False)
teak_effort = cluster_nearest(model, tree_teak, row, leafTeak)
tree = launchWhere2(model, rows=rest, verbose=False)
peeking_effort = cluster_weighted_mean2(model, tree, row, leaf)
model_scores["COCOMO2"] += effort_error(desired_effort, cocomo_effort, avg_effort)
model_scores["COCONUT"] += effort_error(desired_effort, coconut_effort, avg_effort)
model_scores["TEAK"] += effort_error(desired_effort, teak_effort, avg_effort)
model_scores["PEEKING2"] += effort_error(desired_effort, peeking_effort, avg_effort)
sk_data = []
for key, n in model_scores.items():
sk_data.append([key] + n.cache.all)
print("### %s"%model_fn.__name__)
print("```")
sk.rdivDemo(sk_data)
print("```");print("")
def test_sec4_3():
"""
Section 4.3
Choice of Statistical Ranking Methods
:param model:
:return:
"""
models = [Mystery1.Mystery1, Mystery2.Mystery2, nasa93.nasa93, coc81.coc81]
for model_fn in models:
model = model_fn()
model_scores = {
"COCOMO2" : N(),
"COCONUT" : N(),
"COCONUT:c0.25,r4" : N(),
"COCONUT:c0.25,r8" : N(),
"COCONUT:c0.5,r4" : N(),
"COCONUT:c0.5,r8" : N(),
"COCONUT:c1,r4" : N(),
"COCONUT:c1,r8" : N(),
}
for score in model_scores.values():
score.go=True
for row, rest in loo(model._rows):
#say('.')
desired_effort = effort(model, row)
avg_effort = average_effort(model, rest)
cocomo_effort = CoCoMo.cocomo2(model, row.cells)
model_scores["COCOMO2"] += effort_error(desired_effort, cocomo_effort, avg_effort)
tuned_a, tuned_b = CoCoMo.coconut(model, rest)
coconut_effort = CoCoMo.cocomo2(model, row.cells, tuned_a, tuned_b)
model_scores["COCONUT"] += effort_error(desired_effort, coconut_effort, avg_effort)
pruned_effort, pruned_rows = pruned_coconut(model, row, rest, 4, 0.25)
avg_effort = average_effort(model, pruned_rows)
model_scores["COCONUT:c0.25,r4"] += effort_error(desired_effort, pruned_effort, avg_effort)
pruned_effort, pruned_rows = pruned_coconut(model, row, rest, 8, 0.25)
avg_effort = average_effort(model, pruned_rows)
model_scores["COCONUT:c0.25,r8"] += effort_error(desired_effort, pruned_effort, avg_effort)
pruned_effort, pruned_rows = pruned_coconut(model, row, rest, 4, 0.5)
avg_effort = average_effort(model, pruned_rows)
model_scores["COCONUT:c0.5,r4"] += effort_error(desired_effort, pruned_effort, avg_effort)
pruned_effort, pruned_rows = pruned_coconut(model, row, rest, 8, 0.5)
avg_effort = average_effort(model, pruned_rows)
model_scores["COCONUT:c0.5,r8"] += effort_error(desired_effort, pruned_effort, avg_effort)
pruned_effort, pruned_rows = pruned_coconut(model, row, rest, 4, 1)
avg_effort = average_effort(model, pruned_rows)
model_scores["COCONUT:c1,r4"] += effort_error(desired_effort, pruned_effort, avg_effort)
pruned_effort, pruned_rows = pruned_coconut(model, row, rest, 8, 1)
avg_effort = average_effort(model, pruned_rows)
model_scores["COCONUT:c1,r8"] += effort_error(desired_effort, pruned_effort, avg_effort)
sk_data = []
for key, n in model_scores.items():
sk_data.append([key] + n.cache.all)
print("### %s"%model_fn.__name__)
print("```")
sk.rdivDemo(sk_data)
print("```");print("")
def test_sec4_4():
"""
Section 4.4
COCOMO with Incorrect Size Estimates
:param model:
:return:
"""
models = [Mystery1.Mystery1, Mystery2.Mystery2, nasa93.nasa93, coc81.coc81]
for model_fn in models:
model = model_fn()
model_scores = {
"COCOMO2" : N(),
"COCOMO2:n/2" : N(),
"COCOMO2:n/4" : N(),
"COCONUT:c0.5,r8" : N(),
"COCONUT:c0.5,r8,n/2" : N(),
"COCONUT:c0.5,r8,n/4" : N(),
}
for score in model_scores.values():
score.go=True
for row, rest in loo(model._rows):
#say('.')
desired_effort = effort(model, row)
avg_effort = average_effort(model, rest)
cocomo_effort = CoCoMo.cocomo2(model, row.cells)
model_scores["COCOMO2"] += effort_error(desired_effort, cocomo_effort, avg_effort)
cocomo_effort = CoCoMo.cocomo2(model, row.cells, noise=0.5)
model_scores["COCOMO2:n/2"] += effort_error(desired_effort, cocomo_effort, avg_effort)
cocomo_effort = CoCoMo.cocomo2(model, row.cells, noise=0.25)
model_scores["COCOMO2:n/4"] += effort_error(desired_effort, cocomo_effort, avg_effort)
pruned_effort, pruned_rows = pruned_coconut(model, row, rest, 8, 0.5)
avg_effort = average_effort(model, pruned_rows)
model_scores["COCONUT:c0.5,r8"] += effort_error(desired_effort, pruned_effort, avg_effort)
pruned_effort, pruned_rows = pruned_coconut(model, row, rest, 8, 0.5, noise=0.5)
avg_effort = average_effort(model, pruned_rows)
model_scores["COCONUT:c0.5,r8,n/2"] += effort_error(desired_effort, pruned_effort, avg_effort)
pruned_effort, pruned_rows = pruned_coconut(model, row, rest, 8, 0.5, noise=0.25)
avg_effort = average_effort(model, pruned_rows)
model_scores["COCONUT:c0.5,r8,n/4"] += effort_error(desired_effort, pruned_effort, avg_effort)
sk_data = []
for key, n in model_scores.items():
sk_data.append([key] + n.cache.all)
print("### %s"%model_fn.__name__)
print("```")
sk.rdivDemo(sk_data)
print("```");print("")
def test_baseline():
"""
Section 4.3
Choice of Statistical Ranking Methods
:param model:
:return:
"""
models = [Mystery1.Mystery1, Mystery2.Mystery2, nasa93.nasa93, coc81.coc81]
for model_fn in models:
model = model_fn()
model_scores = dict(COCOMO2 = N(),
COCONUT = N(),
TEAK = N(),
PEEKING2 = N(),
BASELINE = N())
tuned_a, tuned_b = CoCoMo.coconut(model, model._rows)
for score in model_scores.values():
score.go=True
actuals = []
for row, rest in loo(model._rows):
#say('.')
desired_effort = effort(model, row)
actuals.append(desired_effort)
avg_effort = average_effort(model, rest)
cocomo_effort = CoCoMo.cocomo2(model, row.cells)
coconut_effort = CoCoMo.cocomo2(model, row.cells, tuned_a, tuned_b)
tree_teak = teakImproved(model, rows=rest, verbose=False)
teak_effort = cluster_nearest(model, tree_teak, row, leafTeak)
tree = launchWhere2(model, rows=rest, verbose=False)
peeking_effort = cluster_weighted_mean2(model, tree, row, leaf)
baseline_effort = lin_reg(model, row, rest)
model_scores["COCOMO2"] += effort_error(desired_effort, cocomo_effort, avg_effort)
model_scores["COCONUT"] += effort_error(desired_effort, coconut_effort, avg_effort)
model_scores["TEAK"] += effort_error(desired_effort, teak_effort, avg_effort)
model_scores["PEEKING2"] += effort_error(desired_effort, peeking_effort, avg_effort)
model_scores["BASELINE"] += effort_error(desired_effort, baseline_effort, avg_effort)
print("### %s"%model_fn.__name__)
sk_data = []
for key, n in model_scores.items():
sk_data.append([key] + n.cache.all)
print("```")
sk.rdivDemo(sk_data)
print("```");print("")
# var_actuals = np.var(actuals)
# for key, n in model_scores.items():
# var_model = np.var(n.cache.all)
# sk_data.append((var_model/var_actuals,key))
# sk_data = sorted(sk_data)
# print("```")
# line = "----------------------------------------------------"
# print ('%4s , %22s , %s' % \
# ('rank', 'name', 'error')+ "\n"+ line)
# for index, (error, key) in enumerate(sk_data):
# print("%4d , %22s , %0.4f"%(index+1, key, error))
# print("```");print("")
def test_pruned_baseline():
"""
Section 4.4
COCOMO with Incorrect Size Estimates
:param model:
:return:
"""
models = [Mystery1.Mystery1, Mystery2.Mystery2, nasa93.nasa93, coc81.coc81]
for model_fn in models:
model = model_fn()
model_scores = {
"COCOMO2" : N(),
"COCONUT" : N(),
"BASELINE" : N(),
"P_BASELINE" : N(),
"CART" : N()
}
for score in model_scores.values():
score.go=True
for row, rest in loo(model._rows):
#say('.')
desired_effort = effort(model, row)
avg_effort = average_effort(model, rest)
a_tuned, b_tuned = CoCoMo.coconut(model, rest)
cocomo_effort = CoCoMo.cocomo2(model, row.cells)
coconut_effort = CoCoMo.cocomo2(model, row.cells, a_tuned, b_tuned)
baseline_effort = lin_reg(model, row, rest)
baseline_pruned_effort = lin_reg_pruned(model, row, rest)
cart_effort = cart(model, row, rest)
model_scores["COCOMO2"] += effort_error(desired_effort, cocomo_effort, avg_effort)
model_scores["COCONUT"] += effort_error(desired_effort, coconut_effort, avg_effort)
model_scores["BASELINE"] += effort_error(desired_effort, baseline_effort, avg_effort)
model_scores["P_BASELINE"] += effort_error(desired_effort, baseline_pruned_effort, avg_effort)
model_scores["CART"] += effort_error(desired_effort, cart_effort, avg_effort)
sk_data = []
for key, n in model_scores.items():
sk_data.append([key] + n.cache.all)
print("### %s"%model_fn.__name__)
print("```")
sk.rdivDemo(sk_data)
print("```");print("")
def test_pruned_baseline_continuous():
"""
Section 4.4
COCOMO with Incorrect Size Estimates
:param model:
:return:
"""
models = [albrecht.albrecht, kitchenham.kitchenham, maxwell.maxwell, miyazaki.miyazaki, china.china]
for model_fn in models:
model = model_fn()
model_scores = {
"BASELINE" : N(),
"P_BASELINE" : N(),
"CART" : N(),
"TEAK": N()
}
for score in model_scores.values():
score.go=True
print("### %s"%model_fn.__name__)
for row, rest in loo(model._rows):
#say('.')
desired_effort = effort(model, row)
avg_effort = average_effort(model, rest)
baseline_effort = lin_reg(model, row, rest)
baseline_pruned_effort = lin_reg_pruned(model, row, rest)
cart_effort = cart(model, row, rest)
tree_teak = teakImproved(model, rows=rest, verbose=False)
teak_effort = cluster_nearest(model, tree_teak, row, leafTeak)
model_scores["BASELINE"] += effort_error(desired_effort, baseline_effort, avg_effort)
model_scores["P_BASELINE"] += effort_error(desired_effort, baseline_pruned_effort, avg_effort)
model_scores["CART"] += effort_error(desired_effort, cart_effort, avg_effort)
model_scores["TEAK"] += effort_error(desired_effort, teak_effort, avg_effort)
sk_data = []
for key, n in model_scores.items():
sk_data.append([key] + n.cache.all)
print("```")
sk.rdivDemo(sk_data)
print("```");print("")
def test_sec_kloc_error():
"""
Section 4.4
COCOMO with Incorrect Size Estimates
:param model:
:return:
"""
models = [Mystery1.Mystery1, Mystery2.Mystery2, nasa93.nasa93, coc81.coc81]
for model_fn in models:
model = model_fn()
model_scores = {
"COCOMO2" : N(),
"COCOMO2:n/2" : N(),
"COCOMO2:n/4" : N(),
"COCOMO2:2*n" : N(),
"COCOMO2:4*n" : N(),
"COCONUT:c0.5,r8" : N(),
"COCONUT:c0.5,r8,n/2" : N(),
"COCONUT:c0.5,r8,n/4" : N(),
"COCONUT:c0.5,r8,2*n" : N(),
"COCONUT:c0.5,r8,4*n" : N(),
}
for score in model_scores.values():
score.go=True
for row, rest in loo(model._rows):
#say('.')
desired_effort = effort(model, row)
avg_effort = average_effort(model, rest)
cocomo_effort = CoCoMo.cocomo2(model, row.cells)
model_scores["COCOMO2"] += effort_error(desired_effort, cocomo_effort, avg_effort)
cocomo_effort = CoCoMo.cocomo2(model, row.cells, noise=0.5)
model_scores["COCOMO2:n/2"] += effort_error(desired_effort, cocomo_effort, avg_effort)
cocomo_effort = CoCoMo.cocomo2(model, row.cells, noise=0.25)
model_scores["COCOMO2:n/4"] += effort_error(desired_effort, cocomo_effort, avg_effort)
cocomo_effort = CoCoMo.cocomo2(model, row.cells, noise=2)
model_scores["COCOMO2:2*n"] += effort_error(desired_effort, cocomo_effort, avg_effort)
cocomo_effort = CoCoMo.cocomo2(model, row.cells, noise=4)
model_scores["COCOMO2:4*n"] += effort_error(desired_effort, cocomo_effort, avg_effort)
pruned_effort, pruned_rows = pruned_coconut(model, row, rest, 8, 0.5)
avg_effort = average_effort(model, pruned_rows)
model_scores["COCONUT:c0.5,r8"] += effort_error(desired_effort, pruned_effort, avg_effort)
pruned_effort, pruned_rows = pruned_coconut(model, row, rest, 8, 0.5, noise=0.5)
avg_effort = average_effort(model, pruned_rows)
model_scores["COCONUT:c0.5,r8,n/2"] += effort_error(desired_effort, pruned_effort, avg_effort)
pruned_effort, pruned_rows = pruned_coconut(model, row, rest, 8, 0.5, noise=0.25)
avg_effort = average_effort(model, pruned_rows)
model_scores["COCONUT:c0.5,r8,n/4"] += effort_error(desired_effort, pruned_effort, avg_effort)
pruned_effort, pruned_rows = pruned_coconut(model, row, rest, 8, 0.5, noise=2)
avg_effort = average_effort(model, pruned_rows)
model_scores["COCONUT:c0.5,r8,2*n"] += effort_error(desired_effort, pruned_effort, avg_effort)
pruned_effort, pruned_rows = pruned_coconut(model, row, rest, 8, 0.5, noise=4)
avg_effort = average_effort(model, pruned_rows)
model_scores["COCONUT:c0.5,r8,4*n"] += effort_error(desired_effort, pruned_effort, avg_effort)
sk_data = []
for key, n in model_scores.items():
sk_data.append([key] + n.cache.all)
print("### %s"%model_fn.__name__)
print("```")
sk.rdivDemo(sk_data)
print("```");print("")
def test_sec4_1_productivity():
"""
Updated Section 4.1
Cocomo vs LOC vs Productivity
:param model:
:return:
"""
models = [Mystery1.Mystery1, Mystery2.Mystery2, nasa93.nasa93, coc81.coc81]
for model_fn in models:
model = model_fn()
model_scores = dict(COCOMO2 = N(), COCONUT = N(),
LOC1 = N(), LOC3 = N(),
PROD = N())
tuned_a, tuned_b = CoCoMo.coconut(model, model._rows)
for score in model_scores.values():
score.go=True
for row, rest in loo(model._rows):
#say('.')
desired_effort = effort(model, row)
avg_effort = average_effort(model, rest)
cocomo_effort = CoCoMo.cocomo2(model, row.cells)
coconut_effort = CoCoMo.cocomo2(model, row.cells, tuned_a, tuned_b)
loc1_effort = loc_1(model, row, rest)
loc3_effort = loc_3(model, row, rest)
prod_effort = productivity(model, row, rest)
model_scores["COCOMO2"] += effort_error(desired_effort, cocomo_effort, avg_effort)
model_scores["COCONUT"] += effort_error(desired_effort, coconut_effort, avg_effort)
model_scores["LOC1"] += effort_error(desired_effort, loc1_effort, avg_effort)
model_scores["LOC3"] += effort_error(desired_effort, loc3_effort, avg_effort)
model_scores["PROD"] += effort_error(desired_effort, prod_effort, avg_effort)
sk_data = []
for key, n in model_scores.items():
sk_data.append([key] + n.cache.all)
print("### %s"%model_fn.__name__)
print("```")
sk.rdivDemo(sk_data)
print("```");print("")
def test_loc_paper():
models = [Mystery1.Mystery1, Mystery2.Mystery2, nasa93.nasa93, coc81.coc81]
for model_fn in models:
model = model_fn()
med_model_scores = {"COCOMO2": N(), "20%:COCOMO2": N(),
"40%:COCOMO2": N(), "60%:COCOMO2": N(),
"80%:COCOMO2": N(), "100%:COCOMO2": N()}
iqr_model_scores = {"COCOMO2": N(), "20%:COCOMO2": N(),
"40%:COCOMO2": N(), "60%:COCOMO2": N(),
"80%:COCOMO2": N(), "100%:COCOMO2": N()}
for score1, score2 in zip(med_model_scores.values(), iqr_model_scores.values()):
score1.go = True
score2.go = True
for row, rest in loo(model._rows):
# say('.')
all_efforts = [effort(model, one) for one in rest]
desired_effort = effort(model, row)
# 0 %
errors = N()
for _ in xrange(20):
cocomo_effort = CoCoMo.cocomo2(model, row.cells)
errors += sa(desired_effort, cocomo_effort, all_efforts)
med_model_scores["COCOMO2"] += errors.med()
iqr_model_scores["COCOMO2"] += errors.iqr()
# 20 %
errors = N()
for _ in xrange(20):
cocomo_effort = CoCoMo.cocomo2(model, row.cells, noise=0.2)
errors += sa(desired_effort, cocomo_effort, all_efforts)
med_model_scores["20%:COCOMO2"] += errors.med()
iqr_model_scores["20%:COCOMO2"] += errors.iqr()
# 40 %
errors = N()
for _ in xrange(20):
cocomo_effort = CoCoMo.cocomo2(model, row.cells, noise=0.4)
errors += sa(desired_effort, cocomo_effort, all_efforts)
med_model_scores["40%:COCOMO2"] += errors.med()
iqr_model_scores["40%:COCOMO2"] += errors.iqr()
# 60 %
errors = N()
for _ in xrange(20):
cocomo_effort = CoCoMo.cocomo2(model, row.cells, noise=0.6)
errors += sa(desired_effort, cocomo_effort, all_efforts)
med_model_scores["60%:COCOMO2"] += errors.med()
iqr_model_scores["60%:COCOMO2"] += errors.iqr()
# 80 %
errors = N()
for _ in xrange(20):
cocomo_effort = CoCoMo.cocomo2(model, row.cells, noise=0.8)
errors += sa(desired_effort, cocomo_effort, all_efforts)
med_model_scores["80%:COCOMO2"] += errors.med()
iqr_model_scores["80%:COCOMO2"] += errors.iqr()
# 100 %
errors = N()
for _ in xrange(20):
cocomo_effort = CoCoMo.cocomo2(model, row.cells, noise=1.0)
errors += sa(desired_effort, cocomo_effort, all_efforts)
med_model_scores["100%:COCOMO2"] += errors.med()
iqr_model_scores["100%:COCOMO2"] += errors.iqr()
med_sk_data, iqr_sk_data = [], []
for key in med_model_scores.keys():
med_sk_data.append([key] + med_model_scores[key].cache.all)
iqr_sk_data.append([key] + iqr_model_scores[key].cache.all)
print("### %s" % model_fn.__name__)
print("```")
sk.rdivDemo(med_sk_data)
print("")
sk.rdivDemo(iqr_sk_data)
print("```")
print("")
if __name__ == "__main__":
#testEverything(albrecht.albrecht)
#runAllModels(testEverything)
#testNoth(MODEL)
seed()
#test_sec4_4()
#test_baseline()
#test_pruned_baseline_continuous()
#test_sec_kloc_error()
#test_sec4_1_productivity()
# test_sec4_2_newer()
test_loc_paper()
|
mit
|
aiguofer/bokeh
|
bokeh/sampledata/gapminder.py
|
7
|
2828
|
''' Provide a pandas DataFrame instance of four of the datasets from gapminder.org.
These are read in from csv files that have been downloaded from Bokeh's
sample data on S3. But the original code that generated the csvs from the
raw gapminder data is available at the bottom of this file.
'''
from __future__ import absolute_import
from bokeh.util.dependencies import import_required
pd = import_required('pandas',
'gapminder sample data requires Pandas (http://pandas.pydata.org) to be installed')
from os.path import join
import sys
from . import _data_dir
data_dir = _data_dir()
datasets = [
'fertility',
'life_expectancy',
'population',
'regions',
]
for dataset in datasets:
filename = join(data_dir, 'gapminder_%s.csv' % dataset)
try:
setattr(
sys.modules[__name__],
dataset,
pd.read_csv(filename, index_col='Country', encoding='utf-8')
)
except (IOError, OSError):
raise RuntimeError('Could not load gapminder data file "%s". Please execute bokeh.sampledata.download()' % filename)
__all__ = datasets
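# Illustrative usage sketch (assumes bokeh.sampledata.download() has already
# fetched the csv files):
#
#   from bokeh.sampledata.gapminder import fertility, life_expectancy, population, regions
#   fertility.loc['United States']   # one country's fertility series, by year
#
# Each DataFrame is indexed by 'Country'; fertility, life_expectancy and
# population carry per-year values, while regions holds per-country grouping info.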
# ====================================================
# Original data is from Gapminder - www.gapminder.org.
# The google docs links are maintained by gapminder
# The following script was used to get the data from gapminder
# and process it into the csvs stored in bokeh's sampledata.
"""
population_url = "http://spreadsheets.google.com/pub?key=phAwcNAVuyj0XOoBL_n5tAQ&output=xls"
fertility_url = "http://spreadsheets.google.com/pub?key=phAwcNAVuyj0TAlJeCEzcGQ&output=xls"
life_expectancy_url = "http://spreadsheets.google.com/pub?key=tiAiXcrneZrUnnJ9dBU-PAw&output=xls"
regions_url = "https://docs.google.com/spreadsheets/d/1OxmGUNWeADbPJkQxVPupSOK5MbAECdqThnvyPrwG5Os/pub?gid=1&output=xls"
def _get_data(url):
# Get the data from the url and return only 1964 - 2013
df = pd.read_excel(url, index_col=0)
df = df.unstack().unstack()
df = df[(df.index >= 1964) & (df.index <= 2013)]
df = df.unstack().unstack()
return df
fertility_df = _get_data(fertility_url)
life_expectancy_df = _get_data(life_expectancy_url)
population_df = _get_data(population_url)
regions_df = pd.read_excel(regions_url, index_col=0)
# have common countries across all data
fertility_df = fertility_df.drop(fertility_df.index.difference(life_expectancy_df.index))
population_df = population_df.drop(population_df.index.difference(life_expectancy_df.index))
regions_df = regions_df.drop(regions_df.index.difference(life_expectancy_df.index))
fertility_df.to_csv('gapminder_fertility.csv')
population_df.to_csv('gapminder_population.csv')
life_expectancy_df.to_csv('gapminder_life_expectancy.csv')
regions_df.to_csv('gapminder_regions.csv')
"""
# ======================================================
|
bsd-3-clause
|
vamsirajendra/nupic
|
external/linux32/lib/python2.6/site-packages/matplotlib/patches.py
|
69
|
110325
|
# -*- coding: utf-8 -*-
from __future__ import division
import math
import matplotlib as mpl
import numpy as np
import matplotlib.cbook as cbook
import matplotlib.artist as artist
import matplotlib.colors as colors
import matplotlib.transforms as transforms
from matplotlib.path import Path
# these are not available for the object inspector until after the
# class is built so we define an initial set here for the init
# function and they will be overridden after object definition
artist.kwdocd['Patch'] = """
================= ==============================================
Property Description
================= ==============================================
alpha float
animated [True | False]
antialiased or aa [True | False]
clip_box a matplotlib.transform.Bbox instance
clip_on [True | False]
edgecolor or ec any matplotlib color
facecolor or fc any matplotlib color
figure a matplotlib.figure.Figure instance
fill [True | False]
hatch unknown
label any string
linewidth or lw float
lod [True | False]
transform a matplotlib.transform transformation instance
visible [True | False]
zorder any number
================= ==============================================
"""
class Patch(artist.Artist):
"""
A patch is a 2D artist with a face color and an edge color.
If any of *edgecolor*, *facecolor*, *linewidth*, or *antialiased*
are *None*, they default to their rc params setting.
"""
zorder = 1
def __str__(self):
return str(self.__class__).split('.')[-1]
def get_verts(self):
"""
Return a copy of the vertices used in this patch
If the patch contains Bézier curves, the curves will be
interpolated by line segments. To access the curves as
curves, use :meth:`get_path`.
"""
trans = self.get_transform()
path = self.get_path()
polygons = path.to_polygons(trans)
if len(polygons):
return polygons[0]
return []
def contains(self, mouseevent):
"""Test whether the mouse event occurred in the patch.
Returns T/F, {}
"""
# This is a general version of contains that should work on any
# patch with a path. However, patches that have a faster
# algebraic solution to hit-testing should override this
# method.
if callable(self._contains): return self._contains(self,mouseevent)
inside = self.get_path().contains_point(
(mouseevent.x, mouseevent.y), self.get_transform())
return inside, {}
def update_from(self, other):
"""
Updates this :class:`Patch` from the properties of *other*.
"""
artist.Artist.update_from(self, other)
self.set_edgecolor(other.get_edgecolor())
self.set_facecolor(other.get_facecolor())
self.set_fill(other.get_fill())
self.set_hatch(other.get_hatch())
self.set_linewidth(other.get_linewidth())
self.set_linestyle(other.get_linestyle())
self.set_transform(other.get_data_transform())
self.set_figure(other.get_figure())
self.set_alpha(other.get_alpha())
def get_extents(self):
"""
Return a :class:`~matplotlib.transforms.Bbox` object defining
the axis-aligned extents of the :class:`Patch`.
"""
return self.get_path().get_extents(self.get_transform())
def get_transform(self):
"""
Return the :class:`~matplotlib.transforms.Transform` applied
to the :class:`Patch`.
"""
return self.get_patch_transform() + artist.Artist.get_transform(self)
def get_data_transform(self):
return artist.Artist.get_transform(self)
def get_patch_transform(self):
return transforms.IdentityTransform()
def get_antialiased(self):
"""
Returns True if the :class:`Patch` is to be drawn with antialiasing.
"""
return self._antialiased
get_aa = get_antialiased
def get_edgecolor(self):
"""
Return the edge color of the :class:`Patch`.
"""
return self._edgecolor
get_ec = get_edgecolor
def get_facecolor(self):
"""
Return the face color of the :class:`Patch`.
"""
return self._facecolor
get_fc = get_facecolor
def get_linewidth(self):
"""
Return the line width in points.
"""
return self._linewidth
get_lw = get_linewidth
def get_linestyle(self):
"""
Return the linestyle. Will be one of ['solid' | 'dashed' |
'dashdot' | 'dotted']
"""
return self._linestyle
get_ls = get_linestyle
def set_antialiased(self, aa):
"""
Set whether to use antialiased rendering
ACCEPTS: [True | False] or None for default
"""
if aa is None: aa = mpl.rcParams['patch.antialiased']
self._antialiased = aa
def set_aa(self, aa):
"""alias for set_antialiased"""
return self.set_antialiased(aa)
def set_edgecolor(self, color):
"""
Set the patch edge color
ACCEPTS: mpl color spec, or None for default, or 'none' for no color
"""
if color is None: color = mpl.rcParams['patch.edgecolor']
self._edgecolor = color
def set_ec(self, color):
"""alias for set_edgecolor"""
return self.set_edgecolor(color)
def set_facecolor(self, color):
"""
Set the patch face color
ACCEPTS: mpl color spec, or None for default, or 'none' for no color
"""
if color is None: color = mpl.rcParams['patch.facecolor']
self._facecolor = color
def set_fc(self, color):
"""alias for set_facecolor"""
return self.set_facecolor(color)
def set_linewidth(self, w):
"""
Set the patch linewidth in points
ACCEPTS: float or None for default
"""
if w is None: w = mpl.rcParams['patch.linewidth']
self._linewidth = w
def set_lw(self, lw):
"""alias for set_linewidth"""
return self.set_linewidth(lw)
def set_linestyle(self, ls):
"""
Set the patch linestyle
ACCEPTS: ['solid' | 'dashed' | 'dashdot' | 'dotted']
"""
if ls is None: ls = "solid"
self._linestyle = ls
def set_ls(self, ls):
"""alias for set_linestyle"""
return self.set_linestyle(ls)
def set_fill(self, b):
"""
Set whether to fill the patch
ACCEPTS: [True | False]
"""
self.fill = b
def get_fill(self):
'return whether fill is set'
return self.fill
def set_hatch(self, h):
"""
Set the hatching pattern
hatch can be one of::
/ - diagonal hatching
\ - back diagonal
| - vertical
- - horizontal
# - crossed
x - crossed diagonal
Letters can be combined, in which case all the specified
hatchings are done. If same letter repeats, it increases the
density of hatching in that direction.
CURRENT LIMITATIONS:
1. Hatching is supported in the PostScript backend only.
2. Hatching is done with solid black lines of width 0.
ACCEPTS: [ '/' | '\\' | '|' | '-' | '#' | 'x' ]
"""
self._hatch = h
def get_hatch(self):
'Return the current hatching pattern'
return self._hatch
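# Illustrative usage sketch (not part of the original module; Rectangle is
# defined further below in this file):
#   p = Rectangle((0, 0), 1, 1)
#   p.set_hatch('/')    # single diagonal hatching
#   p.set_hatch('//x')  # denser diagonals plus crossed-diagonal
# Note the backend limitations listed in set_hatch above.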
def draw(self, renderer):
'Draw the :class:`Patch` to the given *renderer*.'
if not self.get_visible(): return
#renderer.open_group('patch')
gc = renderer.new_gc()
if cbook.is_string_like(self._edgecolor) and self._edgecolor.lower()=='none':
gc.set_linewidth(0)
else:
gc.set_foreground(self._edgecolor)
gc.set_linewidth(self._linewidth)
gc.set_linestyle(self._linestyle)
gc.set_antialiased(self._antialiased)
self._set_gc_clip(gc)
gc.set_capstyle('projecting')
gc.set_url(self._url)
gc.set_snap(self._snap)
if (not self.fill or self._facecolor is None or
(cbook.is_string_like(self._facecolor) and self._facecolor.lower()=='none')):
rgbFace = None
gc.set_alpha(1.0)
else:
r, g, b, a = colors.colorConverter.to_rgba(self._facecolor, self._alpha)
rgbFace = (r, g, b)
gc.set_alpha(a)
if self._hatch:
gc.set_hatch(self._hatch )
path = self.get_path()
transform = self.get_transform()
tpath = transform.transform_path_non_affine(path)
affine = transform.get_affine()
renderer.draw_path(gc, tpath, affine, rgbFace)
#renderer.close_group('patch')
def get_path(self):
"""
Return the path of this patch
"""
raise NotImplementedError('Derived must override')
def get_window_extent(self, renderer=None):
return self.get_path().get_extents(self.get_transform())
artist.kwdocd['Patch'] = patchdoc = artist.kwdoc(Patch)
for k in ('Rectangle', 'Circle', 'RegularPolygon', 'Polygon', 'Wedge', 'Arrow',
'FancyArrow', 'YAArrow', 'CirclePolygon', 'Ellipse', 'Arc',
'FancyBboxPatch'):
artist.kwdocd[k] = patchdoc
# define Patch.__init__ after the class so that the docstring can be
# auto-generated.
def __patch__init__(self,
edgecolor=None,
facecolor=None,
linewidth=None,
linestyle=None,
antialiased = None,
hatch = None,
fill=True,
**kwargs
):
"""
The following kwarg properties are supported
%(Patch)s
"""
artist.Artist.__init__(self)
if linewidth is None: linewidth = mpl.rcParams['patch.linewidth']
if linestyle is None: linestyle = "solid"
if antialiased is None: antialiased = mpl.rcParams['patch.antialiased']
self.set_edgecolor(edgecolor)
self.set_facecolor(facecolor)
self.set_linewidth(linewidth)
self.set_linestyle(linestyle)
self.set_antialiased(antialiased)
self.set_hatch(hatch)
self.fill = fill
self._combined_transform = transforms.IdentityTransform()
if len(kwargs): artist.setp(self, **kwargs)
__patch__init__.__doc__ = cbook.dedent(__patch__init__.__doc__) % artist.kwdocd
Patch.__init__ = __patch__init__
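# Illustrative sketch (an editorial example, not part of the module): the
# common Patch keyword arguments wired up above apply to every concrete patch
# subclass.  The property values below are arbitrary; Circle is defined later
# in this module.
#
#     p = Circle((0., 0.), radius=1., facecolor='yellow', edgecolor='k',
#                linewidth=2., linestyle='dashed', alpha=0.5)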
class Shadow(Patch):
def __str__(self):
return "Shadow(%s)"%(str(self.patch))
def __init__(self, patch, ox, oy, props=None, **kwargs):
"""
Create a shadow of the given *patch* offset by *ox*, *oy*.
*props*, if not *None*, is a patch property update dictionary.
        If *None*, the shadow will have the same color as the face,
but darkened.
kwargs are
%(Patch)s
"""
Patch.__init__(self)
self.patch = patch
self.props = props
self._ox, self._oy = ox, oy
self._update_transform()
self._update()
__init__.__doc__ = cbook.dedent(__init__.__doc__) % artist.kwdocd
def _update(self):
self.update_from(self.patch)
if self.props is not None:
self.update(self.props)
else:
r,g,b,a = colors.colorConverter.to_rgba(self.patch.get_facecolor())
rho = 0.3
r = rho*r
g = rho*g
b = rho*b
self.set_facecolor((r,g,b,0.5))
self.set_edgecolor((r,g,b,0.5))
def _update_transform(self):
self._shadow_transform = transforms.Affine2D().translate(self._ox, self._oy)
def _get_ox(self):
return self._ox
def _set_ox(self, ox):
self._ox = ox
self._update_transform()
def _get_oy(self):
return self._oy
def _set_oy(self, oy):
self._oy = oy
self._update_transform()
def get_path(self):
return self.patch.get_path()
def get_patch_transform(self):
return self.patch.get_patch_transform() + self._shadow_transform
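# Illustrative sketch (not part of the module): a Shadow is built from an
# existing patch and added to the same axes, usually before the patch itself
# so the patch is drawn on top.  `ax` is assumed to be a matplotlib Axes.
#
#     rect = Rectangle((0.2, 0.2), 0.4, 0.3, facecolor='c')
#     shadow = Shadow(rect, 0.01, -0.01)
#     ax.add_patch(shadow)
#     ax.add_patch(rect)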
class Rectangle(Patch):
"""
Draw a rectangle with lower left at *xy* = (*x*, *y*) with
specified *width* and *height*.
"""
def __str__(self):
return self.__class__.__name__ \
+ "(%g,%g;%gx%g)" % (self._x, self._y, self._width, self._height)
def __init__(self, xy, width, height, **kwargs):
"""
*fill* is a boolean indicating whether to fill the rectangle
Valid kwargs are:
%(Patch)s
"""
Patch.__init__(self, **kwargs)
self._x = xy[0]
self._y = xy[1]
self._width = width
self._height = height
# Note: This cannot be calculated until this is added to an Axes
self._rect_transform = transforms.IdentityTransform()
__init__.__doc__ = cbook.dedent(__init__.__doc__) % artist.kwdocd
def get_path(self):
"""
        Return the path of the rectangle
"""
return Path.unit_rectangle()
def _update_patch_transform(self):
"""NOTE: This cannot be called until after this has been added
to an Axes, otherwise unit conversion will fail. This
        makes it very important to call the accessor method and
not directly access the transformation member variable.
"""
x = self.convert_xunits(self._x)
y = self.convert_yunits(self._y)
width = self.convert_xunits(self._width)
height = self.convert_yunits(self._height)
bbox = transforms.Bbox.from_bounds(x, y, width, height)
self._rect_transform = transforms.BboxTransformTo(bbox)
def get_patch_transform(self):
self._update_patch_transform()
return self._rect_transform
def contains(self, mouseevent):
# special case the degenerate rectangle
if self._width==0 or self._height==0:
return False, {}
x, y = self.get_transform().inverted().transform_point(
(mouseevent.x, mouseevent.y))
return (x >= 0.0 and x <= 1.0 and y >= 0.0 and y <= 1.0), {}
def get_x(self):
"Return the left coord of the rectangle"
return self._x
def get_y(self):
"Return the bottom coord of the rectangle"
return self._y
def get_xy(self):
"Return the left and bottom coords of the rectangle"
return self._x, self._y
def get_width(self):
"Return the width of the rectangle"
return self._width
def get_height(self):
"Return the height of the rectangle"
return self._height
def set_x(self, x):
"""
Set the left coord of the rectangle
ACCEPTS: float
"""
self._x = x
def set_y(self, y):
"""
Set the bottom coord of the rectangle
ACCEPTS: float
"""
self._y = y
def set_xy(self, xy):
"""
Set the left and bottom coords of the rectangle
ACCEPTS: 2-item sequence
"""
self._x, self._y = xy
def set_width(self, w):
"""
        Set the width of the rectangle
ACCEPTS: float
"""
self._width = w
def set_height(self, h):
"""
        Set the height of the rectangle
ACCEPTS: float
"""
self._height = h
def set_bounds(self, *args):
"""
Set the bounds of the rectangle: l,b,w,h
ACCEPTS: (left, bottom, width, height)
"""
        if len(args)==1:
l,b,w,h = args[0]
else:
l,b,w,h = args
self._x = l
self._y = b
self._width = w
self._height = h
def get_bbox(self):
return transforms.Bbox.from_bounds(self._x, self._y, self._width, self._height)
xy = property(get_xy, set_xy)
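# Illustrative sketch (not part of the module): creating a Rectangle and
# adding it to an axes.  The pyplot-based figure setup is an assumption about
# the caller's environment, not something this module requires.
#
#     import matplotlib.pyplot as plt
#     fig = plt.figure()
#     ax = fig.add_subplot(111)
#     ax.add_patch(Rectangle((0.1, 0.2), 0.5, 0.3, facecolor='0.8', edgecolor='k'))
#     ax.set_xlim(0, 1); ax.set_ylim(0, 1)
#     plt.show()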
class RegularPolygon(Patch):
"""
A regular polygon patch.
"""
def __str__(self):
return "Poly%d(%g,%g)"%(self._numVertices,self._xy[0],self._xy[1])
def __init__(self, xy, numVertices, radius=5, orientation=0,
**kwargs):
"""
Constructor arguments:
*xy*
A length 2 tuple (*x*, *y*) of the center.
*numVertices*
the number of vertices.
*radius*
The distance from the center to each of the vertices.
*orientation*
rotates the polygon (in radians).
Valid kwargs are:
%(Patch)s
"""
self._xy = xy
self._numVertices = numVertices
self._orientation = orientation
self._radius = radius
self._path = Path.unit_regular_polygon(numVertices)
self._poly_transform = transforms.Affine2D()
self._update_transform()
Patch.__init__(self, **kwargs)
__init__.__doc__ = cbook.dedent(__init__.__doc__) % artist.kwdocd
def _update_transform(self):
self._poly_transform.clear() \
.scale(self.radius) \
.rotate(self.orientation) \
.translate(*self.xy)
def _get_xy(self):
return self._xy
def _set_xy(self, xy):
        self._xy = xy
        self._update_transform()
xy = property(_get_xy, _set_xy)
def _get_orientation(self):
return self._orientation
def _set_orientation(self, xy):
self._orientation = xy
orientation = property(_get_orientation, _set_orientation)
def _get_radius(self):
return self._radius
def _set_radius(self, xy):
self._radius = xy
radius = property(_get_radius, _set_radius)
def _get_numvertices(self):
return self._numVertices
def _set_numvertices(self, numVertices):
self._numVertices = numVertices
numvertices = property(_get_numvertices, _set_numvertices)
def get_path(self):
return self._path
def get_patch_transform(self):
self._update_transform()
return self._poly_transform
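# Illustrative sketch (not part of the module): a hexagon centered in the unit
# square; `ax` is assumed to be a matplotlib Axes as in the Rectangle example
# above.
#
#     hexagon = RegularPolygon((0.5, 0.5), numVertices=6, radius=0.2,
#                              orientation=np.pi/6., facecolor='g')
#     ax.add_patch(hexagon)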
class PathPatch(Patch):
"""
A general polycurve path patch.
"""
def __str__(self):
return "Poly((%g, %g) ...)" % tuple(self._path.vertices[0])
def __init__(self, path, **kwargs):
"""
*path* is a :class:`matplotlib.path.Path` object.
Valid kwargs are:
%(Patch)s
.. seealso::
:class:`Patch`:
For additional kwargs
"""
Patch.__init__(self, **kwargs)
self._path = path
__init__.__doc__ = cbook.dedent(__init__.__doc__) % artist.kwdocd
def get_path(self):
return self._path
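# Illustrative sketch (not part of the module): building a PathPatch from an
# explicit Path; `ax` is assumed to be a matplotlib Axes.
#
#     verts = [(0., 0.), (0.5, 1.), (1., 0.), (0., 0.)]
#     codes = [Path.MOVETO, Path.LINETO, Path.LINETO, Path.CLOSEPOLY]
#     ax.add_patch(PathPatch(Path(verts, codes), facecolor='orange'))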
class Polygon(Patch):
"""
A general polygon patch.
"""
def __str__(self):
return "Poly((%g, %g) ...)" % tuple(self._path.vertices[0])
def __init__(self, xy, closed=True, **kwargs):
"""
*xy* is a numpy array with shape Nx2.
If *closed* is *True*, the polygon will be closed so the
starting and ending points are the same.
Valid kwargs are:
%(Patch)s
.. seealso::
:class:`Patch`:
For additional kwargs
"""
Patch.__init__(self, **kwargs)
xy = np.asarray(xy, np.float_)
self._path = Path(xy)
self.set_closed(closed)
__init__.__doc__ = cbook.dedent(__init__.__doc__) % artist.kwdocd
def get_path(self):
return self._path
def get_closed(self):
return self._closed
def set_closed(self, closed):
self._closed = closed
xy = self._get_xy()
if closed:
if len(xy) and (xy[0] != xy[-1]).any():
xy = np.concatenate([xy, [xy[0]]])
else:
if len(xy)>2 and (xy[0]==xy[-1]).all():
xy = xy[0:-1]
self._set_xy(xy)
def get_xy(self):
return self._path.vertices
def set_xy(self, vertices):
self._path = Path(vertices)
_get_xy = get_xy
_set_xy = set_xy
xy = property(
get_xy, set_xy, None,
"""Set/get the vertices of the polygon. This property is
provided for backward compatibility with matplotlib 0.91.x
only. New code should use
:meth:`~matplotlib.patches.Polygon.get_xy` and
:meth:`~matplotlib.patches.Polygon.set_xy` instead.""")
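# Illustrative sketch (not part of the module): a closed triangle from an Nx2
# array of vertices; `ax` is assumed to be a matplotlib Axes.
#
#     xy = np.array([[0.1, 0.1], [0.5, 0.9], [0.9, 0.2]])
#     ax.add_patch(Polygon(xy, closed=True, facecolor='y'))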
class Wedge(Patch):
"""
Wedge shaped patch.
"""
def __str__(self):
return "Wedge(%g,%g)"%(self.theta1,self.theta2)
def __init__(self, center, r, theta1, theta2, width=None, **kwargs):
"""
Draw a wedge centered at *x*, *y* center with radius *r* that
sweeps *theta1* to *theta2* (in degrees). If *width* is given,
then a partial wedge is drawn from inner radius *r* - *width*
to outer radius *r*.
Valid kwargs are:
%(Patch)s
"""
Patch.__init__(self, **kwargs)
self.center = center
self.r,self.width = r,width
self.theta1,self.theta2 = theta1,theta2
# Inner and outer rings are connected unless the annulus is complete
delta=theta2-theta1
if abs((theta2-theta1) - 360) <= 1e-12:
theta1,theta2 = 0,360
connector = Path.MOVETO
else:
connector = Path.LINETO
# Form the outer ring
arc = Path.arc(theta1,theta2)
if width is not None:
            # Partial annulus needs to draw the outer ring
# followed by a reversed and scaled inner ring
v1 = arc.vertices
v2 = arc.vertices[::-1]*float(r-width)/r
v = np.vstack([v1,v2,v1[0,:],(0,0)])
c = np.hstack([arc.codes,arc.codes,connector,Path.CLOSEPOLY])
c[len(arc.codes)]=connector
else:
# Wedge doesn't need an inner ring
v = np.vstack([arc.vertices,[(0,0),arc.vertices[0,:],(0,0)]])
c = np.hstack([arc.codes,[connector,connector,Path.CLOSEPOLY]])
# Shift and scale the wedge to the final location.
v *= r
v += np.asarray(center)
self._path = Path(v,c)
self._patch_transform = transforms.IdentityTransform()
__init__.__doc__ = cbook.dedent(__init__.__doc__) % artist.kwdocd
def get_path(self):
return self._path
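# Illustrative sketch (not part of the module): a 120-degree annular wedge of
# outer radius 0.4 and ring width 0.1; `ax` is assumed to be a matplotlib Axes.
#
#     ax.add_patch(Wedge((0.5, 0.5), 0.4, 30, 150, width=0.1, facecolor='g'))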
# COVERAGE NOTE: Not used internally or from examples
class Arrow(Patch):
"""
An arrow patch.
"""
def __str__(self):
return "Arrow()"
_path = Path( [
[ 0.0, 0.1 ], [ 0.0, -0.1],
[ 0.8, -0.1 ], [ 0.8, -0.3],
[ 1.0, 0.0 ], [ 0.8, 0.3],
[ 0.8, 0.1 ], [ 0.0, 0.1] ] )
def __init__( self, x, y, dx, dy, width=1.0, **kwargs ):
"""
        Draws an arrow, starting at (*x*, *y*), with direction and length
        given by (*dx*, *dy*); the width of the arrow is scaled by *width*.
Valid kwargs are:
%(Patch)s
"""
Patch.__init__(self, **kwargs)
L = np.sqrt(dx**2+dy**2) or 1 # account for div by zero
cx = float(dx)/L
sx = float(dy)/L
trans1 = transforms.Affine2D().scale(L, width)
trans2 = transforms.Affine2D.from_values(cx, sx, -sx, cx, 0.0, 0.0)
trans3 = transforms.Affine2D().translate(x, y)
trans = trans1 + trans2 + trans3
self._patch_transform = trans.frozen()
__init__.__doc__ = cbook.dedent(__init__.__doc__) % artist.kwdocd
def get_path(self):
return self._path
def get_patch_transform(self):
return self._patch_transform
class FancyArrow(Polygon):
"""
Like Arrow, but lets you set head width and head height independently.
"""
def __str__(self):
return "FancyArrow()"
def __init__(self, x, y, dx, dy, width=0.001, length_includes_head=False, \
head_width=None, head_length=None, shape='full', overhang=0, \
head_starts_at_zero=False,**kwargs):
"""
Constructor arguments
*length_includes_head*:
*True* if head is counted in calculating the length.
*shape*: ['full', 'left', 'right']
*overhang*:
distance that the arrow is swept back (0 overhang means
triangular shape).
*head_starts_at_zero*:
If *True*, the head starts being drawn at coordinate 0
instead of ending at coordinate 0.
Valid kwargs are:
%(Patch)s
"""
if head_width is None:
head_width = 3 * width
if head_length is None:
head_length = 1.5 * head_width
distance = np.sqrt(dx**2 + dy**2)
if length_includes_head:
length=distance
else:
length=distance+head_length
if not length:
verts = [] #display nothing if empty
else:
#start by drawing horizontal arrow, point at (0,0)
hw, hl, hs, lw = head_width, head_length, overhang, width
left_half_arrow = np.array([
[0.0,0.0], #tip
[-hl, -hw/2.0], #leftmost
[-hl*(1-hs), -lw/2.0], #meets stem
[-length, -lw/2.0], #bottom left
[-length, 0],
])
#if we're not including the head, shift up by head length
if not length_includes_head:
left_half_arrow += [head_length, 0]
#if the head starts at 0, shift up by another head length
if head_starts_at_zero:
left_half_arrow += [head_length/2.0, 0]
#figure out the shape, and complete accordingly
if shape == 'left':
coords = left_half_arrow
else:
right_half_arrow = left_half_arrow*[1,-1]
if shape == 'right':
coords = right_half_arrow
elif shape == 'full':
# The half-arrows contain the midpoint of the stem,
# which we can omit from the full arrow. Including it
# twice caused a problem with xpdf.
coords=np.concatenate([left_half_arrow[:-1],
right_half_arrow[-2::-1]])
else:
                    raise ValueError("Got unknown shape: %s" % shape)
cx = float(dx)/distance
sx = float(dy)/distance
M = np.array([[cx, sx],[-sx,cx]])
verts = np.dot(coords, M) + (x+dx, y+dy)
Polygon.__init__(self, map(tuple, verts), **kwargs)
__init__.__doc__ = cbook.dedent(__init__.__doc__) % artist.kwdocd
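# Illustrative sketch (not part of the module): a filled arrow in data
# coordinates; `ax` is assumed to be a matplotlib Axes.
#
#     arrow = FancyArrow(0.1, 0.1, 0.6, 0.4, width=0.01, head_width=0.05,
#                        head_length=0.1, length_includes_head=True,
#                        facecolor='k')
#     ax.add_patch(arrow)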
class YAArrow(Patch):
"""
Yet another arrow class.
This is an arrow that is defined in display space and has a tip at
*x1*, *y1* and a base at *x2*, *y2*.
"""
def __str__(self):
return "YAArrow()"
def __init__(self, figure, xytip, xybase, width=4, frac=0.1, headwidth=12, **kwargs):
"""
Constructor arguments:
*xytip*
(*x*, *y*) location of arrow tip
*xybase*
(*x*, *y*) location the arrow base mid point
*figure*
The :class:`~matplotlib.figure.Figure` instance
(fig.dpi)
*width*
The width of the arrow in points
*frac*
The fraction of the arrow length occupied by the head
*headwidth*
The width of the base of the arrow head in points
Valid kwargs are:
%(Patch)s
"""
self.figure = figure
self.xytip = xytip
self.xybase = xybase
self.width = width
self.frac = frac
self.headwidth = headwidth
Patch.__init__(self, **kwargs)
__init__.__doc__ = cbook.dedent(__init__.__doc__) % artist.kwdocd
def get_path(self):
# Since this is dpi dependent, we need to recompute the path
# every time.
# the base vertices
x1, y1 = self.xytip
x2, y2 = self.xybase
k1 = self.width*self.figure.dpi/72./2.
k2 = self.headwidth*self.figure.dpi/72./2.
xb1, yb1, xb2, yb2 = self.getpoints(x1, y1, x2, y2, k1)
# a point on the segment 20% of the distance from the tip to the base
theta = math.atan2(y2-y1, x2-x1)
r = math.sqrt((y2-y1)**2. + (x2-x1)**2.)
xm = x1 + self.frac * r * math.cos(theta)
ym = y1 + self.frac * r * math.sin(theta)
xc1, yc1, xc2, yc2 = self.getpoints(x1, y1, xm, ym, k1)
xd1, yd1, xd2, yd2 = self.getpoints(x1, y1, xm, ym, k2)
xs = self.convert_xunits([xb1, xb2, xc2, xd2, x1, xd1, xc1, xb1])
ys = self.convert_yunits([yb1, yb2, yc2, yd2, y1, yd1, yc1, yb1])
return Path(zip(xs, ys))
def get_patch_transform(self):
return transforms.IdentityTransform()
def getpoints(self, x1,y1,x2,y2, k):
"""
        For the line segment defined by (*x1*, *y1*) and (*x2*, *y2*),
        return the two points on the line perpendicular to it that
        passes through (*x2*, *y2*), each at distance *k* from
        (*x2*, *y2*).
"""
x1,y1,x2,y2,k = map(float, (x1,y1,x2,y2,k))
if y2-y1 == 0:
return x2, y2+k, x2, y2-k
elif x2-x1 == 0:
return x2+k, y2, x2-k, y2
m = (y2-y1)/(x2-x1)
pm = -1./m
a = 1
b = -2*y2
c = y2**2. - k**2.*pm**2./(1. + pm**2.)
y3a = (-b + math.sqrt(b**2.-4*a*c))/(2.*a)
x3a = (y3a - y2)/pm + x2
y3b = (-b - math.sqrt(b**2.-4*a*c))/(2.*a)
x3b = (y3b - y2)/pm + x2
return x3a, y3a, x3b, y3b
class CirclePolygon(RegularPolygon):
"""
A polygon-approximation of a circle patch.
"""
def __str__(self):
        return "CirclePolygon(%d,%d)"%self._xy
def __init__(self, xy, radius=5,
resolution=20, # the number of vertices
**kwargs):
"""
Create a circle at *xy* = (*x*, *y*) with given *radius*.
This circle is approximated by a regular polygon with
*resolution* sides. For a smoother circle drawn with splines,
see :class:`~matplotlib.patches.Circle`.
Valid kwargs are:
%(Patch)s
"""
RegularPolygon.__init__(self, xy,
resolution,
radius,
orientation=0,
**kwargs)
__init__.__doc__ = cbook.dedent(__init__.__doc__) % artist.kwdocd
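# Illustrative sketch (not part of the module): an octagonal approximation of
# a circle; `ax` is assumed to be a matplotlib Axes.
#
#     ax.add_patch(CirclePolygon((0.3, 0.5), radius=0.2, resolution=8,
#                                facecolor='none', edgecolor='b'))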
class Ellipse(Patch):
"""
A scale-free ellipse.
"""
def __str__(self):
return "Ellipse(%s,%s;%sx%s)"%(self.center[0],self.center[1],self.width,self.height)
def __init__(self, xy, width, height, angle=0.0, **kwargs):
"""
*xy*
center of ellipse
*width*
length of horizontal axis
*height*
length of vertical axis
*angle*
rotation in degrees (anti-clockwise)
Valid kwargs are:
%(Patch)s
"""
Patch.__init__(self, **kwargs)
self.center = xy
self.width, self.height = width, height
self.angle = angle
self._path = Path.unit_circle()
# Note: This cannot be calculated until this is added to an Axes
self._patch_transform = transforms.IdentityTransform()
__init__.__doc__ = cbook.dedent(__init__.__doc__) % artist.kwdocd
def _recompute_transform(self):
"""NOTE: This cannot be called until after this has been added
to an Axes, otherwise unit conversion will fail. This
        makes it very important to call the accessor method and
not directly access the transformation member variable.
"""
center = (self.convert_xunits(self.center[0]),
self.convert_yunits(self.center[1]))
width = self.convert_xunits(self.width)
height = self.convert_yunits(self.height)
self._patch_transform = transforms.Affine2D() \
.scale(width * 0.5, height * 0.5) \
.rotate_deg(self.angle) \
.translate(*center)
def get_path(self):
"""
        Return the path of the ellipse
"""
return self._path
def get_patch_transform(self):
self._recompute_transform()
return self._patch_transform
def contains(self,ev):
if ev.x is None or ev.y is None: return False,{}
x, y = self.get_transform().inverted().transform_point((ev.x, ev.y))
return (x*x + y*y) <= 1.0, {}
class Circle(Ellipse):
"""
A circle patch.
"""
def __str__(self):
return "Circle((%g,%g),r=%g)"%(self.center[0],self.center[1],self.radius)
def __init__(self, xy, radius=5, **kwargs):
"""
Create true circle at center *xy* = (*x*, *y*) with given
*radius*. Unlike :class:`~matplotlib.patches.CirclePolygon`
which is a polygonal approximation, this uses Bézier splines
and is much closer to a scale-free circle.
Valid kwargs are:
%(Patch)s
"""
if 'resolution' in kwargs:
import warnings
warnings.warn('Circle is now scale free. Use CirclePolygon instead!', DeprecationWarning)
kwargs.pop('resolution')
self.radius = radius
Ellipse.__init__(self, xy, radius*2, radius*2, **kwargs)
__init__.__doc__ = cbook.dedent(__init__.__doc__) % artist.kwdocd
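# Illustrative sketch (not part of the module): a spline-based circle, which
# stays smooth at any zoom level, in contrast to CirclePolygon above; `ax` is
# assumed to be a matplotlib Axes.
#
#     ax.add_patch(Circle((0.7, 0.5), radius=0.2, facecolor='none',
#                         edgecolor='r'))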
class Arc(Ellipse):
"""
An elliptical arc. Because it performs various optimizations, it
can not be filled.
The arc must be used in an :class:`~matplotlib.axes.Axes`
instance---it can not be added directly to a
:class:`~matplotlib.figure.Figure`---because it is optimized to
only render the segments that are inside the axes bounding box
with high resolution.
"""
def __str__(self):
return "Arc(%s,%s;%sx%s)"%(self.center[0],self.center[1],self.width,self.height)
def __init__(self, xy, width, height, angle=0.0, theta1=0.0, theta2=360.0, **kwargs):
"""
The following args are supported:
*xy*
center of ellipse
*width*
length of horizontal axis
*height*
length of vertical axis
*angle*
rotation in degrees (anti-clockwise)
*theta1*
starting angle of the arc in degrees
*theta2*
ending angle of the arc in degrees
If *theta1* and *theta2* are not provided, the arc will form a
complete ellipse.
Valid kwargs are:
%(Patch)s
"""
        fill = kwargs.pop('fill', False)
if fill:
raise ValueError("Arc objects can not be filled")
kwargs['fill'] = False
Ellipse.__init__(self, xy, width, height, angle, **kwargs)
self.theta1 = theta1
self.theta2 = theta2
__init__.__doc__ = cbook.dedent(__init__.__doc__) % artist.kwdocd
def draw(self, renderer):
"""
Ellipses are normally drawn using an approximation that uses
eight cubic bezier splines. The error of this approximation
is 1.89818e-6, according to this unverified source:
Lancaster, Don. Approximating a Circle or an Ellipse Using
Four Bezier Cubic Splines.
http://www.tinaja.com/glib/ellipse4.pdf
There is a use case where very large ellipses must be drawn
with very high accuracy, and it is too expensive to render the
entire ellipse with enough segments (either splines or line
segments). Therefore, in the case where either radius of the
ellipse is large enough that the error of the spline
approximation will be visible (greater than one pixel offset
from the ideal), a different technique is used.
In that case, only the visible parts of the ellipse are drawn,
with each visible arc using a fixed number of spline segments
(8). The algorithm proceeds as follows:
1. The points where the ellipse intersects the axes bounding
           box are located. (This is done by performing an inverse
transformation on the axes bbox such that it is relative
to the unit circle -- this makes the intersection
calculation much easier than doing rotated ellipse
intersection directly).
This uses the "line intersecting a circle" algorithm
from:
Vince, John. Geometry for Computer Graphics: Formulae,
Examples & Proofs. London: Springer-Verlag, 2005.
2. The angles of each of the intersection points are
calculated.
3. Proceeding counterclockwise starting in the positive
x-direction, each of the visible arc-segments between the
pairs of vertices are drawn using the bezier arc
approximation technique implemented in
:meth:`matplotlib.path.Path.arc`.
"""
if not hasattr(self, 'axes'):
raise RuntimeError('Arcs can only be used in Axes instances')
self._recompute_transform()
# Get the width and height in pixels
width = self.convert_xunits(self.width)
height = self.convert_yunits(self.height)
width, height = self.get_transform().transform_point(
(width, height))
inv_error = (1.0 / 1.89818e-6) * 0.5
if width < inv_error and height < inv_error:
self._path = Path.arc(self.theta1, self.theta2)
return Patch.draw(self, renderer)
def iter_circle_intersect_on_line(x0, y0, x1, y1):
dx = x1 - x0
dy = y1 - y0
dr2 = dx*dx + dy*dy
D = x0*y1 - x1*y0
D2 = D*D
discrim = dr2 - D2
# Single (tangential) intersection
if discrim == 0.0:
x = (D*dy) / dr2
y = (-D*dx) / dr2
yield x, y
elif discrim > 0.0:
# The definition of "sign" here is different from
# np.sign: we never want to get 0.0
if dy < 0.0:
sign_dy = -1.0
else:
sign_dy = 1.0
sqrt_discrim = np.sqrt(discrim)
for sign in (1., -1.):
x = (D*dy + sign * sign_dy * dx * sqrt_discrim) / dr2
y = (-D*dx + sign * np.abs(dy) * sqrt_discrim) / dr2
yield x, y
def iter_circle_intersect_on_line_seg(x0, y0, x1, y1):
epsilon = 1e-9
if x1 < x0:
x0e, x1e = x1, x0
else:
x0e, x1e = x0, x1
if y1 < y0:
y0e, y1e = y1, y0
else:
y0e, y1e = y0, y1
x0e -= epsilon
y0e -= epsilon
x1e += epsilon
y1e += epsilon
for x, y in iter_circle_intersect_on_line(x0, y0, x1, y1):
if x >= x0e and x <= x1e and y >= y0e and y <= y1e:
yield x, y
# Transforms the axes box_path so that it is relative to the unit
# circle in the same way that it is relative to the desired
# ellipse.
box_path = Path.unit_rectangle()
box_path_transform = transforms.BboxTransformTo(self.axes.bbox) + \
self.get_transform().inverted()
box_path = box_path.transformed(box_path_transform)
PI = np.pi
TWOPI = PI * 2.0
RAD2DEG = 180.0 / PI
DEG2RAD = PI / 180.0
theta1 = self.theta1
theta2 = self.theta2
thetas = {}
# For each of the point pairs, there is a line segment
for p0, p1 in zip(box_path.vertices[:-1], box_path.vertices[1:]):
x0, y0 = p0
x1, y1 = p1
for x, y in iter_circle_intersect_on_line_seg(x0, y0, x1, y1):
theta = np.arccos(x)
if y < 0:
theta = TWOPI - theta
                # Convert radians to degrees
theta *= RAD2DEG
if theta > theta1 and theta < theta2:
thetas[theta] = None
thetas = thetas.keys()
thetas.sort()
thetas.append(theta2)
last_theta = theta1
theta1_rad = theta1 * DEG2RAD
inside = box_path.contains_point((np.cos(theta1_rad), np.sin(theta1_rad)))
for theta in thetas:
if inside:
self._path = Path.arc(last_theta, theta, 8)
Patch.draw(self, renderer)
inside = False
else:
inside = True
last_theta = theta
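# Illustrative sketch (not part of the module): an unfilled elliptical arc.
# An Arc must be added to an Axes before it can be drawn; `ax` is assumed to
# be a matplotlib Axes.
#
#     ax.add_patch(Arc((0.5, 0.5), 0.8, 0.4, angle=30., theta1=0., theta2=270.))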
def bbox_artist(artist, renderer, props=None, fill=True):
"""
This is a debug function to draw a rectangle around the bounding
box returned by
:meth:`~matplotlib.artist.Artist.get_window_extent` of an artist,
to test whether the artist is returning the correct bbox.
*props* is a dict of rectangle props with the additional property
'pad' that sets the padding around the bbox in points.
"""
if props is None: props = {}
props = props.copy() # don't want to alter the pad externally
pad = props.pop('pad', 4)
pad = renderer.points_to_pixels(pad)
bbox = artist.get_window_extent(renderer)
l,b,w,h = bbox.bounds
l-=pad/2.
b-=pad/2.
w+=pad
h+=pad
r = Rectangle(xy=(l,b),
width=w,
height=h,
fill=fill,
)
r.set_transform(transforms.IdentityTransform())
r.set_clip_on( False )
r.update(props)
r.draw(renderer)
def draw_bbox(bbox, renderer, color='k', trans=None):
"""
This is a debug function to draw a rectangle around the bounding
box returned by
:meth:`~matplotlib.artist.Artist.get_window_extent` of an artist,
to test whether the artist is returning the correct bbox.
"""
l,b,w,h = bbox.get_bounds()
r = Rectangle(xy=(l,b),
width=w,
height=h,
edgecolor=color,
fill=False,
)
if trans is not None: r.set_transform(trans)
r.set_clip_on( False )
r.draw(renderer)
def _pprint_table(_table, leadingspace=2):
"""
    Given a list of lists of strings, return a string in reST table format.
"""
if leadingspace:
pad = ' '*leadingspace
else:
pad = ''
columns = [[] for cell in _table[0]]
for row in _table:
for column, cell in zip(columns, row):
column.append(cell)
col_len = [max([len(cell) for cell in column]) for column in columns]
lines = []
table_formatstr = pad + ' '.join([('=' * cl) for cl in col_len])
lines.append('')
lines.append(table_formatstr)
lines.append(pad + ' '.join([cell.ljust(cl) for cell, cl in zip(_table[0], col_len)]))
lines.append(table_formatstr)
lines.extend([(pad + ' '.join([cell.ljust(cl) for cell, cl in zip(row, col_len)]))
for row in _table[1:]])
lines.append(table_formatstr)
lines.append('')
return "\n".join(lines)
def _pprint_styles(_styles, leadingspace=2):
"""
A helper function for the _Style class. Given the dictionary of
(stylename : styleclass), return a formatted string listing all the
styles. Used to update the documentation.
"""
if leadingspace:
pad = ' '*leadingspace
else:
pad = ''
names, attrss, clss = [], [], []
import inspect
_table = [["Class", "Name", "Attrs"]]
for name, cls in sorted(_styles.items()):
args, varargs, varkw, defaults = inspect.getargspec(cls.__init__)
if defaults:
args = [(argname, argdefault) \
for argname, argdefault in zip(args[1:], defaults)]
else:
args = None
if args is None:
argstr = 'None'
else:
argstr = ",".join([("%s=%s" % (an, av)) for an, av in args])
#adding quotes for now to work around tex bug treating '-' as itemize
_table.append([cls.__name__, "'%s'"%name, argstr])
return _pprint_table(_table)
class _Style(object):
"""
A base class for the Styles. It is meant to be a container class,
    where actual styles are declared as subclasses of it, and it
provides some helper functions.
"""
def __new__(self, stylename, **kw):
"""
return the instance of the subclass with the given style name.
"""
# the "class" should have the _style_list attribute, which is
        # a dictionary of stylename to style class pairs.
_list = stylename.replace(" ","").split(",")
_name = _list[0].lower()
try:
_cls = self._style_list[_name]
except KeyError:
raise ValueError("Unknown style : %s" % stylename)
try:
_args_pair = [cs.split("=") for cs in _list[1:]]
_args = dict([(k, float(v)) for k, v in _args_pair])
except ValueError:
raise ValueError("Incorrect style argument : %s" % stylename)
_args.update(kw)
return _cls(**_args)
@classmethod
def get_styles(klass):
"""
A class method which returns a dictionary of available styles.
"""
return klass._style_list
@classmethod
def pprint_styles(klass):
"""
A class method which returns a string of the available styles.
"""
return _pprint_styles(klass._style_list)
class BoxStyle(_Style):
"""
:class:`BoxStyle` is a container class which defines several
    boxstyle classes, which are used for :class:`FancyBboxPatch`.
A style object can be created as::
BoxStyle.Round(pad=0.2)
or::
BoxStyle("Round", pad=0.2)
or::
BoxStyle("Round, pad=0.2")
    The following boxstyle classes are defined.
%(AvailableBoxstyles)s
    An instance of any boxstyle class is a callable object,
whose call signature is::
__call__(self, x0, y0, width, height, mutation_size, aspect_ratio=1.)
and returns a :class:`Path` instance. *x0*, *y0*, *width* and
*height* specify the location and size of the box to be
    drawn. *mutation_size* determines the overall size of the
    mutation (by which I mean the transformation of the rectangle to
    the fancy box). *aspect_ratio* determines the aspect ratio of
the mutation.
.. plot:: mpl_examples/pylab_examples/fancybox_demo2.py
"""
_style_list = {}
class _Base(object):
"""
:class:`BBoxTransmuterBase` and its derivatives are used to make a
fancy box around a given rectangle. The :meth:`__call__` method
returns the :class:`~matplotlib.path.Path` of the fancy box. This
class is not an artist and actual drawing of the fancy box is done
by the :class:`FancyBboxPatch` class.
"""
# The derived classes are required to be able to be initialized
        # w/o arguments, i.e., all their arguments (except self) must have
# the default values.
def __init__(self):
"""
            initialization.
"""
super(BoxStyle._Base, self).__init__()
def transmute(self, x0, y0, width, height, mutation_size):
"""
            The transmute method is the very core of a boxstyle
            class and must be overridden in the subclasses. It receives
            the location and size of the rectangle, and the
            mutation_size, by which the amount of padding (among other
            things) is scaled. It returns a
            :class:`~matplotlib.path.Path` instance.
"""
raise NotImplementedError('Derived must override')
def __call__(self, x0, y0, width, height, mutation_size,
aspect_ratio=1.):
"""
Given the location and size of the box, return the path of
the box around it.
- *x0*, *y0*, *width*, *height* : location and size of the box
- *mutation_size* : a reference scale for the mutation.
            - *aspect_ratio* : aspect ratio for the mutation.
"""
# The __call__ method is a thin wrapper around the transmute method
            # and takes care of the aspect ratio.
if aspect_ratio is not None:
# Squeeze the given height by the aspect_ratio
y0, height = y0/aspect_ratio, height/aspect_ratio
# call transmute method with squeezed height.
path = self.transmute(x0, y0, width, height, mutation_size)
vertices, codes = path.vertices, path.codes
# Restore the height
vertices[:,1] = vertices[:,1] * aspect_ratio
return Path(vertices, codes)
else:
return self.transmute(x0, y0, width, height, mutation_size)
class Square(_Base):
"""
A simple square box.
"""
def __init__(self, pad=0.3):
"""
*pad*
amount of padding
"""
self.pad = pad
super(BoxStyle.Square, self).__init__()
def transmute(self, x0, y0, width, height, mutation_size):
# padding
pad = mutation_size * self.pad
# width and height with padding added.
width, height = width + 2.*pad, \
height + 2.*pad,
# boundary of the padded box
x0, y0 = x0-pad, y0-pad,
x1, y1 = x0+width, y0 + height
cp = [(x0, y0), (x1, y0), (x1, y1), (x0, y1),
(x0, y0), (x0, y0)]
com = [Path.MOVETO,
Path.LINETO,
Path.LINETO,
Path.LINETO,
Path.LINETO,
Path.CLOSEPOLY]
path = Path(cp, com)
return path
_style_list["square"] = Square
class LArrow(_Base):
"""
(left) Arrow Box
"""
def __init__(self, pad=0.3):
self.pad = pad
super(BoxStyle.LArrow, self).__init__()
def transmute(self, x0, y0, width, height, mutation_size):
# padding
pad = mutation_size * self.pad
# width and height with padding added.
width, height = width + 2.*pad, \
height + 2.*pad,
# boundary of the padded box
x0, y0 = x0-pad, y0-pad,
x1, y1 = x0+width, y0 + height
dx = (y1-y0)/2.
dxx = dx*.5
# adjust x0. 1.4 <- sqrt(2)
x0 = x0 + pad / 1.4
cp = [(x0+dxx, y0), (x1, y0), (x1, y1), (x0+dxx, y1),
(x0+dxx, y1+dxx), (x0-dx, y0+dx), (x0+dxx, y0-dxx), # arrow
(x0+dxx, y0), (x0+dxx, y0)]
com = [Path.MOVETO, Path.LINETO, Path.LINETO, Path.LINETO,
Path.LINETO, Path.LINETO, Path.LINETO,
Path.LINETO, Path.CLOSEPOLY]
path = Path(cp, com)
return path
_style_list["larrow"] = LArrow
class RArrow(LArrow):
"""
(right) Arrow Box
"""
def __init__(self, pad=0.3):
self.pad = pad
super(BoxStyle.RArrow, self).__init__()
def transmute(self, x0, y0, width, height, mutation_size):
p = BoxStyle.LArrow.transmute(self, x0, y0,
width, height, mutation_size)
p.vertices[:,0] = 2*x0 + width - p.vertices[:,0]
return p
_style_list["rarrow"] = RArrow
class Round(_Base):
"""
A box with round corners.
"""
def __init__(self, pad=0.3, rounding_size=None):
"""
*pad*
amount of padding
*rounding_size*
rounding radius of corners. *pad* if None
"""
self.pad = pad
self.rounding_size = rounding_size
super(BoxStyle.Round, self).__init__()
def transmute(self, x0, y0, width, height, mutation_size):
# padding
pad = mutation_size * self.pad
            # size of the rounding corner
if self.rounding_size:
dr = mutation_size * self.rounding_size
else:
dr = pad
width, height = width + 2.*pad, \
height + 2.*pad,
x0, y0 = x0-pad, y0-pad,
x1, y1 = x0+width, y0 + height
            # Round corners are implemented as quadratic bezier curves, e.g.
# [(x0, y0-dr), (x0, y0), (x0+dr, y0)] for lower left corner.
cp = [(x0+dr, y0),
(x1-dr, y0),
(x1, y0), (x1, y0+dr),
(x1, y1-dr),
(x1, y1), (x1-dr, y1),
(x0+dr, y1),
(x0, y1), (x0, y1-dr),
(x0, y0+dr),
(x0, y0), (x0+dr, y0),
(x0+dr, y0)]
com = [Path.MOVETO,
Path.LINETO,
Path.CURVE3, Path.CURVE3,
Path.LINETO,
Path.CURVE3, Path.CURVE3,
Path.LINETO,
Path.CURVE3, Path.CURVE3,
Path.LINETO,
Path.CURVE3, Path.CURVE3,
Path.CLOSEPOLY]
path = Path(cp, com)
return path
_style_list["round"] = Round
class Round4(_Base):
"""
Another box with round edges.
"""
def __init__(self, pad=0.3, rounding_size=None):
"""
*pad*
amount of padding
*rounding_size*
              rounding size of edges; half of *pad* if None
"""
self.pad = pad
self.rounding_size = rounding_size
super(BoxStyle.Round4, self).__init__()
def transmute(self, x0, y0, width, height, mutation_size):
# padding
pad = mutation_size * self.pad
            # rounding size; use half of the pad if not set.
if self.rounding_size:
dr = mutation_size * self.rounding_size
else:
dr = pad / 2.
width, height = width + 2.*pad - 2*dr, \
height + 2.*pad - 2*dr,
x0, y0 = x0-pad+dr, y0-pad+dr,
x1, y1 = x0+width, y0 + height
cp = [(x0, y0),
(x0+dr, y0-dr), (x1-dr, y0-dr), (x1, y0),
(x1+dr, y0+dr), (x1+dr, y1-dr), (x1, y1),
(x1-dr, y1+dr), (x0+dr, y1+dr), (x0, y1),
(x0-dr, y1-dr), (x0-dr, y0+dr), (x0, y0),
(x0, y0)]
com = [Path.MOVETO,
Path.CURVE4, Path.CURVE4, Path.CURVE4,
Path.CURVE4, Path.CURVE4, Path.CURVE4,
Path.CURVE4, Path.CURVE4, Path.CURVE4,
Path.CURVE4, Path.CURVE4, Path.CURVE4,
Path.CLOSEPOLY]
path = Path(cp, com)
return path
_style_list["round4"] = Round4
class Sawtooth(_Base):
"""
A sawtooth box.
"""
def __init__(self, pad=0.3, tooth_size=None):
"""
*pad*
amount of padding
*tooth_size*
              size of the sawtooth; half of *pad* if None
"""
self.pad = pad
self.tooth_size = tooth_size
super(BoxStyle.Sawtooth, self).__init__()
def _get_sawtooth_vertices(self, x0, y0, width, height, mutation_size):
# padding
pad = mutation_size * self.pad
# size of sawtooth
if self.tooth_size is None:
tooth_size = self.pad * .5 * mutation_size
else:
tooth_size = self.tooth_size * mutation_size
tooth_size2 = tooth_size / 2.
width, height = width + 2.*pad - tooth_size, \
height + 2.*pad - tooth_size,
# the sizes of the vertical and horizontal sawtooth are
# separately adjusted to fit the given box size.
dsx_n = int(round((width - tooth_size) / (tooth_size * 2))) * 2
dsx = (width - tooth_size) / dsx_n
dsy_n = int(round((height - tooth_size) / (tooth_size * 2))) * 2
dsy = (height - tooth_size) / dsy_n
x0, y0 = x0-pad+tooth_size2, y0-pad+tooth_size2
x1, y1 = x0+width, y0 + height
bottom_saw_x = [x0] + \
[x0 + tooth_size2 + dsx*.5* i for i in range(dsx_n*2)] + \
[x1 - tooth_size2]
bottom_saw_y = [y0] + \
[y0 - tooth_size2, y0, y0 + tooth_size2, y0] * dsx_n + \
[y0 - tooth_size2]
right_saw_x = [x1] + \
[x1 + tooth_size2, x1, x1 - tooth_size2, x1] * dsx_n + \
[x1 + tooth_size2]
right_saw_y = [y0] + \
[y0 + tooth_size2 + dsy*.5* i for i in range(dsy_n*2)] + \
[y1 - tooth_size2]
top_saw_x = [x1] + \
[x1 - tooth_size2 - dsx*.5* i for i in range(dsx_n*2)] + \
[x0 + tooth_size2]
top_saw_y = [y1] + \
[y1 + tooth_size2, y1, y1 - tooth_size2, y1] * dsx_n + \
[y1 + tooth_size2]
left_saw_x = [x0] + \
[x0 - tooth_size2, x0, x0 + tooth_size2, x0] * dsy_n + \
[x0 - tooth_size2]
left_saw_y = [y1] + \
[y1 - tooth_size2 - dsy*.5* i for i in range(dsy_n*2)] + \
[y0 + tooth_size2]
saw_vertices = zip(bottom_saw_x, bottom_saw_y) + \
zip(right_saw_x, right_saw_y) + \
zip(top_saw_x, top_saw_y) + \
zip(left_saw_x, left_saw_y) + \
[(bottom_saw_x[0], bottom_saw_y[0])]
return saw_vertices
def transmute(self, x0, y0, width, height, mutation_size):
saw_vertices = self._get_sawtooth_vertices(x0, y0, width, height, mutation_size)
path = Path(saw_vertices)
return path
_style_list["sawtooth"] = Sawtooth
class Roundtooth(Sawtooth):
"""
        A box with a rounded sawtooth edge.
"""
def __init__(self, pad=0.3, tooth_size=None):
"""
*pad*
amount of padding
*tooth_size*
              size of the sawtooth; half of *pad* if None
"""
super(BoxStyle.Roundtooth, self).__init__(pad, tooth_size)
def transmute(self, x0, y0, width, height, mutation_size):
saw_vertices = self._get_sawtooth_vertices(x0, y0, width, height, mutation_size)
cp = [Path.MOVETO] + ([Path.CURVE3, Path.CURVE3] * ((len(saw_vertices)-1)//2))
path = Path(saw_vertices, cp)
return path
_style_list["roundtooth"] = Roundtooth
__doc__ = cbook.dedent(__doc__) % \
{"AvailableBoxstyles": _pprint_styles(_style_list)}
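# Illustrative sketch (not part of the module): the equivalent ways of
# building a box style described in the BoxStyle docstring, plus a direct call
# that returns the mutated Path.  The numeric values are arbitrary examples.
#
#     bs = BoxStyle("round", pad=0.2)           # same as BoxStyle.Round(pad=0.2)
#     box_path = bs(0., 0., 1., 1., mutation_size=2.)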
class FancyBboxPatch(Patch):
"""
Draw a fancy box around a rectangle with lower left at *xy*=(*x*,
*y*) with specified width and height.
:class:`FancyBboxPatch` class is similar to :class:`Rectangle`
class, but it draws a fancy box around the rectangle. The
transformation of the rectangle box to the fancy box is delegated
to the :class:`BoxTransmuterBase` and its derived classes.
"""
def __str__(self):
return self.__class__.__name__ \
            + "(%g,%g;%gx%g)" % (self._x, self._y, self._width, self._height)
def __init__(self, xy, width, height,
boxstyle="round",
bbox_transmuter=None,
mutation_scale=1.,
mutation_aspect=None,
**kwargs):
"""
*xy* = lower left corner
*width*, *height*
*boxstyle* determines what kind of fancy box will be drawn. It
        can be a string of the style name with optional comma-separated
        attributes, or an instance of :class:`BoxStyle`. The following box
styles are available.
%(AvailableBoxstyles)s
*mutation_scale* : a value with which attributes of boxstyle
(e.g., pad) will be scaled. default=1.
*mutation_aspect* : The height of the rectangle will be
squeezed by this value before the mutation and the mutated
box will be stretched by the inverse of it. default=None.
Valid kwargs are:
%(Patch)s
"""
Patch.__init__(self, **kwargs)
self._x = xy[0]
self._y = xy[1]
self._width = width
self._height = height
if boxstyle == "custom":
if bbox_transmuter is None:
raise ValueError("bbox_transmuter argument is needed with custom boxstyle")
self._bbox_transmuter = bbox_transmuter
else:
self.set_boxstyle(boxstyle)
self._mutation_scale=mutation_scale
self._mutation_aspect=mutation_aspect
kwdoc = dict()
kwdoc["AvailableBoxstyles"]=_pprint_styles(BoxStyle._style_list)
kwdoc.update(artist.kwdocd)
__init__.__doc__ = cbook.dedent(__init__.__doc__) % kwdoc
del kwdoc
def set_boxstyle(self, boxstyle=None, **kw):
"""
Set the box style.
*boxstyle* can be a string with boxstyle name with optional
comma-separated attributes. Alternatively, the attrs can
be provided as keywords::
set_boxstyle("round,pad=0.2")
set_boxstyle("round", pad=0.2)
        Old attrs are simply forgotten.
Without argument (or with *boxstyle* = None), it returns
available box styles.
ACCEPTS: [ %(AvailableBoxstyles)s ]
"""
        if boxstyle is None:
return BoxStyle.pprint_styles()
if isinstance(boxstyle, BoxStyle._Base):
self._bbox_transmuter = boxstyle
elif callable(boxstyle):
self._bbox_transmuter = boxstyle
else:
self._bbox_transmuter = BoxStyle(boxstyle, **kw)
kwdoc = dict()
kwdoc["AvailableBoxstyles"]=_pprint_styles(BoxStyle._style_list)
kwdoc.update(artist.kwdocd)
set_boxstyle.__doc__ = cbook.dedent(set_boxstyle.__doc__) % kwdoc
del kwdoc
def set_mutation_scale(self, scale):
"""
Set the mutation scale.
ACCEPTS: float
"""
self._mutation_scale=scale
def get_mutation_scale(self):
"""
Return the mutation scale.
"""
return self._mutation_scale
def set_mutation_aspect(self, aspect):
"""
Set the aspect ratio of the bbox mutation.
ACCEPTS: float
"""
self._mutation_aspect=aspect
def get_mutation_aspect(self):
"""
Return the aspect ratio of the bbox mutation.
"""
return self._mutation_aspect
def get_boxstyle(self):
"Return the boxstyle object"
return self._bbox_transmuter
def get_path(self):
"""
Return the mutated path of the rectangle
"""
_path = self.get_boxstyle()(self._x, self._y,
self._width, self._height,
self.get_mutation_scale(),
self.get_mutation_aspect())
return _path
# Following methods are borrowed from the Rectangle class.
def get_x(self):
"Return the left coord of the rectangle"
return self._x
def get_y(self):
"Return the bottom coord of the rectangle"
return self._y
def get_width(self):
"Return the width of the rectangle"
return self._width
def get_height(self):
"Return the height of the rectangle"
return self._height
def set_x(self, x):
"""
Set the left coord of the rectangle
ACCEPTS: float
"""
self._x = x
def set_y(self, y):
"""
Set the bottom coord of the rectangle
ACCEPTS: float
"""
self._y = y
def set_width(self, w):
"""
        Set the width of the rectangle
ACCEPTS: float
"""
self._width = w
def set_height(self, h):
"""
        Set the height of the rectangle
ACCEPTS: float
"""
self._height = h
def set_bounds(self, *args):
"""
Set the bounds of the rectangle: l,b,w,h
ACCEPTS: (left, bottom, width, height)
"""
        if len(args)==1:
l,b,w,h = args[0]
else:
l,b,w,h = args
self._x = l
self._y = b
self._width = w
self._height = h
def get_bbox(self):
return transforms.Bbox.from_bounds(self._x, self._y, self._width, self._height)
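# Illustrative sketch (not part of the module): a fancy box with rounded
# corners in data coordinates; `ax` is assumed to be a matplotlib Axes with
# limits (0, 1), and the style string follows the set_boxstyle syntax above.
#
#     fancy = FancyBboxPatch((0.3, 0.4), 0.4, 0.2, boxstyle="round,pad=0.05",
#                            facecolor='w', edgecolor='r')
#     ax.add_patch(fancy)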
from matplotlib.bezier import split_bezier_intersecting_with_closedpath
from matplotlib.bezier import get_intersection, inside_circle, get_parallels
from matplotlib.bezier import make_wedged_bezier2
from matplotlib.bezier import split_path_inout, get_cos_sin
class ConnectionStyle(_Style):
"""
:class:`ConnectionStyle` is a container class which defines
    several connectionstyle classes, which are used to create a path
between two points. These are mainly used with
:class:`FancyArrowPatch`.
    A connectionstyle object can be created either as::
ConnectionStyle.Arc3(rad=0.2)
or::
ConnectionStyle("Arc3", rad=0.2)
or::
ConnectionStyle("Arc3, rad=0.2")
The following classes are defined
%(AvailableConnectorstyles)s
    An instance of any connection style class is a callable object,
whose call signature is::
__call__(self, posA, posB, patchA=None, patchB=None, shrinkA=2., shrinkB=2.)
and it returns a :class:`Path` instance. *posA* and *posB* are
tuples of x,y coordinates of the two points to be
    connected. If *patchA* (or *patchB*) is given, the returned path is
    clipped so that it starts (or ends) at the boundary of the
    patch. The path is further shrunk by *shrinkA* (or *shrinkB*),
    which are given in points.
"""
_style_list = {}
class _Base(object):
"""
        A base class for connectionstyle classes. The derived class needs
        to implement a *connect* method whose call signature is::
            connect(posA, posB)
        where posA and posB are tuples of x, y coordinates to be
        connected. The method needs to return a path connecting the two
        points. This base class defines a __call__ method, and a few
        helper methods.
"""
class SimpleEvent:
def __init__(self, xy):
self.x, self.y = xy
def _clip(self, path, patchA, patchB):
"""
            Clip the path to the boundaries of patchA and patchB.
            The starting point of the path needs to be inside patchA
            and the end point inside patchB. The *contains* method of
            each patch object is used to test whether a point is
            inside the patch.
"""
if patchA:
def insideA(xy_display):
#xy_display = patchA.get_data_transform().transform_point(xy_data)
xy_event = ConnectionStyle._Base.SimpleEvent(xy_display)
return patchA.contains(xy_event)[0]
try:
left, right = split_path_inout(path, insideA)
except ValueError:
right = path
path = right
if patchB:
def insideB(xy_display):
#xy_display = patchB.get_data_transform().transform_point(xy_data)
xy_event = ConnectionStyle._Base.SimpleEvent(xy_display)
return patchB.contains(xy_event)[0]
try:
left, right = split_path_inout(path, insideB)
except ValueError:
left = path
path = left
return path
def _shrink(self, path, shrinkA, shrinkB):
"""
            Shrink the path by a fixed size (in points) given by shrinkA and shrinkB
"""
if shrinkA:
x, y = path.vertices[0]
insideA = inside_circle(x, y, shrinkA)
left, right = split_path_inout(path, insideA)
path = right
if shrinkB:
x, y = path.vertices[-1]
insideB = inside_circle(x, y, shrinkB)
left, right = split_path_inout(path, insideB)
path = left
return path
def __call__(self, posA, posB,
shrinkA=2., shrinkB=2., patchA=None, patchB=None):
"""
Calls the *connect* method to create a path between *posA*
            and *posB*. The path is then clipped and shrunk.
"""
path = self.connect(posA, posB)
clipped_path = self._clip(path, patchA, patchB)
shrinked_path = self._shrink(clipped_path, shrinkA, shrinkB)
return shrinked_path
class Arc3(_Base):
"""
Creates a simple quadratic bezier curve between two
        points. The curve is created so that the middle control point
        (C1) is located at the same distance from the start (C0) and
        end (C2) points, and the distance from C1 to the line
        connecting C0 and C2 is *rad* times the distance of C0-C2.
"""
def __init__(self, rad=0.):
"""
*rad*
curvature of the curve.
"""
self.rad = rad
def connect(self, posA, posB):
x1, y1 = posA
x2, y2 = posB
x12, y12 = (x1 + x2)/2., (y1 + y2)/2.
dx, dy = x2 - x1, y2 - y1
f = self.rad
cx, cy = x12 + f*dy, y12 - f*dx
vertices = [(x1, y1),
(cx, cy),
(x2, y2)]
codes = [Path.MOVETO,
Path.CURVE3,
Path.CURVE3]
return Path(vertices, codes)
_style_list["arc3"] = Arc3
class Angle3(_Base):
"""
Creates a simple quadratic bezier curve between two
        points. The middle control point is placed at the
        intersection of two lines which cross the start (or
        end) point at an angle of angleA (or angleB).
"""
def __init__(self, angleA=90, angleB=0):
"""
*angleA*
starting angle of the path
*angleB*
ending angle of the path
"""
self.angleA = angleA
self.angleB = angleB
def connect(self, posA, posB):
x1, y1 = posA
x2, y2 = posB
cosA, sinA = math.cos(self.angleA/180.*math.pi),\
math.sin(self.angleA/180.*math.pi),
cosB, sinB = math.cos(self.angleB/180.*math.pi),\
math.sin(self.angleB/180.*math.pi),
cx, cy = get_intersection(x1, y1, cosA, sinA,
x2, y2, cosB, sinB)
vertices = [(x1, y1), (cx, cy), (x2, y2)]
codes = [Path.MOVETO, Path.CURVE3, Path.CURVE3]
return Path(vertices, codes)
_style_list["angle3"] = Angle3
class Angle(_Base):
"""
        Creates a piecewise continuous quadratic bezier path between
        two points. The path has one passing-through point placed at
        the intersection of two lines which cross the start
        (or end) point at an angle of angleA (or angleB). The
        connecting edges are rounded with *rad*.
"""
def __init__(self, angleA=90, angleB=0, rad=0.):
"""
*angleA*
starting angle of the path
*angleB*
ending angle of the path
*rad*
rounding radius of the edge
"""
self.angleA = angleA
self.angleB = angleB
self.rad = rad
def connect(self, posA, posB):
x1, y1 = posA
x2, y2 = posB
cosA, sinA = math.cos(self.angleA/180.*math.pi),\
math.sin(self.angleA/180.*math.pi),
cosB, sinB = math.cos(self.angleB/180.*math.pi),\
-math.sin(self.angleB/180.*math.pi),
cx, cy = get_intersection(x1, y1, cosA, sinA,
x2, y2, cosB, sinB)
vertices = [(x1, y1)]
codes = [Path.MOVETO]
if self.rad == 0.:
vertices.append((cx, cy))
codes.append(Path.LINETO)
else:
vertices.extend([(cx - self.rad * cosA, cy - self.rad * sinA),
(cx, cy),
(cx + self.rad * cosB, cy + self.rad * sinB)])
codes.extend([Path.LINETO, Path.CURVE3, Path.CURVE3])
vertices.append((x2, y2))
codes.append(Path.LINETO)
return Path(vertices, codes)
_style_list["angle"] = Angle
class Arc(_Base):
"""
        Creates a piecewise continuous quadratic bezier path between
        two points. The path can have two passing-through points: one
        placed at distance armA and angle angleA from point A, and
        another placed likewise with respect to point B. The edges are
rounded with *rad*.
"""
def __init__(self, angleA=0, angleB=0, armA=None, armB=None, rad=0.):
"""
*angleA* :
starting angle of the path
*angleB* :
ending angle of the path
*armA* :
length of the starting arm
*armB* :
length of the ending arm
*rad* :
rounding radius of the edges
"""
self.angleA = angleA
self.angleB = angleB
self.armA = armA
self.armB = armB
self.rad = rad
def connect(self, posA, posB):
x1, y1 = posA
x2, y2 = posB
vertices = [(x1, y1)]
rounded = []
codes = [Path.MOVETO]
if self.armA:
cosA = math.cos(self.angleA/180.*math.pi)
sinA = math.sin(self.angleA/180.*math.pi)
            # x_armA, y_armA
d = self.armA - self.rad
rounded.append((x1 + d*cosA, y1 + d*sinA))
d = self.armA
rounded.append((x1 + d*cosA, y1 + d*sinA))
if self.armB:
cosB = math.cos(self.angleB/180.*math.pi)
sinB = math.sin(self.angleB/180.*math.pi)
x_armB, y_armB = x2 + self.armB*cosB, y2 + self.armB*sinB
if rounded:
xp, yp = rounded[-1]
dx, dy = x_armB - xp, y_armB - yp
dd = (dx*dx + dy*dy)**.5
rounded.append((xp + self.rad*dx/dd, yp + self.rad*dy/dd))
vertices.extend(rounded)
codes.extend([Path.LINETO,
Path.CURVE3,
Path.CURVE3])
else:
xp, yp = vertices[-1]
dx, dy = x_armB - xp, y_armB - yp
dd = (dx*dx + dy*dy)**.5
d = dd - self.rad
rounded = [(xp + d*dx/dd, yp + d*dy/dd),
(x_armB, y_armB)]
if rounded:
xp, yp = rounded[-1]
dx, dy = x2 - xp, y2 - yp
dd = (dx*dx + dy*dy)**.5
rounded.append((xp + self.rad*dx/dd, yp + self.rad*dy/dd))
vertices.extend(rounded)
codes.extend([Path.LINETO,
Path.CURVE3,
Path.CURVE3])
vertices.append((x2, y2))
codes.append(Path.LINETO)
return Path(vertices, codes)
_style_list["arc"] = Arc
__doc__ = cbook.dedent(__doc__) % \
{"AvailableConnectorstyles": _pprint_styles(_style_list)}
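# Illustrative sketch (not part of the module): building a connection path
# directly.  shrinkA/shrinkB are set to 0 so the returned quadratic bezier
# runs exactly between the two points; the values are arbitrary examples.
#
#     conn = ConnectionStyle("arc3", rad=0.3)
#     path = conn((0., 0.), (1., 1.), shrinkA=0., shrinkB=0.)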
class ArrowStyle(_Style):
"""
:class:`ArrowStyle` is a container class which defines several
    arrowstyle classes, which are used to create an arrow path along a
given path. These are mainly used with :class:`FancyArrowPatch`.
    An arrowstyle object can be created either as::
ArrowStyle.Fancy(head_length=.4, head_width=.4, tail_width=.4)
or::
ArrowStyle("Fancy", head_length=.4, head_width=.4, tail_width=.4)
or::
ArrowStyle("Fancy, head_length=.4, head_width=.4, tail_width=.4")
The following classes are defined
%(AvailableArrowstyles)s
    An instance of any arrow style class is a callable object,
whose call signature is::
__call__(self, path, mutation_size, linewidth, aspect_ratio=1.)
and it returns a tuple of a :class:`Path` instance and a boolean
    value. *path* is a :class:`Path` instance along which the arrow
    will be drawn. *mutation_size* and *aspect_ratio* have the same
    meaning as in :class:`BoxStyle`. *linewidth* is the line width to be
    stroked. It is meant to be used to correct the location of the arrow
    head so that it does not overshoot the destination point, but not all
classes support it.
.. plot:: mpl_examples/pylab_examples/fancyarrow_demo.py
"""
_style_list = {}
class _Base(object):
"""
Arrow Transmuter Base class
ArrowTransmuterBase and its derivatives are used to make a fancy
arrow around a given path. The __call__ method returns a path
(which will be used to create a PathPatch instance) and a boolean
        value indicating whether the path is open and therefore not fillable. This
class is not an artist and actual drawing of the fancy arrow is
done by the FancyArrowPatch class.
"""
# The derived classes are required to be able to be initialized
        # w/o arguments, i.e., all their arguments (except self) must have
# the default values.
def __init__(self):
super(ArrowStyle._Base, self).__init__()
@staticmethod
        def ensure_quadratic_bezier(path):
            """ Some ArrowStyle classes only work with a simple
            quadratic bezier curve (created with the Arc3 or
            Angle3 connection styles). This static method checks if the
            provided path is a simple quadratic bezier curve and returns
            its control points if it is.
"""
segments = list(path.iter_segments())
assert len(segments) == 2
assert segments[0][1] == Path.MOVETO
assert segments[1][1] == Path.CURVE3
return list(segments[0][0]) + list(segments[1][0])
def transmute(self, path, mutation_size, linewidth):
"""
            The transmute method is the very core of an ArrowStyle
            class and must be overridden in the subclasses. It receives the
            path object along which the arrow will be drawn, and the
            mutation_size, by which the size of the arrow head (among
            other things) is scaled. It returns a Path instance. The
            linewidth may be used to adjust the path so that it does not
            pass beyond the given points.
"""
raise NotImplementedError('Derived must override')
def __call__(self, path, mutation_size, linewidth,
aspect_ratio=1.):
"""
The __call__ method is a thin wrapper around the transmute method
            and takes care of the aspect ratio.
"""
if aspect_ratio is not None:
# Squeeze the given height by the aspect_ratio
vertices, codes = path.vertices[:], path.codes[:]
# Squeeze the height
vertices[:,1] = vertices[:,1] / aspect_ratio
path_shrinked = Path(vertices, codes)
# call transmute method with squeezed height.
                path_mutated, closed = self.transmute(path_shrinked,
                                                      mutation_size,
                                                      linewidth)
vertices, codes = path_mutated.vertices, path_mutated.codes
# Restore the height
vertices[:,1] = vertices[:,1] * aspect_ratio
return Path(vertices, codes), closed
else:
return self.transmute(path, mutation_size, linewidth)
class _Curve(_Base):
"""
A simple arrow which will work with any path instance. The
        returned path is simply the concatenation of the original path and
        at most two paths representing the arrow heads at the begin point
        and at the end point. The returned path is not closed and only meant
to be stroked.
"""
def __init__(self, beginarrow=None, endarrow=None,
head_length=.2, head_width=.1):
"""
The arrows are drawn if *beginarrow* and/or *endarrow* are
            true. *head_length* and *head_width* determine the size of
the arrow relative to the *mutation scale*.
"""
self.beginarrow, self.endarrow = beginarrow, endarrow
self.head_length, self.head_width = \
head_length, head_width
super(ArrowStyle._Curve, self).__init__()
def _get_pad_projected(self, x0, y0, x1, y1, linewidth):
# when no arrow head is drawn
dx, dy = x0 - x1, y0 - y1
cp_distance = math.sqrt(dx**2 + dy**2)
            # padx_projected, pady_projected : amount of pad to account for
            # the projection of the wedge
padx_projected = (.5*linewidth)
pady_projected = (.5*linewidth)
# apply pad for projected edge
ddx = padx_projected * dx / cp_distance
ddy = pady_projected * dy / cp_distance
return ddx, ddy
def _get_arrow_wedge(self, x0, y0, x1, y1,
head_dist, cos_t, sin_t, linewidth
):
"""
Return the paths for arrow heads. Since arrow lines are
            drawn with capstyle=projecting, the arrow goes beyond the
            desired point. This method also returns the amount by which
            the path should be shrunk so that it does not overshoot.
"""
# arrow from x0, y0 to x1, y1
dx, dy = x0 - x1, y0 - y1
cp_distance = math.sqrt(dx**2 + dy**2)
            # padx_projected, pady_projected : amount of padding to account
            # for the overshoot of the projection of the wedge
padx_projected = (.5*linewidth / cos_t)
pady_projected = (.5*linewidth / sin_t)
# apply pad for projected edge
ddx = padx_projected * dx / cp_distance
ddy = pady_projected * dy / cp_distance
# offset for arrow wedge
dx, dy = dx / cp_distance * head_dist, dy / cp_distance * head_dist
dx1, dy1 = cos_t * dx + sin_t * dy, -sin_t * dx + cos_t * dy
dx2, dy2 = cos_t * dx - sin_t * dy, sin_t * dx + cos_t * dy
vertices_arrow = [(x1+ddx+dx1, y1+ddy+dy1),
                              (x1+ddx, y1+ddy),
(x1+ddx+dx2, y1+ddy+dy2)]
codes_arrow = [Path.MOVETO,
Path.LINETO,
Path.LINETO]
return vertices_arrow, codes_arrow, ddx, ddy
def transmute(self, path, mutation_size, linewidth):
head_length, head_width = self.head_length * mutation_size, \
self.head_width * mutation_size
head_dist = math.sqrt(head_length**2 + head_width**2)
cos_t, sin_t = head_length / head_dist, head_width / head_dist
# begin arrow
x0, y0 = path.vertices[0]
x1, y1 = path.vertices[1]
if self.beginarrow:
verticesA, codesA, ddxA, ddyA = \
self._get_arrow_wedge(x1, y1, x0, y0,
head_dist, cos_t, sin_t,
linewidth)
else:
verticesA, codesA = [], []
#ddxA, ddyA = self._get_pad_projected(x1, y1, x0, y0, linewidth)
ddxA, ddyA = 0., 0., #self._get_pad_projected(x1, y1, x0, y0, linewidth)
# end arrow
x2, y2 = path.vertices[-2]
x3, y3 = path.vertices[-1]
if self.endarrow:
verticesB, codesB, ddxB, ddyB = \
self._get_arrow_wedge(x2, y2, x3, y3,
head_dist, cos_t, sin_t,
linewidth)
else:
verticesB, codesB = [], []
ddxB, ddyB = 0., 0. #self._get_pad_projected(x2, y2, x3, y3, linewidth)
# this simple code will not work if ddx, ddy is greater than
            # separation between vertices.
vertices = np.concatenate([verticesA + [(x0+ddxA, y0+ddyA)],
path.vertices[1:-1],
[(x3+ddxB, y3+ddyB)] + verticesB])
codes = np.concatenate([codesA,
path.codes,
codesB])
p = Path(vertices, codes)
return p, False
class Curve(_Curve):
"""
A simple curve without any arrow head.
"""
def __init__(self):
super(ArrowStyle.Curve, self).__init__( \
beginarrow=False, endarrow=False)
_style_list["-"] = Curve
class CurveA(_Curve):
"""
An arrow with a head at its begin point.
"""
def __init__(self, head_length=.4, head_width=.2):
"""
*head_length*
length of the arrow head
*head_width*
width of the arrow head
"""
super(ArrowStyle.CurveA, self).__init__( \
beginarrow=True, endarrow=False,
head_length=head_length, head_width=head_width )
_style_list["<-"] = CurveA
class CurveB(_Curve):
"""
An arrow with a head at its end point.
"""
def __init__(self, head_length=.4, head_width=.2):
"""
*head_length*
length of the arrow head
*head_width*
width of the arrow head
"""
super(ArrowStyle.CurveB, self).__init__( \
beginarrow=False, endarrow=True,
head_length=head_length, head_width=head_width )
#_style_list["->"] = CurveB
_style_list["->"] = CurveB
class CurveAB(_Curve):
"""
An arrow with heads both at the begin and the end point.
"""
def __init__(self, head_length=.4, head_width=.2):
"""
*head_length*
length of the arrow head
*head_width*
width of the arrow head
"""
super(ArrowStyle.CurveAB, self).__init__( \
beginarrow=True, endarrow=True,
head_length=head_length, head_width=head_width )
#_style_list["<->"] = CurveAB
_style_list["<->"] = CurveAB
class _Bracket(_Base):
def __init__(self, bracketA=None, bracketB=None,
widthA=1., widthB=1.,
lengthA=0.2, lengthB=0.2,
angleA=None, angleB=None,
scaleA=None, scaleB=None
):
self.bracketA, self.bracketB = bracketA, bracketB
self.widthA, self.widthB = widthA, widthB
self.lengthA, self.lengthB = lengthA, lengthB
self.angleA, self.angleB = angleA, angleB
self.scaleA, self.scaleB= scaleA, scaleB
def _get_bracket(self, x0, y0,
cos_t, sin_t, width, length,
):
# arrow from x0, y0 to x1, y1
from matplotlib.bezier import get_normal_points
x1, y1, x2, y2 = get_normal_points(x0, y0, cos_t, sin_t, width)
dx, dy = length * cos_t, length * sin_t
vertices_arrow = [(x1+dx, y1+dy),
(x1, y1),
(x2, y2),
(x2+dx, y2+dy)]
codes_arrow = [Path.MOVETO,
Path.LINETO,
Path.LINETO,
Path.LINETO]
return vertices_arrow, codes_arrow
def transmute(self, path, mutation_size, linewidth):
if self.scaleA is None:
scaleA = mutation_size
else:
scaleA = self.scaleA
if self.scaleB is None:
scaleB = mutation_size
else:
scaleB = self.scaleB
vertices_list, codes_list = [], []
if self.bracketA:
x0, y0 = path.vertices[0]
x1, y1 = path.vertices[1]
cos_t, sin_t = get_cos_sin(x1, y1, x0, y0)
verticesA, codesA = self._get_bracket(x0, y0, cos_t, sin_t,
self.widthA*scaleA,
                                                  self.lengthA*scaleA)
vertices_list.append(verticesA)
codes_list.append(codesA)
vertices_list.append(path.vertices)
codes_list.append(path.codes)
if self.bracketB:
x0, y0 = path.vertices[-1]
x1, y1 = path.vertices[-2]
cos_t, sin_t = get_cos_sin(x1, y1, x0, y0)
verticesB, codesB = self._get_bracket(x0, y0, cos_t, sin_t,
self.widthB*scaleB,
self.lengthB*scaleB)
vertices_list.append(verticesB)
codes_list.append(codesB)
vertices = np.concatenate(vertices_list)
codes = np.concatenate(codes_list)
p = Path(vertices, codes)
return p, False
class BracketB(_Bracket):
"""
An arrow with a bracket([) at its end.
"""
def __init__(self, widthB=1., lengthB=0.2, angleB=None):
"""
*widthB*
width of the bracket
*lengthB*
length of the bracket
*angleB*
angle between the bracket and the line
"""
super(ArrowStyle.BracketB, self).__init__(None, True,
widthB=widthB, lengthB=lengthB, angleB=None )
#_style_list["-["] = BracketB
_style_list["-["] = BracketB
class Simple(_Base):
"""
A simple arrow. Only works with a quadratic bezier curve.
"""
def __init__(self, head_length=.5, head_width=.5, tail_width=.2):
"""
*head_length*
length of the arrow head
            *head_width*
width of the arrow head
*tail_width*
width of the arrow tail
"""
self.head_length, self.head_width, self.tail_width = \
head_length, head_width, tail_width
super(ArrowStyle.Simple, self).__init__()
def transmute(self, path, mutation_size, linewidth):
x0, y0, x1, y1, x2, y2 = self.ensure_quadratic_bezier(path)
# divide the path into a head and a tail
head_length = self.head_length * mutation_size
in_f = inside_circle(x2, y2, head_length)
arrow_path = [(x0, y0), (x1, y1), (x2, y2)]
arrow_out, arrow_in = \
split_bezier_intersecting_with_closedpath(arrow_path,
in_f,
tolerence=0.01)
# head
head_width = self.head_width * mutation_size
head_l, head_r = make_wedged_bezier2(arrow_in, head_width/2.,
wm=.5)
# tail
tail_width = self.tail_width * mutation_size
tail_left, tail_right = get_parallels(arrow_out, tail_width/2.)
head_right, head_left = head_r, head_l
patch_path = [(Path.MOVETO, tail_right[0]),
(Path.CURVE3, tail_right[1]),
(Path.CURVE3, tail_right[2]),
(Path.LINETO, head_right[0]),
(Path.CURVE3, head_right[1]),
(Path.CURVE3, head_right[2]),
(Path.CURVE3, head_left[1]),
(Path.CURVE3, head_left[0]),
(Path.LINETO, tail_left[2]),
(Path.CURVE3, tail_left[1]),
(Path.CURVE3, tail_left[0]),
(Path.LINETO, tail_right[0]),
(Path.CLOSEPOLY, tail_right[0]),
]
path = Path([p for c, p in patch_path], [c for c, p in patch_path])
return path, True
_style_list["simple"] = Simple
class Fancy(_Base):
"""
A fancy arrow. Only works with a quadratic bezier curve.
"""
def __init__(self, head_length=.4, head_width=.4, tail_width=.4):
"""
*head_length*
length of the arrow head
            *head_width*
width of the arrow head
*tail_width*
width of the arrow tail
"""
self.head_length, self.head_width, self.tail_width = \
head_length, head_width, tail_width
super(ArrowStyle.Fancy, self).__init__()
def transmute(self, path, mutation_size, linewidth):
x0, y0, x1, y1, x2, y2 = self.ensure_quadratic_bezier(path)
# divide the path into a head and a tail
head_length = self.head_length * mutation_size
arrow_path = [(x0, y0), (x1, y1), (x2, y2)]
# path for head
in_f = inside_circle(x2, y2, head_length)
path_out, path_in = \
split_bezier_intersecting_with_closedpath(arrow_path,
in_f,
tolerence=0.01)
path_head = path_in
            # path for tail
in_f = inside_circle(x2, y2, head_length*.8)
path_out, path_in = \
split_bezier_intersecting_with_closedpath(arrow_path,
in_f,
tolerence=0.01)
path_tail = path_out
# head
head_width = self.head_width * mutation_size
head_l, head_r = make_wedged_bezier2(path_head, head_width/2.,
wm=.6)
# tail
tail_width = self.tail_width * mutation_size
tail_left, tail_right = make_wedged_bezier2(path_tail,
tail_width*.5,
w1=1., wm=0.6, w2=0.3)
            # path for the start of the tail
in_f = inside_circle(x0, y0, tail_width*.3)
path_in, path_out = \
split_bezier_intersecting_with_closedpath(arrow_path,
in_f,
tolerence=0.01)
tail_start = path_in[-1]
head_right, head_left = head_r, head_l
patch_path = [(Path.MOVETO, tail_start),
(Path.LINETO, tail_right[0]),
(Path.CURVE3, tail_right[1]),
(Path.CURVE3, tail_right[2]),
(Path.LINETO, head_right[0]),
(Path.CURVE3, head_right[1]),
(Path.CURVE3, head_right[2]),
(Path.CURVE3, head_left[1]),
(Path.CURVE3, head_left[0]),
(Path.LINETO, tail_left[2]),
(Path.CURVE3, tail_left[1]),
(Path.CURVE3, tail_left[0]),
(Path.LINETO, tail_start),
(Path.CLOSEPOLY, tail_start),
]
patch_path2 = [(Path.MOVETO, tail_right[0]),
(Path.CURVE3, tail_right[1]),
(Path.CURVE3, tail_right[2]),
(Path.LINETO, head_right[0]),
(Path.CURVE3, head_right[1]),
(Path.CURVE3, head_right[2]),
(Path.CURVE3, head_left[1]),
(Path.CURVE3, head_left[0]),
(Path.LINETO, tail_left[2]),
(Path.CURVE3, tail_left[1]),
(Path.CURVE3, tail_left[0]),
(Path.CURVE3, tail_start),
(Path.CURVE3, tail_right[0]),
(Path.CLOSEPOLY, tail_right[0]),
]
path = Path([p for c, p in patch_path], [c for c, p in patch_path])
return path, True
_style_list["fancy"] = Fancy
class Wedge(_Base):
"""
        Wedge(?) shape. Only works with a quadratic bezier curve. The
begin point has a width of the tail_width and the end point has a
width of 0. At the middle, the width is shrink_factor*tail_width.
"""
def __init__(self, tail_width=.3, shrink_factor=0.5):
"""
*tail_width*
width of the tail
*shrink_factor*
fraction of the arrow width at the middle point
"""
self.tail_width = tail_width
self.shrink_factor = shrink_factor
super(ArrowStyle.Wedge, self).__init__()
def transmute(self, path, mutation_size, linewidth):
x0, y0, x1, y1, x2, y2 = self.ensure_quadratic_bezier(path)
arrow_path = [(x0, y0), (x1, y1), (x2, y2)]
b_plus, b_minus = make_wedged_bezier2(arrow_path,
self.tail_width * mutation_size / 2.,
wm=self.shrink_factor)
patch_path = [(Path.MOVETO, b_plus[0]),
(Path.CURVE3, b_plus[1]),
(Path.CURVE3, b_plus[2]),
(Path.LINETO, b_minus[2]),
(Path.CURVE3, b_minus[1]),
(Path.CURVE3, b_minus[0]),
(Path.CLOSEPOLY, b_minus[0]),
]
path = Path([p for c, p in patch_path], [c for c, p in patch_path])
return path, True
_style_list["wedge"] = Wedge
__doc__ = cbook.dedent(__doc__) % \
{"AvailableArrowstyles": _pprint_styles(_style_list)}
class FancyArrowPatch(Patch):
"""
A fancy arrow patch. It draws an arrow using the :class:ArrowStyle.
"""
def __str__(self):
return self.__class__.__name__ \
+ "FancyArrowPatch(%g,%g,%g,%g,%g,%g)" % tuple(self._q_bezier)
def __init__(self, posA=None, posB=None,
path=None,
arrowstyle="simple",
arrow_transmuter=None,
connectionstyle="arc3",
connector=None,
patchA=None,
patchB=None,
shrinkA=2.,
shrinkB=2.,
mutation_scale=1.,
mutation_aspect=None,
**kwargs):
"""
        If *posA* and *posB* are given, a path connecting the two points is
        created according to the connectionstyle. The path will be
        clipped with *patchA* and *patchB* and further shrunk by
        *shrinkA* and *shrinkB*. An arrow is drawn along this
        resulting path using the *arrowstyle* parameter. If *path* is
        provided, an arrow is drawn along this path and *patchA*,
        *patchB*, *shrinkA*, and *shrinkB* are ignored.
The *connectionstyle* describes how *posA* and *posB* are
connected. It can be an instance of the ConnectionStyle class
        (matplotlib.patches.ConnectionStyle) or a string of the
connectionstyle name, with optional comma-separated
attributes. The following connection styles are available.
%(AvailableConnectorstyles)s
The *arrowstyle* describes how the fancy arrow will be
drawn. It can be string of the available arrowstyle names,
        with optional comma-separated attributes, or an
        ArrowStyle instance. The optional attributes are meant to be
scaled with the *mutation_scale*. The following arrow styles are
available.
%(AvailableArrowstyles)s
*mutation_scale* : a value with which attributes of arrowstyle
(e.g., head_length) will be scaled. default=1.
*mutation_aspect* : The height of the rectangle will be
squeezed by this value before the mutation and the mutated
box will be stretched by the inverse of it. default=None.
Valid kwargs are:
%(Patch)s
"""
if posA is not None and posB is not None and path is None:
self._posA_posB = [posA, posB]
if connectionstyle is None:
connectionstyle = "arc3"
self.set_connectionstyle(connectionstyle)
elif posA is None and posB is None and path is not None:
self._posA_posB = None
            self._connector = None
else:
            raise ValueError("either posA and posB, or path, needs to be provided")
self.patchA = patchA
self.patchB = patchB
self.shrinkA = shrinkA
self.shrinkB = shrinkB
Patch.__init__(self, **kwargs)
self._path_original = path
self.set_arrowstyle(arrowstyle)
self._mutation_scale=mutation_scale
self._mutation_aspect=mutation_aspect
#self._draw_in_display_coordinate = True
kwdoc = dict()
kwdoc["AvailableArrowstyles"]=_pprint_styles(ArrowStyle._style_list)
kwdoc["AvailableConnectorstyles"]=_pprint_styles(ConnectionStyle._style_list)
kwdoc.update(artist.kwdocd)
__init__.__doc__ = cbook.dedent(__init__.__doc__) % kwdoc
del kwdoc
def set_positions(self, posA, posB):
""" set the begin end end positions of the connecting
path. Use current vlaue if None.
"""
if posA is not None: self._posA_posB[0] = posA
if posB is not None: self._posA_posB[1] = posB
def set_patchA(self, patchA):
""" set the begin patch.
"""
self.patchA = patchA
def set_patchB(self, patchB):
""" set the begin patch
"""
self.patchB = patchB
def set_connectionstyle(self, connectionstyle, **kw):
"""
Set the connection style.
        *connectionstyle* can be a string with the connectionstyle name and
        optional comma-separated attributes. Alternatively, the attrs can
        be provided as keywords.
set_connectionstyle("arc,angleA=0,armA=30,rad=10")
set_connectionstyle("arc", angleA=0,armA=30,rad=10)
Old attrs simply are forgotten.
Without argument (or with connectionstyle=None), return
available styles as a list of strings.
"""
if connectionstyle==None:
return ConnectionStyle.pprint_styles()
if isinstance(connectionstyle, ConnectionStyle._Base):
self._connector = connectionstyle
elif callable(connectionstyle):
# we may need check the calling convention of the given function
self._connector = connectionstyle
else:
self._connector = ConnectionStyle(connectionstyle, **kw)
def get_connectionstyle(self):
"""
Return the ConnectionStyle instance
"""
return self._connector
def set_arrowstyle(self, arrowstyle=None, **kw):
"""
Set the arrow style.
*arrowstyle* can be a string with arrowstyle name with optional
comma-separated attributes. Alternatively, the attrs can
be provided as keywords.
set_arrowstyle("Fancy,head_length=0.2")
set_arrowstyle("fancy", head_length=0.2)
Old attrs simply are forgotten.
        Without argument (or with arrowstyle=None), return
        available arrow styles as a list of strings.
"""
if arrowstyle==None:
return ArrowStyle.pprint_styles()
        if isinstance(arrowstyle, ArrowStyle._Base):
self._arrow_transmuter = arrowstyle
else:
self._arrow_transmuter = ArrowStyle(arrowstyle, **kw)
def get_arrowstyle(self):
"""
Return the arrowstyle object
"""
return self._arrow_transmuter
def set_mutation_scale(self, scale):
"""
Set the mutation scale.
ACCEPTS: float
"""
self._mutation_scale=scale
def get_mutation_scale(self):
"""
Return the mutation scale.
"""
return self._mutation_scale
def set_mutation_aspect(self, aspect):
"""
Set the aspect ratio of the bbox mutation.
ACCEPTS: float
"""
self._mutation_aspect=aspect
def get_mutation_aspect(self):
"""
Return the aspect ratio of the bbox mutation.
"""
return self._mutation_aspect
def get_path(self):
"""
        Return the path of the arrow in data coordinates. Use the
        get_path_in_displaycoord() method to retrieve the arrow path
        in display coordinates.
"""
_path = self.get_path_in_displaycoord()
return self.get_transform().inverted().transform_path(_path)
def get_path_in_displaycoord(self):
"""
Return the mutated path of the arrow in the display coord
"""
if self._posA_posB is not None:
posA = self.get_transform().transform_point(self._posA_posB[0])
posB = self.get_transform().transform_point(self._posA_posB[1])
_path = self.get_connectionstyle()(posA, posB,
patchA=self.patchA,
patchB=self.patchB,
shrinkA=self.shrinkA,
shrinkB=self.shrinkB
)
else:
_path = self.get_transform().transform_path(self._path_original)
_path, closed = self.get_arrowstyle()(_path,
self.get_mutation_scale(),
self.get_linewidth(),
self.get_mutation_aspect()
)
if not closed:
self.fill = False
return _path
def draw(self, renderer):
if not self.get_visible(): return
#renderer.open_group('patch')
gc = renderer.new_gc()
fill_orig = self.fill
path = self.get_path_in_displaycoord()
affine = transforms.IdentityTransform()
if cbook.is_string_like(self._edgecolor) and self._edgecolor.lower()=='none':
gc.set_linewidth(0)
else:
gc.set_foreground(self._edgecolor)
gc.set_linewidth(self._linewidth)
gc.set_linestyle(self._linestyle)
gc.set_antialiased(self._antialiased)
self._set_gc_clip(gc)
gc.set_capstyle('round')
if (not self.fill or self._facecolor is None or
(cbook.is_string_like(self._facecolor) and self._facecolor.lower()=='none')):
rgbFace = None
gc.set_alpha(1.0)
else:
r, g, b, a = colors.colorConverter.to_rgba(self._facecolor, self._alpha)
rgbFace = (r, g, b)
gc.set_alpha(a)
if self._hatch:
gc.set_hatch(self._hatch )
renderer.draw_path(gc, path, affine, rgbFace)
self.fill = fill_orig
#renderer.close_group('patch')
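# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): the docstrings above
# describe how FancyArrowPatch combines a connection style ("arc3" by default)
# with an arrow style registered in ArrowStyle._style_list.  The snippet below
# is a minimal illustration, assuming this module is importable as
# matplotlib.patches; the positions, the "->" style and the rad attribute are
# illustrative choices only.
if __name__ == "__main__":
    import matplotlib.pyplot as plt
    from matplotlib.patches import FancyArrowPatch

    fig, ax = plt.subplots()
    # "->" is registered above as ArrowStyle.CurveB; "arc3,rad=0.2" bends the
    # connecting path slightly before the arrow head is added.
    arrow = FancyArrowPatch(posA=(0.2, 0.2), posB=(0.8, 0.8),
                            arrowstyle="->", mutation_scale=20.,
                            connectionstyle="arc3,rad=0.2")
    ax.add_patch(arrow)
    plt.show()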
|
agpl-3.0
|
chutsu/slam
|
slam_optimization/scripts/plot_ransac_data.py
|
1
|
1429
|
#!/usr/bin/env python2
import random
import numpy as np
import matplotlib.pylab as plt
# GLOBAL VARIABLES
OUTPUT_FILE = "ransac_sample.dat"
def frange(x, y, jump):
while x < y:
yield x
x += jump
def line_sample_data(n, x_start, x_end, m=20, c=10):
data = {"x": [], "y": [], "m": [], "c": []}
step_size = (x_end - x_start) / float(n)
x_data = list(frange(x_start, x_end, step_size))
for i in range(n):
y = m * x_data[i] + c
data["x"].append(np.random.normal(x_data[i], 1.0, 1)[0])
data["y"].append(np.random.normal(y, 1.0, 1)[0])
data["m"].append(m)
data["c"].append(c)
return data
def add_outliers(data, n):
x_min = min(data["x"])
x_max = max(data["x"])
y_min = min(data["y"])
y_max = max(data["y"])
for i in range(n):
data["x"].append(random.uniform(x_min, x_max))
data["y"].append(random.uniform(y_min, y_max))
return data
def save_data(data):
output_file = open(OUTPUT_FILE, "w")
output_file.write("x, y\n")
for i in range(len(data["x"])):
line = "{0}, {1}\n".format(data["x"][i], data["y"][i])
output_file.write(line)
output_file.close()
def plot_data(data):
plt.scatter(data["x"], data["y"])
plt.show()
if __name__ == "__main__":
data = line_sample_data(200, 0, 100)
data = add_outliers(data, 80)
save_data(data)
plot_data(data)
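# ---------------------------------------------------------------------------
# Hedged sketch (not part of the original script): the noisy line data saved
# above is intended for a separate RANSAC line-fitting component.  The helper
# below is a minimal hand-rolled RANSAC consumer of that data; the threshold
# and iteration count are illustrative only.
def ransac_line(x, y, n_iters=100, threshold=3.0):
    x, y = np.asarray(x), np.asarray(y)
    best_inliers, best_model = 0, (0.0, 0.0)
    for _ in range(n_iters):
        # sample two distinct points and hypothesise a line y = m*x + c
        i, j = random.sample(range(len(x)), 2)
        if x[i] == x[j]:
            continue
        m = (y[j] - y[i]) / (x[j] - x[i])
        c = y[i] - m * x[i]
        # count points whose vertical distance to the line is small
        n_inliers = np.sum(np.abs(y - (m * x + c)) < threshold)
        if n_inliers > best_inliers:
            best_inliers, best_model = n_inliers, (m, c)
    return best_model
# Example (illustrative): m_est, c_est = ransac_line(data["x"], data["y"])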
|
gpl-3.0
|
Barmaley-exe/scikit-learn
|
examples/cluster/plot_lena_segmentation.py
|
271
|
2444
|
"""
=========================================
Segmenting the picture of Lena in regions
=========================================
This example uses :ref:`spectral_clustering` on a graph created from
voxel-to-voxel difference on an image to break this image into multiple
partly-homogeneous regions.
This procedure (spectral clustering on an image) is an efficient
approximate solution for finding normalized graph cuts.
There are two options to assign labels:
* with 'kmeans' spectral clustering will cluster samples in the embedding space
using a kmeans algorithm
* whereas 'discretize' will iteratively search for the closest partition
space to the embedding space.
"""
print(__doc__)
# Author: Gael Varoquaux <[email protected]>, Brian Cheung
# License: BSD 3 clause
import time
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
from sklearn.feature_extraction import image
from sklearn.cluster import spectral_clustering
lena = sp.misc.lena()
# Downsample the image by a factor of 4
lena = lena[::2, ::2] + lena[1::2, ::2] + lena[::2, 1::2] + lena[1::2, 1::2]
lena = lena[::2, ::2] + lena[1::2, ::2] + lena[::2, 1::2] + lena[1::2, 1::2]
# Convert the image into a graph with the value of the gradient on the
# edges.
graph = image.img_to_graph(lena)
# Take a decreasing function of the gradient: an exponential
# The smaller beta is, the more independent the segmentation is of the
# actual image. For beta=1, the segmentation is close to a Voronoi tessellation.
beta = 5
eps = 1e-6
graph.data = np.exp(-beta * graph.data / lena.std()) + eps
# Apply spectral clustering (this step goes much faster if you have pyamg
# installed)
N_REGIONS = 11
###############################################################################
# Visualize the resulting regions
for assign_labels in ('kmeans', 'discretize'):
t0 = time.time()
labels = spectral_clustering(graph, n_clusters=N_REGIONS,
assign_labels=assign_labels,
random_state=1)
t1 = time.time()
labels = labels.reshape(lena.shape)
plt.figure(figsize=(5, 5))
plt.imshow(lena, cmap=plt.cm.gray)
for l in range(N_REGIONS):
        plt.contour(labels == l, 1,
                    colors=[plt.cm.spectral(l / float(N_REGIONS)), ])
plt.xticks(())
plt.yticks(())
plt.title('Spectral clustering: %s, %.2fs' % (assign_labels, (t1 - t0)))
plt.show()
|
bsd-3-clause
|
karstenw/nodebox-pyobjc
|
examples/Extended Application/matplotlib/examples/animation/image_slices_viewer.py
|
1
|
1923
|
"""
===================
Image Slices Viewer
===================
This example demonstrates how to scroll through 2D image slices of a 3D array.
"""
from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
# nodebox section
if __name__ == '__builtin__':
# were in nodebox
import os
import tempfile
W = 800
inset = 20
size(W, 600)
plt.cla()
plt.clf()
plt.close('all')
def tempimage():
fob = tempfile.NamedTemporaryFile(mode='w+b', suffix='.png', delete=False)
fname = fob.name
fob.close()
return fname
imgx = 20
imgy = 0
def pltshow(plt, dpi=150):
global imgx, imgy
temppath = tempimage()
plt.savefig(temppath, dpi=dpi)
dx,dy = imagesize(temppath)
w = min(W,dx)
image(temppath,imgx,imgy,width=w)
imgy = imgy + dy + 20
os.remove(temppath)
size(W, HEIGHT+dy+40)
else:
def pltshow(mplpyplot):
mplpyplot.show()
# nodebox section end
class IndexTracker(object):
def __init__(self, ax, X):
self.ax = ax
ax.set_title('use scroll wheel to navigate images')
self.X = X
rows, cols, self.slices = X.shape
self.ind = self.slices//2
self.im = ax.imshow(self.X[:, :, self.ind])
self.update()
def onscroll(self, event):
print("%s %s" % (event.button, event.step))
if event.button == 'up':
self.ind = (self.ind + 1) % self.slices
else:
self.ind = (self.ind - 1) % self.slices
self.update()
def update(self):
self.im.set_data(self.X[:, :, self.ind])
        self.ax.set_ylabel('slice %s' % self.ind)
self.im.axes.figure.canvas.draw()
fig, ax = plt.subplots(1, 1)
X = np.random.rand(20, 20, 40)
tracker = IndexTracker(ax, X)
fig.canvas.mpl_connect('scroll_event', tracker.onscroll)
pltshow(plt)
|
mit
|
cl4rke/scikit-learn
|
sklearn/feature_extraction/hashing.py
|
183
|
6155
|
# Author: Lars Buitinck <[email protected]>
# License: BSD 3 clause
import numbers
import numpy as np
import scipy.sparse as sp
from . import _hashing
from ..base import BaseEstimator, TransformerMixin
def _iteritems(d):
"""Like d.iteritems, but accepts any collections.Mapping."""
return d.iteritems() if hasattr(d, "iteritems") else d.items()
class FeatureHasher(BaseEstimator, TransformerMixin):
"""Implements feature hashing, aka the hashing trick.
This class turns sequences of symbolic feature names (strings) into
scipy.sparse matrices, using a hash function to compute the matrix column
corresponding to a name. The hash function employed is the signed 32-bit
version of Murmurhash3.
Feature names of type byte string are used as-is. Unicode strings are
converted to UTF-8 first, but no Unicode normalization is done.
Feature values must be (finite) numbers.
This class is a low-memory alternative to DictVectorizer and
CountVectorizer, intended for large-scale (online) learning and situations
where memory is tight, e.g. when running prediction code on embedded
devices.
Read more in the :ref:`User Guide <feature_hashing>`.
Parameters
----------
n_features : integer, optional
The number of features (columns) in the output matrices. Small numbers
of features are likely to cause hash collisions, but large numbers
will cause larger coefficient dimensions in linear learners.
dtype : numpy type, optional
The type of feature values. Passed to scipy.sparse matrix constructors
as the dtype argument. Do not set this to bool, np.boolean or any
unsigned integer type.
input_type : string, optional
Either "dict" (the default) to accept dictionaries over
(feature_name, value); "pair" to accept pairs of (feature_name, value);
or "string" to accept single strings.
feature_name should be a string, while value should be a number.
In the case of "string", a value of 1 is implied.
The feature_name is hashed to find the appropriate column for the
feature. The value's sign might be flipped in the output (but see
non_negative, below).
    non_negative : boolean, optional, default False
Whether output matrices should contain non-negative values only;
effectively calls abs on the matrix prior to returning it.
When True, output values can be interpreted as frequencies.
When False, output values will have expected value zero.
Examples
--------
>>> from sklearn.feature_extraction import FeatureHasher
>>> h = FeatureHasher(n_features=10)
>>> D = [{'dog': 1, 'cat':2, 'elephant':4},{'dog': 2, 'run': 5}]
>>> f = h.transform(D)
>>> f.toarray()
array([[ 0., 0., -4., -1., 0., 0., 0., 0., 0., 2.],
[ 0., 0., 0., -2., -5., 0., 0., 0., 0., 0.]])
See also
--------
DictVectorizer : vectorizes string-valued features using a hash table.
sklearn.preprocessing.OneHotEncoder : handles nominal/categorical features
encoded as columns of integers.
"""
def __init__(self, n_features=(2 ** 20), input_type="dict",
dtype=np.float64, non_negative=False):
self._validate_params(n_features, input_type)
self.dtype = dtype
self.input_type = input_type
self.n_features = n_features
self.non_negative = non_negative
@staticmethod
def _validate_params(n_features, input_type):
# strangely, np.int16 instances are not instances of Integral,
# while np.int64 instances are...
if not isinstance(n_features, (numbers.Integral, np.integer)):
raise TypeError("n_features must be integral, got %r (%s)."
% (n_features, type(n_features)))
elif n_features < 1 or n_features >= 2 ** 31:
raise ValueError("Invalid number of features (%d)." % n_features)
if input_type not in ("dict", "pair", "string"):
raise ValueError("input_type must be 'dict', 'pair' or 'string',"
" got %r." % input_type)
def fit(self, X=None, y=None):
"""No-op.
This method doesn't do anything. It exists purely for compatibility
with the scikit-learn transformer API.
Returns
-------
self : FeatureHasher
"""
# repeat input validation for grid search (which calls set_params)
self._validate_params(self.n_features, self.input_type)
return self
def transform(self, raw_X, y=None):
"""Transform a sequence of instances to a scipy.sparse matrix.
Parameters
----------
raw_X : iterable over iterable over raw features, length = n_samples
        Samples. Each sample must be an iterable (e.g., a list or tuple)
containing/generating feature names (and optionally values, see
the input_type constructor argument) which will be hashed.
raw_X need not support the len function, so it can be the result
of a generator; n_samples is determined on the fly.
y : (ignored)
Returns
-------
X : scipy.sparse matrix, shape = (n_samples, self.n_features)
Feature matrix, for use with estimators or further transformers.
"""
raw_X = iter(raw_X)
if self.input_type == "dict":
raw_X = (_iteritems(d) for d in raw_X)
elif self.input_type == "string":
raw_X = (((f, 1) for f in x) for x in raw_X)
indices, indptr, values = \
_hashing.transform(raw_X, self.n_features, self.dtype)
n_samples = indptr.shape[0] - 1
if n_samples == 0:
raise ValueError("Cannot vectorize empty sequence.")
X = sp.csr_matrix((values, indices, indptr), dtype=self.dtype,
shape=(n_samples, self.n_features))
X.sum_duplicates() # also sorts the indices
if self.non_negative:
np.abs(X.data, X.data)
return X
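# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): the input_type
# parameter documented above also accepts "string", in which case each feature
# name carries an implicit value of 1.  The snippet assumes the class is
# importable from sklearn.feature_extraction; the documents are illustrative.
if __name__ == "__main__":
    from sklearn.feature_extraction import FeatureHasher

    hasher = FeatureHasher(n_features=8, input_type="string")
    docs = [["dog", "cat", "dog"], ["run", "dog"]]
    X_hashed = hasher.transform(docs)   # sparse matrix of shape (2, 8)
    print(X_hashed.toarray())           # signs may be flipped unless non_negative=True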
|
bsd-3-clause
|
RPGOne/Skynet
|
scikit-learn-0.18.1/examples/semi_supervised/plot_label_propagation_digits.py
|
55
|
2723
|
"""
===================================================
Label Propagation digits: Demonstrating performance
===================================================
This example demonstrates the power of semisupervised learning by
training a Label Spreading model to classify handwritten digits
with sets of very few labels.
The handwritten digit dataset has 1797 total points. The model will
be trained using all points, but only 30 will be labeled. Results
in the form of a confusion matrix and a series of metrics over each
class will be very good.
At the end, the top 10 most uncertain predictions will be shown.
"""
print(__doc__)
# Authors: Clay Woolam <[email protected]>
# License: BSD
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
from sklearn import datasets
from sklearn.semi_supervised import label_propagation
from sklearn.metrics import confusion_matrix, classification_report
digits = datasets.load_digits()
rng = np.random.RandomState(0)
indices = np.arange(len(digits.data))
rng.shuffle(indices)
X = digits.data[indices[:330]]
y = digits.target[indices[:330]]
images = digits.images[indices[:330]]
n_total_samples = len(y)
n_labeled_points = 30
indices = np.arange(n_total_samples)
unlabeled_set = indices[n_labeled_points:]
# shuffle everything around
y_train = np.copy(y)
y_train[unlabeled_set] = -1
###############################################################################
# Learn with LabelSpreading
lp_model = label_propagation.LabelSpreading(gamma=0.25, max_iter=5)
lp_model.fit(X, y_train)
predicted_labels = lp_model.transduction_[unlabeled_set]
true_labels = y[unlabeled_set]
cm = confusion_matrix(true_labels, predicted_labels, labels=lp_model.classes_)
print("Label Spreading model: %d labeled & %d unlabeled points (%d total)" %
(n_labeled_points, n_total_samples - n_labeled_points, n_total_samples))
print(classification_report(true_labels, predicted_labels))
print("Confusion matrix")
print(cm)
# calculate uncertainty values for each transduced distribution
pred_entropies = stats.distributions.entropy(lp_model.label_distributions_.T)
# pick the top 10 most uncertain labels
uncertainty_index = np.argsort(pred_entropies)[-10:]
###############################################################################
# plot
f = plt.figure(figsize=(7, 5))
for index, image_index in enumerate(uncertainty_index):
image = images[image_index]
sub = f.add_subplot(2, 5, index + 1)
sub.imshow(image, cmap=plt.cm.gray_r)
plt.xticks([])
plt.yticks([])
sub.set_title('predict: %i\ntrue: %i' % (
lp_model.transduction_[image_index], y[image_index]))
f.suptitle('Learning with small amount of labeled data')
plt.show()
|
bsd-3-clause
|
qe-team/marmot
|
marmot/preprocessing/prepare_dataset.py
|
1
|
2823
|
import argparse
import sys, codecs, pickle
import numpy as np
import pandas as pd
import preprocess_wmt
import preprocess_ter
import get_features  # used below via get_features.get_features; assumed to sit alongside the other preprocessing modules
# prepare a dataset for the Machine Learning component
# sample call: python prepare_dataset.py -i test_data/training -v /home/chris/programs/word2vec/trunk/vectors.bin -o 'test-'
def array_to_df(array):
df = pd.DataFrame(array, index=range(array.shape[0]), columns=range(array.shape[1]))
return df
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('-i','--input', type=str, required=True, help='input file -- sentences tagged with errors')
parser.add_argument('-v','--vector', type=str, required=True, help='vectors generated by word2vec in binary format')
parser.add_argument('-t', '--test', type=str, help='test data (file in the same format as input)')
parser.add_argument('-o', '--output', type=str, default='', help='output file prefix')
    parser.add_argument('-p', '--preprocessor', type=str, default='xml', choices=['ter', 'xml'], help='format of the annotated input data')
args = parser.parse_args()
text_processor = None
if args.preprocessor == 'xml':
text_processor = preprocess_wmt.parse_src
elif args.preprocessor == 'ter':
text_processor = preprocess_ter.parse_ter_file
else:
text_processor = preprocess_wmt.parse_src
# TODO: all of the following code could be parallelized
train_features = text_processor(args.input, good_context=True)
train_tokens = [x[2] for x in train_features]
sentence_ids = [x[0] for x in train_features]
(train_vecs, train_labels) = get_features.get_features(args.vector, feature_array=train_features)
# Create dataframes
train_df = array_to_df(train_vecs)
train_df['sentence_id'] = pd.Series(sentence_ids, index=train_df.index)
train_df['token'] = pd.Series(train_tokens, index=train_df.index)
# add labels column to dataframe
train_df['label'] = pd.Series(train_labels, index=train_df.index)
# save dataframes as csv
train_df.to_csv(args.output + 'train.csv', encoding='utf-8')
# pickle train_features for later
with open('train_features.pickle', 'w') as out:
pickle.dump(train_features, out)
if args.test:
test_features = text_processor( args.test, good_context=True)
(test_vecs, test_labels) = get_features.get_features(args.vector, feature_array=test_features)
test_tokens = [x[2] for x in test_features]
test_df = array_to_df(test_vecs)
test_df['token'] = pd.Series(test_tokens, index=test_df.index)
test_df['label'] = pd.Series(test_labels, index=test_df.index)
test_df.to_csv(args.output + 'test.csv', encoding='utf-8')
sys.stderr.write("Finished preprocessing test/train data, and extracting vectors")
|
isc
|
arabenjamin/scikit-learn
|
sklearn/kernel_ridge.py
|
155
|
6545
|
"""Module :mod:`sklearn.kernel_ridge` implements kernel ridge regression."""
# Authors: Mathieu Blondel <[email protected]>
# Jan Hendrik Metzen <[email protected]>
# License: BSD 3 clause
import numpy as np
from .base import BaseEstimator, RegressorMixin
from .metrics.pairwise import pairwise_kernels
from .linear_model.ridge import _solve_cholesky_kernel
from .utils import check_X_y
from .utils.validation import check_is_fitted
class KernelRidge(BaseEstimator, RegressorMixin):
"""Kernel ridge regression.
Kernel ridge regression (KRR) combines ridge regression (linear least
squares with l2-norm regularization) with the kernel trick. It thus
learns a linear function in the space induced by the respective kernel and
the data. For non-linear kernels, this corresponds to a non-linear
function in the original space.
The form of the model learned by KRR is identical to support vector
regression (SVR). However, different loss functions are used: KRR uses
squared error loss while support vector regression uses epsilon-insensitive
loss, both combined with l2 regularization. In contrast to SVR, fitting a
KRR model can be done in closed-form and is typically faster for
medium-sized datasets. On the other hand, the learned model is non-sparse
and thus slower than SVR, which learns a sparse model for epsilon > 0, at
prediction-time.
This estimator has built-in support for multi-variate regression
(i.e., when y is a 2d-array of shape [n_samples, n_targets]).
Read more in the :ref:`User Guide <kernel_ridge>`.
Parameters
----------
alpha : {float, array-like}, shape = [n_targets]
Small positive values of alpha improve the conditioning of the problem
and reduce the variance of the estimates. Alpha corresponds to
``(2*C)^-1`` in other linear models such as LogisticRegression or
LinearSVC. If an array is passed, penalties are assumed to be specific
to the targets. Hence they must correspond in number.
kernel : string or callable, default="linear"
Kernel mapping used internally. A callable should accept two arguments
and the keyword arguments passed to this object as kernel_params, and
should return a floating point number.
gamma : float, default=None
    Gamma parameter for the RBF, polynomial, exponential, chi2 and
sigmoid kernels. Interpretation of the default value is left to
the kernel; see the documentation for sklearn.metrics.pairwise.
Ignored by other kernels.
degree : float, default=3
Degree of the polynomial kernel. Ignored by other kernels.
coef0 : float, default=1
Zero coefficient for polynomial and sigmoid kernels.
Ignored by other kernels.
kernel_params : mapping of string to any, optional
Additional parameters (keyword arguments) for kernel function passed
as callable object.
Attributes
----------
    dual_coef_ : array, shape = [n_samples] or [n_samples, n_targets]
Weight vector(s) in kernel space
X_fit_ : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training data, which is also required for prediction
References
----------
* Kevin P. Murphy
"Machine Learning: A Probabilistic Perspective", The MIT Press
chapter 14.4.3, pp. 492-493
See also
--------
Ridge
Linear ridge regression.
SVR
Support Vector Regression implemented using libsvm.
Examples
--------
>>> from sklearn.kernel_ridge import KernelRidge
>>> import numpy as np
>>> n_samples, n_features = 10, 5
>>> rng = np.random.RandomState(0)
>>> y = rng.randn(n_samples)
>>> X = rng.randn(n_samples, n_features)
>>> clf = KernelRidge(alpha=1.0)
>>> clf.fit(X, y) # doctest: +NORMALIZE_WHITESPACE
KernelRidge(alpha=1.0, coef0=1, degree=3, gamma=None, kernel='linear',
kernel_params=None)
"""
def __init__(self, alpha=1, kernel="linear", gamma=None, degree=3, coef0=1,
kernel_params=None):
self.alpha = alpha
self.kernel = kernel
self.gamma = gamma
self.degree = degree
self.coef0 = coef0
self.kernel_params = kernel_params
def _get_kernel(self, X, Y=None):
if callable(self.kernel):
params = self.kernel_params or {}
else:
params = {"gamma": self.gamma,
"degree": self.degree,
"coef0": self.coef0}
return pairwise_kernels(X, Y, metric=self.kernel,
filter_params=True, **params)
@property
def _pairwise(self):
return self.kernel == "precomputed"
def fit(self, X, y=None, sample_weight=None):
"""Fit Kernel Ridge regression model
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training data
y : array-like, shape = [n_samples] or [n_samples, n_targets]
Target values
sample_weight : float or numpy array of shape [n_samples]
Individual weights for each sample, ignored if None is passed.
Returns
-------
self : returns an instance of self.
"""
# Convert data
X, y = check_X_y(X, y, accept_sparse=("csr", "csc"), multi_output=True,
y_numeric=True)
K = self._get_kernel(X)
alpha = np.atleast_1d(self.alpha)
ravel = False
if len(y.shape) == 1:
y = y.reshape(-1, 1)
ravel = True
copy = self.kernel == "precomputed"
self.dual_coef_ = _solve_cholesky_kernel(K, y, alpha,
sample_weight,
copy)
if ravel:
self.dual_coef_ = self.dual_coef_.ravel()
self.X_fit_ = X
return self
def predict(self, X):
"""Predict using the the kernel ridge model
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Samples.
Returns
-------
C : array, shape = [n_samples] or [n_samples, n_targets]
Returns predicted values.
"""
check_is_fitted(self, ["X_fit_", "dual_coef_"])
K = self._get_kernel(X, self.X_fit_)
return np.dot(K, self.dual_coef_)
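# ---------------------------------------------------------------------------
# Hedged sketch (not part of the original module): the docstring above notes
# that KRR can be fit in closed form.  For a single alpha and a precomputed
# kernel K, the dual coefficients solve (K + alpha * I) dual_coef = y, which
# the snippet below checks against KernelRidge with a linear kernel; the
# random data are illustrative only.
if __name__ == "__main__":
    import numpy as np
    from sklearn.kernel_ridge import KernelRidge

    rng = np.random.RandomState(0)
    X = rng.randn(20, 3)
    y = rng.randn(20)
    alpha = 1.0

    K = np.dot(X, X.T)                                   # linear kernel
    dual = np.linalg.solve(K + alpha * np.eye(len(X)), y)

    model = KernelRidge(alpha=alpha, kernel="linear").fit(X, y)
    print(np.allclose(model.dual_coef_, dual))           # expected: True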
|
bsd-3-clause
|