repo_name (stringlengths 6-112) | path (stringlengths 4-204) | copies (stringlengths 1-3) | size (stringlengths 4-6) | content (stringlengths 714-810k) | license (stringclasses, 15 values)
---|---|---|---|---|---|
kenshay/ImageScript | ProgramData/SystemFiles/Python/Lib/site-packages/pandas/tools/tests/test_util.py | 7 | 16721 | import os
import locale
import codecs
import nose
import numpy as np
from numpy import iinfo
import pandas as pd
from pandas import (date_range, Index, _np_version_under1p9)
import pandas.util.testing as tm
from pandas.tools.util import cartesian_product, to_numeric
CURRENT_LOCALE = locale.getlocale()
LOCALE_OVERRIDE = os.environ.get('LOCALE_OVERRIDE', None)
class TestCartesianProduct(tm.TestCase):
def test_simple(self):
x, y = list('ABC'), [1, 22]
result1, result2 = cartesian_product([x, y])
expected1 = np.array(['A', 'A', 'B', 'B', 'C', 'C'])
expected2 = np.array([1, 22, 1, 22, 1, 22])
tm.assert_numpy_array_equal(result1, expected1)
tm.assert_numpy_array_equal(result2, expected2)
def test_datetimeindex(self):
# regression test for GitHub issue #6439
# make sure that the ordering on datetimeindex is consistent
x = date_range('2000-01-01', periods=2)
result1, result2 = [Index(y).day for y in cartesian_product([x, x])]
expected1 = np.array([1, 1, 2, 2], dtype=np.int32)
expected2 = np.array([1, 2, 1, 2], dtype=np.int32)
tm.assert_numpy_array_equal(result1, expected1)
tm.assert_numpy_array_equal(result2, expected2)
def test_empty(self):
# product of empty factors
X = [[], [0, 1], []]
Y = [[], [], ['a', 'b', 'c']]
for x, y in zip(X, Y):
expected1 = np.array([], dtype=np.asarray(x).dtype)
expected2 = np.array([], dtype=np.asarray(y).dtype)
result1, result2 = cartesian_product([x, y])
tm.assert_numpy_array_equal(result1, expected1)
tm.assert_numpy_array_equal(result2, expected2)
# empty product (empty input):
result = cartesian_product([])
expected = []
tm.assert_equal(result, expected)
def test_invalid_input(self):
invalid_inputs = [1, [1], [1, 2], [[1], 2],
'a', ['a'], ['a', 'b'], [['a'], 'b']]
msg = "Input must be a list-like of list-likes"
for X in invalid_inputs:
tm.assertRaisesRegexp(TypeError, msg, cartesian_product, X=X)
class TestLocaleUtils(tm.TestCase):
@classmethod
def setUpClass(cls):
super(TestLocaleUtils, cls).setUpClass()
cls.locales = tm.get_locales()
if not cls.locales:
raise nose.SkipTest("No locales found")
tm._skip_if_windows()
@classmethod
def tearDownClass(cls):
super(TestLocaleUtils, cls).tearDownClass()
del cls.locales
def test_get_locales(self):
# all systems should have at least a single locale
assert len(tm.get_locales()) > 0
def test_get_locales_prefix(self):
if len(self.locales) == 1:
raise nose.SkipTest("Only a single locale found, no point in "
"trying to test filtering locale prefixes")
first_locale = self.locales[0]
assert len(tm.get_locales(prefix=first_locale[:2])) > 0
def test_set_locale(self):
if len(self.locales) == 1:
raise nose.SkipTest("Only a single locale found, no point in "
"trying to test setting another locale")
if LOCALE_OVERRIDE is None:
lang, enc = 'it_CH', 'UTF-8'
elif LOCALE_OVERRIDE == 'C':
lang, enc = 'en_US', 'ascii'
else:
lang, enc = LOCALE_OVERRIDE.split('.')
enc = codecs.lookup(enc).name
new_locale = lang, enc
if not tm._can_set_locale(new_locale):
with tm.assertRaises(locale.Error):
with tm.set_locale(new_locale):
pass
else:
with tm.set_locale(new_locale) as normalized_locale:
new_lang, new_enc = normalized_locale.split('.')
new_enc = codecs.lookup(enc).name
normalized_locale = new_lang, new_enc
self.assertEqual(normalized_locale, new_locale)
current_locale = locale.getlocale()
self.assertEqual(current_locale, CURRENT_LOCALE)
class TestToNumeric(tm.TestCase):
def test_series(self):
s = pd.Series(['1', '-3.14', '7'])
res = to_numeric(s)
expected = pd.Series([1, -3.14, 7])
tm.assert_series_equal(res, expected)
s = pd.Series(['1', '-3.14', 7])
res = to_numeric(s)
tm.assert_series_equal(res, expected)
def test_series_numeric(self):
s = pd.Series([1, 3, 4, 5], index=list('ABCD'), name='XXX')
res = to_numeric(s)
tm.assert_series_equal(res, s)
s = pd.Series([1., 3., 4., 5.], index=list('ABCD'), name='XXX')
res = to_numeric(s)
tm.assert_series_equal(res, s)
# bool is regarded as numeric
s = pd.Series([True, False, True, True],
index=list('ABCD'), name='XXX')
res = to_numeric(s)
tm.assert_series_equal(res, s)
def test_error(self):
s = pd.Series([1, -3.14, 'apple'])
msg = 'Unable to parse string "apple" at position 2'
with tm.assertRaisesRegexp(ValueError, msg):
to_numeric(s, errors='raise')
res = to_numeric(s, errors='ignore')
expected = pd.Series([1, -3.14, 'apple'])
tm.assert_series_equal(res, expected)
res = to_numeric(s, errors='coerce')
expected = pd.Series([1, -3.14, np.nan])
tm.assert_series_equal(res, expected)
s = pd.Series(['orange', 1, -3.14, 'apple'])
msg = 'Unable to parse string "orange" at position 0'
with tm.assertRaisesRegexp(ValueError, msg):
to_numeric(s, errors='raise')
def test_error_seen_bool(self):
s = pd.Series([True, False, 'apple'])
msg = 'Unable to parse string "apple" at position 2'
with tm.assertRaisesRegexp(ValueError, msg):
to_numeric(s, errors='raise')
res = to_numeric(s, errors='ignore')
expected = pd.Series([True, False, 'apple'])
tm.assert_series_equal(res, expected)
# coerces to float
res = to_numeric(s, errors='coerce')
expected = pd.Series([1., 0., np.nan])
tm.assert_series_equal(res, expected)
def test_list(self):
s = ['1', '-3.14', '7']
res = to_numeric(s)
expected = np.array([1, -3.14, 7])
tm.assert_numpy_array_equal(res, expected)
def test_list_numeric(self):
s = [1, 3, 4, 5]
res = to_numeric(s)
tm.assert_numpy_array_equal(res, np.array(s, dtype=np.int64))
s = [1., 3., 4., 5.]
res = to_numeric(s)
tm.assert_numpy_array_equal(res, np.array(s))
# bool is regarded as numeric
s = [True, False, True, True]
res = to_numeric(s)
tm.assert_numpy_array_equal(res, np.array(s))
def test_numeric(self):
s = pd.Series([1, -3.14, 7], dtype='O')
res = to_numeric(s)
expected = pd.Series([1, -3.14, 7])
tm.assert_series_equal(res, expected)
s = pd.Series([1, -3.14, 7])
res = to_numeric(s)
tm.assert_series_equal(res, expected)
def test_all_nan(self):
s = pd.Series(['a', 'b', 'c'])
res = to_numeric(s, errors='coerce')
expected = pd.Series([np.nan, np.nan, np.nan])
tm.assert_series_equal(res, expected)
def test_type_check(self):
# GH 11776
df = pd.DataFrame({'a': [1, -3.14, 7], 'b': ['4', '5', '6']})
with tm.assertRaisesRegexp(TypeError, "1-d array"):
to_numeric(df)
for errors in ['ignore', 'raise', 'coerce']:
with tm.assertRaisesRegexp(TypeError, "1-d array"):
to_numeric(df, errors=errors)
def test_scalar(self):
self.assertEqual(pd.to_numeric(1), 1)
self.assertEqual(pd.to_numeric(1.1), 1.1)
self.assertEqual(pd.to_numeric('1'), 1)
self.assertEqual(pd.to_numeric('1.1'), 1.1)
with tm.assertRaises(ValueError):
to_numeric('XX', errors='raise')
self.assertEqual(to_numeric('XX', errors='ignore'), 'XX')
self.assertTrue(np.isnan(to_numeric('XX', errors='coerce')))
def test_numeric_dtypes(self):
idx = pd.Index([1, 2, 3], name='xxx')
res = pd.to_numeric(idx)
tm.assert_index_equal(res, idx)
res = pd.to_numeric(pd.Series(idx, name='xxx'))
tm.assert_series_equal(res, pd.Series(idx, name='xxx'))
res = pd.to_numeric(idx.values)
tm.assert_numpy_array_equal(res, idx.values)
idx = pd.Index([1., np.nan, 3., np.nan], name='xxx')
res = pd.to_numeric(idx)
tm.assert_index_equal(res, idx)
res = pd.to_numeric(pd.Series(idx, name='xxx'))
tm.assert_series_equal(res, pd.Series(idx, name='xxx'))
res = pd.to_numeric(idx.values)
tm.assert_numpy_array_equal(res, idx.values)
def test_str(self):
idx = pd.Index(['1', '2', '3'], name='xxx')
exp = np.array([1, 2, 3], dtype='int64')
res = pd.to_numeric(idx)
tm.assert_index_equal(res, pd.Index(exp, name='xxx'))
res = pd.to_numeric(pd.Series(idx, name='xxx'))
tm.assert_series_equal(res, pd.Series(exp, name='xxx'))
res = pd.to_numeric(idx.values)
tm.assert_numpy_array_equal(res, exp)
idx = pd.Index(['1.5', '2.7', '3.4'], name='xxx')
exp = np.array([1.5, 2.7, 3.4])
res = pd.to_numeric(idx)
tm.assert_index_equal(res, pd.Index(exp, name='xxx'))
res = pd.to_numeric(pd.Series(idx, name='xxx'))
tm.assert_series_equal(res, pd.Series(exp, name='xxx'))
res = pd.to_numeric(idx.values)
tm.assert_numpy_array_equal(res, exp)
def test_datetimelike(self):
for tz in [None, 'US/Eastern', 'Asia/Tokyo']:
idx = pd.date_range('20130101', periods=3, tz=tz, name='xxx')
res = pd.to_numeric(idx)
tm.assert_index_equal(res, pd.Index(idx.asi8, name='xxx'))
res = pd.to_numeric(pd.Series(idx, name='xxx'))
tm.assert_series_equal(res, pd.Series(idx.asi8, name='xxx'))
res = pd.to_numeric(idx.values)
tm.assert_numpy_array_equal(res, idx.asi8)
def test_timedelta(self):
idx = pd.timedelta_range('1 days', periods=3, freq='D', name='xxx')
res = pd.to_numeric(idx)
tm.assert_index_equal(res, pd.Index(idx.asi8, name='xxx'))
res = pd.to_numeric(pd.Series(idx, name='xxx'))
tm.assert_series_equal(res, pd.Series(idx.asi8, name='xxx'))
res = pd.to_numeric(idx.values)
tm.assert_numpy_array_equal(res, idx.asi8)
def test_period(self):
idx = pd.period_range('2011-01', periods=3, freq='M', name='xxx')
res = pd.to_numeric(idx)
tm.assert_index_equal(res, pd.Index(idx.asi8, name='xxx'))
# ToDo: enable when we can support native PeriodDtype
# res = pd.to_numeric(pd.Series(idx, name='xxx'))
# tm.assert_series_equal(res, pd.Series(idx.asi8, name='xxx'))
def test_non_hashable(self):
# Test for Bug #13324
s = pd.Series([[10.0, 2], 1.0, 'apple'])
res = pd.to_numeric(s, errors='coerce')
tm.assert_series_equal(res, pd.Series([np.nan, 1.0, np.nan]))
res = pd.to_numeric(s, errors='ignore')
tm.assert_series_equal(res, pd.Series([[10.0, 2], 1.0, 'apple']))
with self.assertRaisesRegexp(TypeError, "Invalid object type"):
pd.to_numeric(s)
def test_downcast(self):
# see gh-13352
mixed_data = ['1', 2, 3]
int_data = [1, 2, 3]
date_data = np.array(['1970-01-02', '1970-01-03',
'1970-01-04'], dtype='datetime64[D]')
invalid_downcast = 'unsigned-integer'
msg = 'invalid downcasting method provided'
smallest_int_dtype = np.dtype(np.typecodes['Integer'][0])
smallest_uint_dtype = np.dtype(np.typecodes['UnsignedInteger'][0])
# support for float dtypes below np.float32 is few and far between
float_32_char = np.dtype(np.float32).char
smallest_float_dtype = float_32_char
for data in (mixed_data, int_data, date_data):
with self.assertRaisesRegexp(ValueError, msg):
pd.to_numeric(data, downcast=invalid_downcast)
expected = np.array([1, 2, 3], dtype=np.int64)
res = pd.to_numeric(data)
tm.assert_numpy_array_equal(res, expected)
res = pd.to_numeric(data, downcast=None)
tm.assert_numpy_array_equal(res, expected)
expected = np.array([1, 2, 3], dtype=smallest_int_dtype)
for signed_downcast in ('integer', 'signed'):
res = pd.to_numeric(data, downcast=signed_downcast)
tm.assert_numpy_array_equal(res, expected)
expected = np.array([1, 2, 3], dtype=smallest_uint_dtype)
res = pd.to_numeric(data, downcast='unsigned')
tm.assert_numpy_array_equal(res, expected)
expected = np.array([1, 2, 3], dtype=smallest_float_dtype)
res = pd.to_numeric(data, downcast='float')
tm.assert_numpy_array_equal(res, expected)
# if we can't successfully cast the given
# data to a numeric dtype, do not bother
# with the downcast parameter
data = ['foo', 2, 3]
expected = np.array(data, dtype=object)
res = pd.to_numeric(data, errors='ignore',
downcast='unsigned')
tm.assert_numpy_array_equal(res, expected)
# cannot cast to an unsigned integer because
# we have a negative number
data = ['-1', 2, 3]
expected = np.array([-1, 2, 3], dtype=np.int64)
res = pd.to_numeric(data, downcast='unsigned')
tm.assert_numpy_array_equal(res, expected)
# cannot cast to an integer (signed or unsigned)
# because we have a float number
data = ['1.1', 2, 3]
expected = np.array([1.1, 2, 3], dtype=np.float64)
for downcast in ('integer', 'signed', 'unsigned'):
res = pd.to_numeric(data, downcast=downcast)
tm.assert_numpy_array_equal(res, expected)
# the smallest integer dtype need not be np.(u)int8
data = ['256', 257, 258]
for downcast, expected_dtype in zip(
['integer', 'signed', 'unsigned'],
[np.int16, np.int16, np.uint16]):
expected = np.array([256, 257, 258], dtype=expected_dtype)
res = pd.to_numeric(data, downcast=downcast)
tm.assert_numpy_array_equal(res, expected)
def test_downcast_limits(self):
# Test the limits of each downcast. Bug: #14401.
# Check to make sure numpy is new enough to run this test.
if _np_version_under1p9:
raise nose.SkipTest("Numpy version is under 1.9")
i = 'integer'
u = 'unsigned'
dtype_downcast_min_max = [
('int8', i, [iinfo(np.int8).min, iinfo(np.int8).max]),
('int16', i, [iinfo(np.int16).min, iinfo(np.int16).max]),
('int32', i, [iinfo(np.int32).min, iinfo(np.int32).max]),
('int64', i, [iinfo(np.int64).min, iinfo(np.int64).max]),
('uint8', u, [iinfo(np.uint8).min, iinfo(np.uint8).max]),
('uint16', u, [iinfo(np.uint16).min, iinfo(np.uint16).max]),
('uint32', u, [iinfo(np.uint32).min, iinfo(np.uint32).max]),
# Test will be skipped until there is more uint64 support.
# ('uint64', u, [iinfo(uint64).min, iinfo(uint64).max]),
('int16', i, [iinfo(np.int8).min, iinfo(np.int8).max + 1]),
('int32', i, [iinfo(np.int16).min, iinfo(np.int16).max + 1]),
('int64', i, [iinfo(np.int32).min, iinfo(np.int32).max + 1]),
('int16', i, [iinfo(np.int8).min - 1, iinfo(np.int16).max]),
('int32', i, [iinfo(np.int16).min - 1, iinfo(np.int32).max]),
('int64', i, [iinfo(np.int32).min - 1, iinfo(np.int64).max]),
('uint16', u, [iinfo(np.uint8).min, iinfo(np.uint8).max + 1]),
('uint32', u, [iinfo(np.uint16).min, iinfo(np.uint16).max + 1]),
# Test will be skipped until there is more uint64 support.
# ('uint64', u, [iinfo(np.uint32).min, iinfo(np.uint32).max + 1]),
]
for dtype, downcast, min_max in dtype_downcast_min_max:
series = pd.to_numeric(pd.Series(min_max), downcast=downcast)
tm.assert_equal(series.dtype, dtype)
if __name__ == '__main__':
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
exit=False)
| gpl-3.0 |
roxana-lafuente/CursoAA | clase5.py | 1 | 2001 | from sklearn.metrics import accuracy_score, precision_score, recall_score
from sklearn.cross_validation import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.datasets import load_iris
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
# Load the dataset.
dataset = load_iris()
# Convert to a pandas DataFrame to see a summary of the data.
data = pd.DataFrame(dataset.data, columns=dataset.feature_names)
data['target'] = dataset['target']
data_train, data_test = train_test_split(data, test_size=0.3)
X = data_train[[u'sepal length (cm)', u'sepal width (cm)']]
y = data_train[u'target']
# Create the figure
title = "Regularization vs No regularization"
fig = plt.figure(title)
ax = fig.add_subplot(1, 2, 1)
ax.set_title("No regularization")
h = .02  # Spacing between the values to generate
# No regularization used here
model = LogisticRegression(multi_class='ovr')
# Train our classifier on len(X) observations, each with its target y
model.fit(X, y)
x_min, x_max = X[u'sepal length (cm)'].min() - 0.05, X[u'sepal length (cm)'].max() + 0.05
y_min, y_max = X[u'sepal width (cm)'].min() - 0.05, X[u'sepal width (cm)'].max() + 0.05
# Generate values between N1 and N2 with spacing h using np.arange.
# Then use meshgrid:
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
Z = model.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
plt.pcolormesh(xx, yy, Z, cmap=plt.cm.Paired)
# Plot
plt.scatter(X[u'sepal length (cm)'], X[u'sepal width (cm)'],
c=y, cmap=plt.cm.Oranges)
# Label the axes to make the figure easier to understand.
plt.xlabel('Sepal length')
plt.ylabel('Sepal width')
# Real values
ax = fig.add_subplot(1, 2, 2)
ax.set_title("Regularization")
# TODO: Add the code to predict with regularization; an illustrative sketch follows below.
# Try different values of C.
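# Illustrative sketch only (not part of the original course code): fit a second
# LogisticRegression with an explicitly stronger L2 penalty and plot its decision
# regions on the same grid. The value C=0.01 is an arbitrary assumption; in
# scikit-learn a smaller C means stronger regularization, so compare several values.
model_reg = LogisticRegression(multi_class='ovr', C=0.01)
model_reg.fit(X, y)
Z_reg = model_reg.predict(np.c_[xx.ravel(), yy.ravel()])
Z_reg = Z_reg.reshape(xx.shape)
plt.pcolormesh(xx, yy, Z_reg, cmap=plt.cm.Paired)
plt.scatter(X[u'sepal length (cm)'], X[u'sepal width (cm)'], c=y, cmap=plt.cm.Oranges)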
# Label the axes to make the figure easier to understand.
plt.xlabel('Sepal length')
plt.ylabel('Sepal width')
plt.show()
| gpl-3.0 |
StuartLittlefair/astropy | astropy/visualization/wcsaxes/tests/test_misc.py | 3 | 17207 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from packaging.version import Version
import pytest
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.contour import QuadContourSet
from astropy import units as u
from astropy.wcs import WCS
from astropy.io import fits
from astropy.coordinates import SkyCoord
from astropy.utils.data import get_pkg_data_filename
from astropy.visualization.wcsaxes.core import WCSAxes
from astropy.visualization.wcsaxes.frame import (
EllipticalFrame, RectangularFrame, RectangularFrame1D)
from astropy.visualization.wcsaxes.utils import get_coord_meta
from astropy.visualization.wcsaxes.transforms import CurvedTransform
ft_version = Version(matplotlib.ft2font.__freetype_version__)
FREETYPE_261 = ft_version == Version("2.6.1")
TEX_UNAVAILABLE = not matplotlib.checkdep_usetex(True)
def teardown_function(function):
plt.close('all')
def test_grid_regression(ignore_matplotlibrc):
# Regression test for a bug that meant that if the rc parameter
# axes.grid was set to True, WCSAxes would crash upon initialization.
plt.rc('axes', grid=True)
fig = plt.figure(figsize=(3, 3))
WCSAxes(fig, [0.1, 0.1, 0.8, 0.8])
def test_format_coord_regression(ignore_matplotlibrc, tmpdir):
# Regression test for a bug that meant that if format_coord was called by
# Matplotlib before the axes were drawn, an error occurred.
fig = plt.figure(figsize=(3, 3))
ax = WCSAxes(fig, [0.1, 0.1, 0.8, 0.8])
fig.add_axes(ax)
assert ax.format_coord(10, 10) == ""
assert ax.coords[0].format_coord(10) == ""
assert ax.coords[1].format_coord(10) == ""
fig.savefig(tmpdir.join('nothing').strpath)
assert ax.format_coord(10, 10) == "10.0 10.0 (world)"
assert ax.coords[0].format_coord(10) == "10.0"
assert ax.coords[1].format_coord(10) == "10.0"
TARGET_HEADER = fits.Header.fromstring("""
NAXIS = 2
NAXIS1 = 200
NAXIS2 = 100
CTYPE1 = 'RA---MOL'
CRPIX1 = 500
CRVAL1 = 180.0
CDELT1 = -0.4
CUNIT1 = 'deg '
CTYPE2 = 'DEC--MOL'
CRPIX2 = 400
CRVAL2 = 0.0
CDELT2 = 0.4
CUNIT2 = 'deg '
COORDSYS= 'icrs '
""", sep='\n')
@pytest.mark.parametrize('grid_type', ['lines', 'contours'])
def test_no_numpy_warnings(ignore_matplotlibrc, tmpdir, grid_type):
ax = plt.subplot(1, 1, 1, projection=WCS(TARGET_HEADER))
ax.imshow(np.zeros((100, 200)))
ax.coords.grid(color='white', grid_type=grid_type)
with pytest.warns(None) as warning_lines:
plt.savefig(tmpdir.join('test.png').strpath)
# There should be no warnings raised if some pixels are outside WCS
# (since this is normal).
# BUT catch_warning was ignoring some warnings before, so now we
# have to catch it. Otherwise, the pytest filterwarnings=error
# setting in setup.cfg will fail this test.
# There are actually multiple warnings but they are all similar.
for w in warning_lines:
w_msg = str(w.message)
assert ('converting a masked element to nan' in w_msg or
'No contour levels were found within the data range' in w_msg or
'np.asscalar(a) is deprecated since NumPy v1.16' in w_msg or
'PY_SSIZE_T_CLEAN will be required' in w_msg)
def test_invalid_frame_overlay(ignore_matplotlibrc):
# Make sure a nice error is returned if a frame doesn't exist
ax = plt.subplot(1, 1, 1, projection=WCS(TARGET_HEADER))
with pytest.raises(ValueError) as exc:
ax.get_coords_overlay('banana')
assert exc.value.args[0] == 'Frame banana not found'
with pytest.raises(ValueError) as exc:
get_coord_meta('banana')
assert exc.value.args[0] == 'Unknown frame: banana'
def test_plot_coord_transform(ignore_matplotlibrc):
twoMASS_k_header = get_pkg_data_filename('data/2MASS_k_header')
twoMASS_k_header = fits.Header.fromtextfile(twoMASS_k_header)
fig = plt.figure(figsize=(6, 6))
ax = fig.add_axes([0.15, 0.15, 0.8, 0.8],
projection=WCS(twoMASS_k_header),
aspect='equal')
ax.set_xlim(-0.5, 720.5)
ax.set_ylim(-0.5, 720.5)
c = SkyCoord(359.76045223*u.deg, 0.26876217*u.deg)
with pytest.raises(TypeError):
ax.plot_coord(c, 'o', transform=ax.get_transform('galactic'))
def test_set_label_properties(ignore_matplotlibrc):
# Regression test to make sure that arguments passed to
# set_xlabel/set_ylabel are passed to the underlying coordinate helpers
ax = plt.subplot(1, 1, 1, projection=WCS(TARGET_HEADER))
ax.set_xlabel('Test x label', labelpad=2, color='red')
ax.set_ylabel('Test y label', labelpad=3, color='green')
assert ax.coords[0].axislabels.get_text() == 'Test x label'
assert ax.coords[0].axislabels.get_minpad('b') == 2
assert ax.coords[0].axislabels.get_color() == 'red'
assert ax.coords[1].axislabels.get_text() == 'Test y label'
assert ax.coords[1].axislabels.get_minpad('l') == 3
assert ax.coords[1].axislabels.get_color() == 'green'
assert ax.get_xlabel() == 'Test x label'
assert ax.get_ylabel() == 'Test y label'
GAL_HEADER = fits.Header.fromstring("""
SIMPLE = T / conforms to FITS standard
BITPIX = -32 / array data type
NAXIS = 3 / number of array dimensions
NAXIS1 = 31
NAXIS2 = 2881
NAXIS3 = 480
EXTEND = T
CTYPE1 = 'DISTMOD '
CRVAL1 = 3.5
CDELT1 = 0.5
CRPIX1 = 1.0
CTYPE2 = 'GLON-CAR'
CRVAL2 = 180.0
CDELT2 = -0.125
CRPIX2 = 1.0
CTYPE3 = 'GLAT-CAR'
CRVAL3 = 0.0
CDELT3 = 0.125
CRPIX3 = 241.0
""", sep='\n')
def test_slicing_warnings(ignore_matplotlibrc, tmpdir):
# Regression test to make sure that no warnings are emitted by the tick
# locator for the sliced axis when slicing a cube.
# Scalar case
wcs3d = WCS(naxis=3)
wcs3d.wcs.ctype = ['x', 'y', 'z']
wcs3d.wcs.cunit = ['deg', 'deg', 'km/s']
wcs3d.wcs.crpix = [614.5, 856.5, 333]
wcs3d.wcs.cdelt = [6.25, 6.25, 23]
wcs3d.wcs.crval = [0., 0., 1.]
with pytest.warns(None) as warning_lines:
plt.subplot(1, 1, 1, projection=wcs3d, slices=('x', 'y', 1))
plt.savefig(tmpdir.join('test.png').strpath)
# For easy debugging if there are indeed warnings
for warning in warning_lines:
# https://github.com/astropy/astropy/issues/9690
if 'PY_SSIZE_T_CLEAN' not in str(warning.message):
raise AssertionError(f'Unexpected warning: {warning}')
# Angle case
wcs3d = WCS(GAL_HEADER)
with pytest.warns(None) as warning_lines:
plt.subplot(1, 1, 1, projection=wcs3d, slices=('x', 'y', 2))
plt.savefig(tmpdir.join('test.png').strpath)
# For easy debugging if there are indeed warnings
for warning in warning_lines:
# https://github.com/astropy/astropy/issues/9690
if 'PY_SSIZE_T_CLEAN' not in str(warning.message):
raise AssertionError(f'Unexpected warning: {warning}')
def test_plt_xlabel_ylabel(tmpdir):
# Regression test for a bug that happened when using plt.xlabel
# and plt.ylabel with Matplotlib 3.0
plt.subplot(projection=WCS())
plt.xlabel('Galactic Longitude')
plt.ylabel('Galactic Latitude')
plt.savefig(tmpdir.join('test.png').strpath)
def test_grid_type_contours_transform(tmpdir):
# Regression test for a bug that caused grid_type='contours' to not work
# with custom transforms
class CustomTransform(CurvedTransform):
# We deliberately don't define the inverse, and has_inverse should
# default to False.
def transform(self, values):
return values * 1.3
transform = CustomTransform()
coord_meta = {'type': ('scalar', 'scalar'),
'unit': (u.m, u.s),
'wrap': (None, None),
'name': ('x', 'y')}
fig = plt.figure()
ax = WCSAxes(fig, [0.1, 0.1, 0.8, 0.8],
transform=transform, coord_meta=coord_meta)
fig.add_axes(ax)
ax.grid(grid_type='contours')
fig.savefig(tmpdir.join('test.png').strpath)
def test_plt_imshow_origin():
# Regression test for a bug that caused origin to be set to upper when
# plt.imshow was called.
ax = plt.subplot(projection=WCS())
plt.imshow(np.ones((2, 2)))
assert ax.get_xlim() == (-0.5, 1.5)
assert ax.get_ylim() == (-0.5, 1.5)
def test_ax_imshow_origin():
# Regression test for a bug that caused origin to be set to upper when
# ax.imshow was called with no origin
ax = plt.subplot(projection=WCS())
ax.imshow(np.ones((2, 2)))
assert ax.get_xlim() == (-0.5, 1.5)
assert ax.get_ylim() == (-0.5, 1.5)
def test_grid_contour_large_spacing(tmpdir):
# Regression test for a bug that caused a crash when grid was called and
# didn't produce grid lines (due e.g. to too large spacing) and was then
# called again.
filename = tmpdir.join('test.png').strpath
ax = plt.subplot(projection=WCS())
ax.set_xlim(-0.5, 1.5)
ax.set_ylim(-0.5, 1.5)
ax.coords[0].set_ticks(values=[] * u.one)
ax.coords[0].grid(grid_type='contours')
plt.savefig(filename)
ax.coords[0].grid(grid_type='contours')
plt.savefig(filename)
def test_contour_return():
# Regression test for a bug that caused contour and contourf to return None
# instead of the contour object.
fig = plt.figure()
ax = WCSAxes(fig, [0.1, 0.1, 0.8, 0.8])
fig.add_axes(ax)
cset = ax.contour(np.arange(16).reshape(4, 4), transform=ax.get_transform('world'))
assert isinstance(cset, QuadContourSet)
cset = ax.contourf(np.arange(16).reshape(4, 4), transform=ax.get_transform('world'))
assert isinstance(cset, QuadContourSet)
def test_contour_empty():
# Regression test for a bug that caused contour to crash if no contours
# were present.
fig = plt.figure()
ax = WCSAxes(fig, [0.1, 0.1, 0.8, 0.8])
fig.add_axes(ax)
with pytest.warns(UserWarning, match='No contour levels were found within the data range'):
ax.contour(np.zeros((4, 4)), transform=ax.get_transform('world'))
def test_iterate_coords(ignore_matplotlibrc, tmpdir):
# Regression test for a bug that caused ax.coords to return too few axes
wcs3d = WCS(naxis=3)
wcs3d.wcs.ctype = ['x', 'y', 'z']
wcs3d.wcs.cunit = ['deg', 'deg', 'km/s']
wcs3d.wcs.crpix = [614.5, 856.5, 333]
wcs3d.wcs.cdelt = [6.25, 6.25, 23]
wcs3d.wcs.crval = [0., 0., 1.]
ax = plt.subplot(1, 1, 1, projection=wcs3d, slices=('x', 'y', 1))
x, y, z = ax.coords
def test_invalid_slices_errors(ignore_matplotlibrc):
# Make sure that users get a clear message when specifying a WCS with
# >2 dimensions without giving the 'slices' argument, or if the 'slices'
# argument has too many/few elements.
wcs3d = WCS(naxis=3)
wcs3d.wcs.ctype = ['x', 'y', 'z']
plt.subplot(1, 1, 1, projection=wcs3d, slices=('x', 'y', 1))
with pytest.raises(ValueError) as exc:
plt.subplot(1, 1, 1, projection=wcs3d)
assert exc.value.args[0] == ("WCS has more than 2 pixel dimensions, so "
"'slices' should be set")
with pytest.raises(ValueError) as exc:
plt.subplot(1, 1, 1, projection=wcs3d, slices=('x', 'y', 1, 2))
assert exc.value.args[0] == ("'slices' should have as many elements as "
"WCS has pixel dimensions (should be 3)")
wcs2d = WCS(naxis=2)
wcs2d.wcs.ctype = ['x', 'y']
ax = plt.subplot(1, 1, 1, projection=wcs2d)
assert ax.frame_class is RectangularFrame
ax = plt.subplot(1, 1, 1, projection=wcs2d, slices=('x', 'y'))
assert ax.frame_class is RectangularFrame
ax = plt.subplot(1, 1, 1, projection=wcs2d, slices=('y', 'x'))
assert ax.frame_class is RectangularFrame
ax = plt.subplot(1, 1, 1, projection=wcs2d, slices=['x', 'y'])
assert ax.frame_class is RectangularFrame
ax = plt.subplot(1, 1, 1, projection=wcs2d, slices=(1, 'x'))
assert ax.frame_class is RectangularFrame1D
wcs1d = WCS(naxis=1)
wcs1d.wcs.ctype = ['x']
ax = plt.subplot(1, 1, 1, projection=wcs1d)
assert ax.frame_class is RectangularFrame1D
with pytest.raises(ValueError):
plt.subplot(1, 1, 1, projection=wcs2d, slices=(1, 'y'))
EXPECTED_REPR_1 = """
<CoordinatesMap with 3 world coordinates:
index aliases type unit wrap format_unit visible
----- ------------------------------ --------- ---- ---- ----------- -------
0 distmod dist scalar None no
1 pos.galactic.lon glon-car glon longitude deg 360 deg yes
2 pos.galactic.lat glat-car glat latitude deg None deg yes
>
""".strip()
EXPECTED_REPR_2 = """
<CoordinatesMap with 3 world coordinates:
index aliases type unit wrap format_unit visible
----- ------------------------------ --------- ---- ---- ----------- -------
0 distmod dist scalar None yes
1 pos.galactic.lon glon-car glon longitude deg 360 deg yes
2 pos.galactic.lat glat-car glat latitude deg None deg yes
>
""".strip()
def test_repr(ignore_matplotlibrc):
# Unit test to make sure __repr__ looks as expected
wcs3d = WCS(GAL_HEADER)
# Cube header has world coordinates as distance, lon, lat, so start off
# by slicing in a way that we select just lon,lat:
ax = plt.subplot(1, 1, 1, projection=wcs3d, slices=(1, 'x', 'y'))
assert repr(ax.coords) == EXPECTED_REPR_1
# Now slice in a way that all world coordinates are still present:
ax = plt.subplot(1, 1, 1, projection=wcs3d, slices=('x', 'y', 1))
assert repr(ax.coords) == EXPECTED_REPR_2
@pytest.fixture
def time_spectral_wcs_2d():
wcs = WCS(naxis=2)
wcs.wcs.ctype = ['FREQ', 'TIME']
wcs.wcs.set()
return wcs
def test_time_wcs(time_spectral_wcs_2d):
# Regression test for a bug that caused WCSAxes to error when using a WCS
# with a time axis.
plt.subplot(projection=time_spectral_wcs_2d)
@pytest.mark.skipif('TEX_UNAVAILABLE')
def test_simplify_labels_usetex(ignore_matplotlibrc, tmpdir):
"""Regression test for https://github.com/astropy/astropy/issues/8004."""
plt.rc('text', usetex=True)
header = {
'NAXIS': 2,
'NAXIS1': 360,
'NAXIS2': 180,
'CRPIX1': 180.5,
'CRPIX2': 90.5,
'CRVAL1': 180.0,
'CRVAL2': 0.0,
'CDELT1': -2 * np.sqrt(2) / np.pi,
'CDELT2': 2 * np.sqrt(2) / np.pi,
'CTYPE1': 'RA---MOL',
'CTYPE2': 'DEC--MOL',
'RADESYS': 'ICRS'}
wcs = WCS(header)
fig, ax = plt.subplots(
subplot_kw=dict(frame_class=EllipticalFrame, projection=wcs))
ax.set_xlim(-0.5, header['NAXIS1'] - 0.5)
ax.set_ylim(-0.5, header['NAXIS2'] - 0.5)
ax.coords[0].set_ticklabel(exclude_overlapping=True)
ax.coords[1].set_ticklabel(exclude_overlapping=True)
ax.coords[0].set_ticks(spacing=45 * u.deg)
ax.coords[1].set_ticks(spacing=30 * u.deg)
ax.grid()
fig.savefig(tmpdir / 'plot.png')
@pytest.mark.parametrize('frame_class', [RectangularFrame, EllipticalFrame])
def test_set_labels_with_coords(ignore_matplotlibrc, frame_class):
"""Test if ``axis.set_xlabel()`` calls the correct ``coords[i]_set_axislabel()`` in a
WCS plot. Regression test for https://github.com/astropy/astropy/issues/10435.
"""
labels = ['RA', 'Declination']
header = {
'NAXIS': 2,
'NAXIS1': 360,
'NAXIS2': 180,
'CRPIX1': 180.5,
'CRPIX2': 90.5,
'CRVAL1': 180.0,
'CRVAL2': 0.0,
'CDELT1': -2 * np.sqrt(2) / np.pi,
'CDELT2': 2 * np.sqrt(2) / np.pi,
'CTYPE1': 'RA---AIT',
'CTYPE2': 'DEC--AIT'}
wcs = WCS(header)
fig, ax = plt.subplots(
subplot_kw=dict(frame_class=frame_class, projection=wcs))
ax.set_xlabel(labels[0])
ax.set_ylabel(labels[1])
assert ax.get_xlabel() == labels[0]
assert ax.get_ylabel() == labels[1]
for i in range(2):
assert ax.coords[i].get_axislabel() == labels[i]
@pytest.mark.parametrize('atol', [0.2, 1.0e-8])
def test_bbox_size(atol):
# Test for the size of a WCSAxes bbox (only have Matplotlib >= 3.0 now)
extents = [11.38888888888889, 3.5, 576.0, 432.0]
fig = plt.figure()
ax = WCSAxes(fig, [0.1, 0.1, 0.8, 0.8])
fig.add_axes(ax)
fig.canvas.draw()
renderer = fig.canvas.renderer
ax_bbox = ax.get_tightbbox(renderer)
# Enforce strict test only with reference Freetype version
if atol < 0.1 and not FREETYPE_261:
pytest.xfail("Exact BoundingBox dimensions are only ensured with FreeType 2.6.1")
assert np.allclose(ax_bbox.extents, extents, atol=atol)
| bsd-3-clause |
ywcui1990/htmresearch | htmresearch/frameworks/union_temporal_pooling/activation/plotExciteDecayFunctions.py | 12 | 1957 |
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2015, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
from matplotlib.backends.backend_pdf import PdfPages
import matplotlib.pyplot as plt
import numpy
"""
This script plots different activation and decay functions and saves the
resulting figures to a pdf document "excitation_decay_functions.pdf"
"""
with PdfPages('excitation_decay_functions.pdf') as pdf:
plt.figure()
plt.subplot(2,2,1)
from union_temporal_pooling.activation.excite_functions.excite_functions_all import (
LogisticExciteFunction)
self = LogisticExciteFunction()
self.plot()
plt.xlabel('Predicted Input #')
from union_temporal_pooling.activation.decay_functions.decay_functions_all import (
ExponentialDecayFunction)
plt.subplot(2,2,2)
self = ExponentialDecayFunction(10.0)
self.plot()
pdf.savefig()
plt.close()
# from union_temporal_pooling.activation.decay_functions.logistic_decay_function import (
# LogisticDecayFunction)
# plt.figure()
# self = LogisticDecayFunction(10.0)
# self.plot()
# pdf.savefig()
# plt.close()
| agpl-3.0 |
Obus/scikit-learn | sklearn/tests/test_lda.py | 71 | 5883 | import numpy as np
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raise_message
from sklearn.datasets import make_blobs
from sklearn import lda
# Data is just 6 separable points in the plane
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]], dtype='f')
y = np.array([1, 1, 1, 2, 2, 2])
y3 = np.array([1, 1, 2, 2, 3, 3])
# Degenerate data with only one feature (still should be separable)
X1 = np.array([[-2, ], [-1, ], [-1, ], [1, ], [1, ], [2, ]], dtype='f')
solver_shrinkage = [('svd', None), ('lsqr', None), ('eigen', None),
('lsqr', 'auto'), ('lsqr', 0), ('lsqr', 0.43),
('eigen', 'auto'), ('eigen', 0), ('eigen', 0.43)]
def test_lda_predict():
# Test LDA classification.
# This checks that LDA implements fit and predict and returns correct values
# for simple toy data.
for test_case in solver_shrinkage:
solver, shrinkage = test_case
clf = lda.LDA(solver=solver, shrinkage=shrinkage)
y_pred = clf.fit(X, y).predict(X)
assert_array_equal(y_pred, y, 'solver %s' % solver)
# Assert that it works with 1D data
y_pred1 = clf.fit(X1, y).predict(X1)
assert_array_equal(y_pred1, y, 'solver %s' % solver)
# Test probability estimates
y_proba_pred1 = clf.predict_proba(X1)
assert_array_equal((y_proba_pred1[:, 1] > 0.5) + 1, y,
'solver %s' % solver)
y_log_proba_pred1 = clf.predict_log_proba(X1)
assert_array_almost_equal(np.exp(y_log_proba_pred1), y_proba_pred1,
8, 'solver %s' % solver)
# Primarily test for commit 2f34950 -- "reuse" of priors
y_pred3 = clf.fit(X, y3).predict(X)
# LDA shouldn't be able to separate those
assert_true(np.any(y_pred3 != y3), 'solver %s' % solver)
# Test invalid shrinkages
clf = lda.LDA(solver="lsqr", shrinkage=-0.2231)
assert_raises(ValueError, clf.fit, X, y)
clf = lda.LDA(solver="eigen", shrinkage="dummy")
assert_raises(ValueError, clf.fit, X, y)
clf = lda.LDA(solver="svd", shrinkage="auto")
assert_raises(NotImplementedError, clf.fit, X, y)
# Test unknown solver
clf = lda.LDA(solver="dummy")
assert_raises(ValueError, clf.fit, X, y)
def test_lda_coefs():
# Test if the coefficients of the solvers are approximately the same.
n_features = 2
n_classes = 2
n_samples = 1000
X, y = make_blobs(n_samples=n_samples, n_features=n_features,
centers=n_classes, random_state=11)
clf_lda_svd = lda.LDA(solver="svd")
clf_lda_lsqr = lda.LDA(solver="lsqr")
clf_lda_eigen = lda.LDA(solver="eigen")
clf_lda_svd.fit(X, y)
clf_lda_lsqr.fit(X, y)
clf_lda_eigen.fit(X, y)
assert_array_almost_equal(clf_lda_svd.coef_, clf_lda_lsqr.coef_, 1)
assert_array_almost_equal(clf_lda_svd.coef_, clf_lda_eigen.coef_, 1)
assert_array_almost_equal(clf_lda_eigen.coef_, clf_lda_lsqr.coef_, 1)
def test_lda_transform():
# Test LDA transform.
clf = lda.LDA(solver="svd", n_components=1)
X_transformed = clf.fit(X, y).transform(X)
assert_equal(X_transformed.shape[1], 1)
clf = lda.LDA(solver="eigen", n_components=1)
X_transformed = clf.fit(X, y).transform(X)
assert_equal(X_transformed.shape[1], 1)
clf = lda.LDA(solver="lsqr", n_components=1)
clf.fit(X, y)
msg = "transform not implemented for 'lsqr'"
assert_raise_message(NotImplementedError, msg, clf.transform, X)
def test_lda_orthogonality():
# arrange four classes with their means in a kite-shaped pattern
# the longer distance should be transformed to the first component, and
# the shorter distance to the second component.
means = np.array([[0, 0, -1], [0, 2, 0], [0, -2, 0], [0, 0, 5]])
# We construct perfectly symmetric distributions, so the LDA can estimate
# precise means.
scatter = np.array([[0.1, 0, 0], [-0.1, 0, 0], [0, 0.1, 0], [0, -0.1, 0],
[0, 0, 0.1], [0, 0, -0.1]])
X = (means[:, np.newaxis, :] + scatter[np.newaxis, :, :]).reshape((-1, 3))
y = np.repeat(np.arange(means.shape[0]), scatter.shape[0])
# Fit LDA and transform the means
clf = lda.LDA(solver="svd").fit(X, y)
means_transformed = clf.transform(means)
d1 = means_transformed[3] - means_transformed[0]
d2 = means_transformed[2] - means_transformed[1]
d1 /= np.sqrt(np.sum(d1 ** 2))
d2 /= np.sqrt(np.sum(d2 ** 2))
# the transformed within-class covariance should be the identity matrix
assert_almost_equal(np.cov(clf.transform(scatter).T), np.eye(2))
# the means of classes 0 and 3 should lie on the first component
assert_almost_equal(np.abs(np.dot(d1[:2], [1, 0])), 1.0)
# the means of classes 1 and 2 should lie on the second component
assert_almost_equal(np.abs(np.dot(d2[:2], [0, 1])), 1.0)
def test_lda_scaling():
# Test if classification works correctly with differently scaled features.
n = 100
rng = np.random.RandomState(1234)
# use uniform distribution of features to make sure there is absolutely no
# overlap between classes.
x1 = rng.uniform(-1, 1, (n, 3)) + [-10, 0, 0]
x2 = rng.uniform(-1, 1, (n, 3)) + [10, 0, 0]
x = np.vstack((x1, x2)) * [1, 100, 10000]
y = [-1] * n + [1] * n
for solver in ('svd', 'lsqr', 'eigen'):
clf = lda.LDA(solver=solver)
# should be able to separate the data perfectly
assert_equal(clf.fit(x, y).score(x, y), 1.0,
'using covariance: %s' % solver)
| bsd-3-clause |
iyounus/incubator-systemml | src/main/python/tests/test_mllearn_df.py | 4 | 5381 | #!/usr/bin/python
#-------------------------------------------------------------
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
#-------------------------------------------------------------
# To run:
# - Python 2: `PYSPARK_PYTHON=python2 spark-submit --master local[*] --driver-class-path SystemML.jar test_mllearn_df.py`
# - Python 3: `PYSPARK_PYTHON=python3 spark-submit --master local[*] --driver-class-path SystemML.jar test_mllearn_df.py`
# Make the `systemml` package importable
import os
import sys
path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "../")
sys.path.insert(0, path)
import unittest
import numpy as np
from pyspark.context import SparkContext
from pyspark.ml import Pipeline
from pyspark.ml.feature import HashingTF, Tokenizer
from pyspark.sql import SparkSession
from sklearn import datasets, metrics, neighbors
from sklearn.datasets import fetch_20newsgroups
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn import linear_model
from sklearn.metrics import accuracy_score, r2_score
from systemml.mllearn import LinearRegression, LogisticRegression, NaiveBayes, SVM
sc = SparkContext()
sparkSession = SparkSession.builder.getOrCreate()
# Currently not integrated with JUnit test
# ~/spark-1.6.1-scala-2.11/bin/spark-submit --master local[*] --driver-class-path SystemML.jar test.py
class TestMLLearn(unittest.TestCase):
def test_logistic_sk2(self):
digits = datasets.load_digits()
X_digits = digits.data
y_digits = digits.target
n_samples = len(X_digits)
X_train = X_digits[:int(.9 * n_samples)]
y_train = y_digits[:int(.9 * n_samples)]
X_test = X_digits[int(.9 * n_samples):]
y_test = y_digits[int(.9 * n_samples):]
# Convert to DataFrame for i/o: current way to transfer data
logistic = LogisticRegression(sparkSession, transferUsingDF=True)
logistic.fit(X_train, y_train)
mllearn_predicted = logistic.predict(X_test)
sklearn_logistic = linear_model.LogisticRegression()
sklearn_logistic.fit(X_train, y_train)
self.failUnless(accuracy_score(sklearn_logistic.predict(X_test), mllearn_predicted) > 0.95) # We are comparable to a similar algorithm in scikit learn
def test_linear_regression(self):
diabetes = datasets.load_diabetes()
diabetes_X = diabetes.data[:, np.newaxis, 2]
diabetes_X_train = diabetes_X[:-20]
diabetes_X_test = diabetes_X[-20:]
diabetes_y_train = diabetes.target[:-20]
diabetes_y_test = diabetes.target[-20:]
regr = LinearRegression(sparkSession, solver='direct-solve', transferUsingDF=True)
regr.fit(diabetes_X_train, diabetes_y_train)
mllearn_predicted = regr.predict(diabetes_X_test)
sklearn_regr = linear_model.LinearRegression()
sklearn_regr.fit(diabetes_X_train, diabetes_y_train)
self.failUnless(r2_score(sklearn_regr.predict(diabetes_X_test), mllearn_predicted) > 0.95) # We are comparable to a similar algorithm in scikit learn
def test_linear_regression_cg(self):
diabetes = datasets.load_diabetes()
diabetes_X = diabetes.data[:, np.newaxis, 2]
diabetes_X_train = diabetes_X[:-20]
diabetes_X_test = diabetes_X[-20:]
diabetes_y_train = diabetes.target[:-20]
diabetes_y_test = diabetes.target[-20:]
regr = LinearRegression(sparkSession, solver='newton-cg', transferUsingDF=True)
regr.fit(diabetes_X_train, diabetes_y_train)
mllearn_predicted = regr.predict(diabetes_X_test)
sklearn_regr = linear_model.LinearRegression()
sklearn_regr.fit(diabetes_X_train, diabetes_y_train)
self.failUnless(r2_score(sklearn_regr.predict(diabetes_X_test), mllearn_predicted) > 0.95) # We are comparable to a similar algorithm in scikit learn
def test_svm_sk2(self):
digits = datasets.load_digits()
X_digits = digits.data
y_digits = digits.target
n_samples = len(X_digits)
X_train = X_digits[:int(.9 * n_samples)]
y_train = y_digits[:int(.9 * n_samples)]
X_test = X_digits[int(.9 * n_samples):]
y_test = y_digits[int(.9 * n_samples):]
svm = SVM(sparkSession, is_multi_class=True, transferUsingDF=True)
mllearn_predicted = svm.fit(X_train, y_train).predict(X_test)
from sklearn import linear_model, svm
clf = svm.LinearSVC()
sklearn_predicted = clf.fit(X_train, y_train).predict(X_test)
self.failUnless(accuracy_score(sklearn_predicted, mllearn_predicted) > 0.95 )
if __name__ == '__main__':
unittest.main()
| apache-2.0 |
pianomania/scikit-learn | examples/cluster/plot_ward_structured_vs_unstructured.py | 320 | 3369 | """
===========================================================
Hierarchical clustering: structured vs unstructured ward
===========================================================
This example builds a swiss roll dataset and runs
hierarchical clustering on the positions of the data points.
For more information, see :ref:`hierarchical_clustering`.
In a first step, the hierarchical clustering is performed without connectivity
constraints on the structure and is solely based on distance, whereas in
a second step the clustering is restricted to the k-Nearest Neighbors
graph: it's a hierarchical clustering with structure prior.
Some of the clusters learned without connectivity constraints do not
respect the structure of the swiss roll and extend across different folds of
the manifold. In contrast, when connectivity constraints are imposed,
the clusters form a nice parcellation of the swiss roll.
"""
# Authors : Vincent Michel, 2010
# Alexandre Gramfort, 2010
# Gael Varoquaux, 2010
# License: BSD 3 clause
print(__doc__)
import time as time
import numpy as np
import matplotlib.pyplot as plt
import mpl_toolkits.mplot3d.axes3d as p3
from sklearn.cluster import AgglomerativeClustering
from sklearn.datasets.samples_generator import make_swiss_roll
###############################################################################
# Generate data (swiss roll dataset)
n_samples = 1500
noise = 0.05
X, _ = make_swiss_roll(n_samples, noise)
# Make it thinner
X[:, 1] *= .5
###############################################################################
# Compute clustering
print("Compute unstructured hierarchical clustering...")
st = time.time()
ward = AgglomerativeClustering(n_clusters=6, linkage='ward').fit(X)
elapsed_time = time.time() - st
label = ward.labels_
print("Elapsed time: %.2fs" % elapsed_time)
print("Number of points: %i" % label.size)
###############################################################################
# Plot result
fig = plt.figure()
ax = p3.Axes3D(fig)
ax.view_init(7, -80)
for l in np.unique(label):
ax.plot3D(X[label == l, 0], X[label == l, 1], X[label == l, 2],
'o', color=plt.cm.jet(float(l) / np.max(label + 1)))
plt.title('Without connectivity constraints (time %.2fs)' % elapsed_time)
###############################################################################
# Define the structure A of the data. Here a 10 nearest neighbors
from sklearn.neighbors import kneighbors_graph
connectivity = kneighbors_graph(X, n_neighbors=10, include_self=False)
###############################################################################
# Compute clustering
print("Compute structured hierarchical clustering...")
st = time.time()
ward = AgglomerativeClustering(n_clusters=6, connectivity=connectivity,
linkage='ward').fit(X)
elapsed_time = time.time() - st
label = ward.labels_
print("Elapsed time: %.2fs" % elapsed_time)
print("Number of points: %i" % label.size)
###############################################################################
# Plot result
fig = plt.figure()
ax = p3.Axes3D(fig)
ax.view_init(7, -80)
for l in np.unique(label):
ax.plot3D(X[label == l, 0], X[label == l, 1], X[label == l, 2],
'o', color=plt.cm.jet(float(l) / np.max(label + 1)))
plt.title('With connectivity constraints (time %.2fs)' % elapsed_time)
plt.show()
| bsd-3-clause |
rspavel/spack | var/spack/repos/builtin/packages/geopm/package.py | 5 | 5196 | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Geopm(AutotoolsPackage):
"""GEOPM is an extensible power management framework targeting HPC.
The GEOPM package provides libgeopm, libgeopmpolicy and applications
geopmctl and geopmpolicy, as well as tools for postprocessing.
GEOPM is designed to be extended for new control algorithms and new
hardware power management features via its plugin infrastructure.
Note: GEOPM interfaces with hardware using Model Specific Registers (MSRs).
For propper usage make sure MSRs are made available directly or via the
msr-safe kernel module by your administrator."""
homepage = "https://geopm.github.io"
url = "https://github.com/geopm/geopm/releases/download/v1.0.0/geopm-1.0.0.tar.gz"
git = "https://github.com/geopm/geopm.git"
# Add additional proper versions and checksums here. "spack checksum geopm"
version('develop', branch='dev')
version('master', branch='master')
version('1.1.0', sha256='5f9a4df37ef0d64c53d64829d46736803c9fe614afd8d2c70fe7a5ebea09f88e')
version('1.0.0', sha256='24fe72265a7e44d62bdfe49467c49f0b7a649131ddda402d763c00a49765e1cb')
version('0.6.1', sha256='0ca42853f90885bf213df190c3462b8675c143cc843aee0d8b8a0e30802b55a9')
version('0.6.0', sha256='95ccf256c2b7cb35838978152479569d154347c3065af1639ed17be1399182d3')
version('0.5.1', sha256='db247af55f7000b6e4628af099956349b68a637500b9d4fe8d8fb13687124d53')
version('0.5.0', sha256='cdc123ea68b6d918dcc578a39a7a38275a5d711104364eb889abed15029f4060')
version('0.4.0', sha256='7d165f5a5fe0f19ca586bd81a4631202effb542e9d762cc9cc86ad6ef7afcad9')
version('0.3.0', sha256='73b45d36e7d2431d308038fc8c50a521a1d214c5ce105a17fba440f28509d907')
# Variants reflecting most ./configure --help options
variant('debug', default=False, description='Enable debug.')
variant('coverage', default=False, description='Enable test coverage support, enables debug by default.')
variant('overhead', default=False, description='Enable GEOPM to calculate and display time spent in GEOPM API calls.')
variant('procfs', default=True, description='Enable procfs (disable for OSes not using procfs).')
variant('mpi', default=True, description='Enable MPI dependent components.')
variant('fortran', default=True, description='Build fortran interface.')
variant('doc', default=True, description='Create man pages with ruby-ronn.')
variant('openmp', default=True, description='Build with OpenMP.')
variant('ompt', default=False, description='Use OpenMP Tools Interface.')
variant('gnu-ld', default=False, description='Assume C compiler uses gnu-ld.')
# Added dependencies.
depends_on('ruby-ronn', type='build', when='+doc')
depends_on('doxygen', type='build', when='+doc')
depends_on('[email protected]:', when='+mpi')
depends_on('m4', type='build')
depends_on('autoconf', type='build')
depends_on('automake', type='build')
depends_on('libtool', type='build')
depends_on('ruby-ronn', type='build', when='+doc')
depends_on('doxygen', type='build', when='+doc')
depends_on('numactl', when="@:1.0.0-rc2")
depends_on('mpi', when='+mpi')
depends_on('[email protected]', when='@:0.5.1+hwloc')
depends_on('json-c', when='@:0.9.9')
depends_on('[email protected]:', when="@1.0.0:", type=('build', 'run'))
depends_on('[email protected]:', type=('build', 'run'))
depends_on('[email protected]:3.5.2', when="@1.0.0:", type=('build', 'run'))
depends_on('[email protected]:', when="@1.1.0:", type=('build', 'run'))
depends_on('[email protected]:', when="@1.1.0:", type=('build', 'run'))
depends_on('[email protected]:', when="@1.1.0:", type=('build', 'run'))
depends_on('[email protected]:', when="@1.1.0:", type=('build', 'run'))
depends_on('[email protected]:', type=('build', 'run'))
depends_on('[email protected]:', when="@1.0.0:", type='build')
depends_on('[email protected]:', type=('build', 'run'))
depends_on('[email protected]:', when="@1.0.0:", type=('build', 'run'))
depends_on('[email protected]:', when="@1.1.0:", type=('build', 'run'))
depends_on('[email protected]', when="@:1.0.0-rc2", type=('build', 'run'))
depends_on('[email protected]:', when="@1.1.0:", type=('build', 'run'))
parallel = False
def autoreconf(self, spec, prefix):
bash = which("bash")
bash('./autogen.sh')
def configure_args(self):
args = []
args.extend(self.enable_or_disable('debug'))
args.extend(self.enable_or_disable('coverage'))
args.extend(self.enable_or_disable('overhead'))
args.extend(self.enable_or_disable('procfs'))
args.extend(self.enable_or_disable('mpi'))
args.extend(self.enable_or_disable('fortran'))
args.extend(self.enable_or_disable('doc'))
args.extend(self.enable_or_disable('openmp'))
args.extend(self.enable_or_disable('ompt'))
args.extend(self.with_or_without('gnu-ld'))
return args
| lgpl-2.1 |
zhenv5/scikit-learn | examples/cluster/plot_feature_agglomeration_vs_univariate_selection.py | 218 | 3893 | """
==============================================
Feature agglomeration vs. univariate selection
==============================================
This example compares 2 dimensionality reduction strategies:
- univariate feature selection with Anova
- feature agglomeration with Ward hierarchical clustering
Both methods are compared in a regression problem using
a BayesianRidge as supervised estimator.
"""
# Author: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
print(__doc__)
import shutil
import tempfile
import numpy as np
import matplotlib.pyplot as plt
from scipy import linalg, ndimage
from sklearn.feature_extraction.image import grid_to_graph
from sklearn import feature_selection
from sklearn.cluster import FeatureAgglomeration
from sklearn.linear_model import BayesianRidge
from sklearn.pipeline import Pipeline
from sklearn.grid_search import GridSearchCV
from sklearn.externals.joblib import Memory
from sklearn.cross_validation import KFold
###############################################################################
# Generate data
n_samples = 200
size = 40 # image size
roi_size = 15
snr = 5.
np.random.seed(0)
mask = np.ones([size, size], dtype=np.bool)
coef = np.zeros((size, size))
coef[0:roi_size, 0:roi_size] = -1.
coef[-roi_size:, -roi_size:] = 1.
X = np.random.randn(n_samples, size ** 2)
for x in X: # smooth data
x[:] = ndimage.gaussian_filter(x.reshape(size, size), sigma=1.0).ravel()
X -= X.mean(axis=0)
X /= X.std(axis=0)
y = np.dot(X, coef.ravel())
noise = np.random.randn(y.shape[0])
noise_coef = (linalg.norm(y, 2) / np.exp(snr / 20.)) / linalg.norm(noise, 2)
y += noise_coef * noise # add noise
###############################################################################
# Compute the coefs of a Bayesian Ridge with GridSearch
cv = KFold(len(y), 2) # cross-validation generator for model selection
ridge = BayesianRidge()
cachedir = tempfile.mkdtemp()
mem = Memory(cachedir=cachedir, verbose=1)
# Ward agglomeration followed by BayesianRidge
connectivity = grid_to_graph(n_x=size, n_y=size)
ward = FeatureAgglomeration(n_clusters=10, connectivity=connectivity,
memory=mem)
clf = Pipeline([('ward', ward), ('ridge', ridge)])
# Select the optimal number of parcels with grid search
clf = GridSearchCV(clf, {'ward__n_clusters': [10, 20, 30]}, n_jobs=1, cv=cv)
clf.fit(X, y) # set the best parameters
coef_ = clf.best_estimator_.steps[-1][1].coef_
coef_ = clf.best_estimator_.steps[0][1].inverse_transform(coef_)
coef_agglomeration_ = coef_.reshape(size, size)
# Anova univariate feature selection followed by BayesianRidge
f_regression = mem.cache(feature_selection.f_regression) # caching function
anova = feature_selection.SelectPercentile(f_regression)
clf = Pipeline([('anova', anova), ('ridge', ridge)])
# Select the optimal percentage of features with grid search
clf = GridSearchCV(clf, {'anova__percentile': [5, 10, 20]}, cv=cv)
clf.fit(X, y) # set the best parameters
coef_ = clf.best_estimator_.steps[-1][1].coef_
coef_ = clf.best_estimator_.steps[0][1].inverse_transform(coef_)
coef_selection_ = coef_.reshape(size, size)
###############################################################################
# Inverse the transformation to plot the results on an image
plt.close('all')
plt.figure(figsize=(7.3, 2.7))
plt.subplot(1, 3, 1)
plt.imshow(coef, interpolation="nearest", cmap=plt.cm.RdBu_r)
plt.title("True weights")
plt.subplot(1, 3, 2)
plt.imshow(coef_selection_, interpolation="nearest", cmap=plt.cm.RdBu_r)
plt.title("Feature Selection")
plt.subplot(1, 3, 3)
plt.imshow(coef_agglomeration_, interpolation="nearest", cmap=plt.cm.RdBu_r)
plt.title("Feature Agglomeration")
plt.subplots_adjust(0.04, 0.0, 0.98, 0.94, 0.16, 0.26)
plt.show()
# Attempt to remove the temporary cachedir, but don't worry if it fails
shutil.rmtree(cachedir, ignore_errors=True)
| bsd-3-clause |
mbdebian/ml-playground | real_world_machine_learning/playground/chapter3.py | 1 | 7893 | #
# Author : Manuel Bernal Llinares
# Project : ml-playground
# Timestamp : 18-10-2017 16:11
# ---
# © 2017 Manuel Bernal Llinares <[email protected]>
# All rights reserved.
#
"""
Scratchpad for the chapter 3 from the book
"""
import pylab
import pandas
import numpy as np
from sklearn.svm import SVC
from sklearn.ensemble import RandomForestRegressor
from sklearn.neighbors import KNeighborsClassifier
from sklearn.linear_model import LogisticRegression, LinearRegression
# Helper functions
# Categorical-to-numerical function from chapter 2 changed to automatically add column names
def cat_to_num(data):
categories = np.unique(data)
features = {}
for cat in categories:
binary = (data == cat)
features["{}_{}".format(data.name, cat)] = binary.astype("int")
return pandas.DataFrame(features)
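# Quick usage illustration (toy Series, not part of the original chapter): shows the
# column-naming convention cat_to_num produces for a Series named 'Sex'.
_example_encoding = cat_to_num(pandas.Series(["male", "female", "male"], name="Sex"))
print("---> cat_to_num example:\n{}".format(_example_encoding))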
def prepare_data(data):
"""
Takes a dataframe of raw data and returns ML model features
:param data: dataframe of raw data
:return: ML model features
"""
# Initially, we build a model only on the available numerical values
features_drop_list = ["PassengerId", "Survived", "Fare", "Name", "Sex", "Ticket", "Cabin", "Embarked"]
print("+++> Drop features: {}".format(features_drop_list))
features = data.drop(features_drop_list, axis=1)
# Setting missing age values to -1
print("+++> Fix missing 'Age' values, filling them with '-1'")
features["Age"] = data["Age"].fillna(-1)
# Adding the sqrt of the fare feature
print("+++> Change 'Fare' for its square root value")
features["sqrt_Fare"] = np.sqrt(data["Fare"])
# Adding gender categorical value
print("+++> Convert 'Sex' categorical data")
features = features.join(cat_to_num(data["Sex"]))
# Adding Embarked categorical value
print("+++> Convert 'Embarked' categorical data")
features = features.join(cat_to_num(data["Embarked"]))
# ML model features are now ready
return features
# Read the Titanic sample data
# The data is English localized, but pandas will fail trying to convert numbers into float because this system is
# Spanish localized
sample_data_titanic = pandas.read_csv("../book_code/data/titanic.csv")
print("---> Sample data - Titanic, #{} entries".format(len(sample_data_titanic)))
print("... Sample ...\n"
"{}\n"
"... END of Sample ...".format(sample_data_titanic[:5]))
# Data fix (for numpy 'unique') - We know there is missing 'Embarked' data that, when run through 'unique', is
# interpreted as 'float', but 'Embark' is 'str', so we're gonna change that
sample_data_titanic['Embarked'].fillna("missing", inplace=True)
# We make a 80/20% train/test split of the data
data_train = sample_data_titanic[:int(0.8 * len(sample_data_titanic))]
data_test = sample_data_titanic[int(0.8 * len(sample_data_titanic)):]
# ---> Logistic Regression Model <---
# ML training model
ml_training_model = prepare_data(data_train)
print("---> ML Training model\n"
"... SAMPLE ...\n"
"{}\n"
"... END of SAMPLE".format(ml_training_model[:5]))
model = LogisticRegression()
model.fit(ml_training_model, data_train['Survived'])
# Make predictions
model_predictions_on_test_data = model.predict(prepare_data(data_test))
print("[--- Logistic Regression ---]")
print("---> Model predictions on test data:\n{}".format(model_predictions_on_test_data))
# Compute the accuracy of the model on the test data
model_score_on_test_data = model.score(prepare_data(data_test), data_test['Survived'])
print("---> Model Accuracy on test data:\n{}".format(model_score_on_test_data))
# ---> Non-linear model with Support Vector Machines <---
print("[--- SVC ---]")
model_svc = SVC()
model_svc.fit(ml_training_model, data_train['Survived'])
model_svc_score_on_test_data = model_svc.score(prepare_data(data_test), data_test['Survived'])
print("---> Model Accuracy on test data:\n{}".format(model_svc_score_on_test_data))
# ---> Classification with multiple classes: hand-written digits <---
print("[--- MNIST Small Dataset (KNN Classifier) ---]")
mnist_dataset = pandas.read_csv("../book_code/data/mnist_small.csv")
mnist_train = mnist_dataset[:int(0.8 * len(mnist_dataset))]
mnist_test = mnist_dataset[int(0.8 * len(mnist_dataset)):]
print("---> MNIST dataset contains #{} entries, #{} for training and #{} for testing"
.format(len(mnist_dataset),
len(mnist_train),
len(mnist_test)))
# Instantiate the classifier
print("+++> Get an instance of the classifier, n_neighbors = 10")
knn = KNeighborsClassifier(n_neighbors=10)
# Train the classifier by dropping the 'label' column (which is the classification target)
print("+++> Fit the classifier")
knn.fit(mnist_train.drop('label', axis=1), mnist_train['label'])
# Predictions
knn_mnist_predictions = knn.predict(mnist_test.drop('label', axis=1))
print("---> Classifier Predictions for test data\n{}".format(knn_mnist_predictions))
# Predictions with probabilities
knn_mnist_predictions_with_probabilities = knn.predict_proba(mnist_test.drop('label', axis=1))
knn_mnist_predictions_with_probabilities_sample = \
pandas.DataFrame(knn_mnist_predictions_with_probabilities[:20], index=["Digit {}".format(i + 1) for i in range(20)])
print("---> Classifier Predictions for test data\n{}".format(knn_mnist_predictions_with_probabilities_sample))
# Compute the KNN Classifier score
print("---> Classifier Score on the test data, {}"
.format(knn.score(mnist_test.drop('label', axis=1), mnist_test['label'])))
# ---> Predicting numerical values with a regression model <---
print("[--- Auto MPG Dataset (Linear Regression) ---]")
auto_dataset = pandas.read_csv("../book_code/data/auto-mpg.csv")
# Convert origin from categorical to numerical
auto = auto_dataset.join(cat_to_num(auto_dataset['origin']))
auto = auto.drop('origin', axis=1)
# Split train / test
auto_train = auto[:int(0.8 * len(auto))]
auto_test = auto[int(0.8 * len(auto)):]
print("---> Auto MPG Dataset contains #{} entries, #{} for training and #{} for testing"
.format(len(auto),
len(auto_train),
len(auto_test)))
print("---> Auto MPG Dataset Sample\n{}".format(auto[:20]))
linear_regression = LinearRegression()
print("+++> Fit the linear regressor")
linear_regression.fit(auto_train.drop('mpg', axis=1), auto_train['mpg'])
# Predictions from the linear regressor
print("+++> Compute predictions with the linear regressor")
linear_regression_predictions = linear_regression.predict(auto_test.drop('mpg', axis=1))
print("---> Linear regressor predictions sample\n{}".format(linear_regression_predictions[:10]))
print("---> Linear regressor accuracy: {}"
.format(linear_regression.score(auto_test.drop('mpg', axis=1), auto_test.mpg)))
print("---> Min MPG (test dataset) {}, Max MPG (test dataset) {}".format(np.min(auto_test.mpg), np.max(auto_test.mpg)))
# Plotting the prediction from the linear regressor
pylab.plot(auto_test.mpg, linear_regression_predictions, 'o')
x = pylab.linspace(10, 40, 5)
pylab.plot(x, x, '-')
# Now with Random Forest
print("[--- Auto MPG Dataset (Random Forest Regression) ---]")
random_forest_regressor = RandomForestRegressor()
print("+++> Fit the random forest regressor")
random_forest_regressor.fit(auto_train.drop('mpg', axis=1), auto_train['mpg'])
print("+++> Compute predictions with the random forest regressor")
random_forest_regressor_predictions = random_forest_regressor.predict(auto_test.drop('mpg', axis=1))
print("---> Random Forest regressor predictions sample\n{}".format(random_forest_regressor_predictions[:10]))
print("---> Random Fores accuracy: {}"
.format(random_forest_regressor.score(auto_test.drop('mpg', axis=1), auto_test.mpg)))
pylab.figure()
pylab.plot(auto_test.mpg, random_forest_regressor_predictions, 'o')
x = pylab.linspace(10, 40, 5)
pylab.plot(x, x, '-')
# Show all plots
pylab.show()
print("<{} END {}>".format("-" * 20, "-" * 20))
| apache-2.0 |
jungla/ICOM-fluidity-toolbox | 2D/U/plot_drate_z_resolutions.py | 1 | 6030 | import os, sys
import myfun
import numpy as np
import matplotlib as mpl
mpl.use('ps')
import matplotlib.pyplot as plt
import lagrangian_stats
## READ archive (too many points... somehow)
# args: name, dayi, dayf, days
label = 'm_50_6f'
label_50 = 'm_50_6f'
label_25 = 'm_25_1'
label_10 = 'm_10_1'
basename = 'mli'
dayi = 24
dayf = 49
days = 1
#label = sys.argv[1]
#basename = sys.argv[2]
#dayi = int(sys.argv[3])
#dayf = int(sys.argv[4])
#days = int(sys.argv[5])
path = './Velocity_CG/'
# dimensions archives
# ML exp
Xlist_50 = np.linspace(0,2000,41)
Ylist_50 = np.linspace(0,2000,41)
Xlist_25 = np.linspace(0,2000,81)
Ylist_25 = np.linspace(0,2000,81)
Xlist_10 = np.linspace(0,2000,161)
Ylist_10 = np.linspace(0,2000,161)
Zlist = np.linspace(0,-50,51)
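# Replace the uniform vertical grid above with a stretched grid built from the
# cumulative layer thicknesses dl (finer layers near the surface, coarser below)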
dl = [0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1]
Zlist = -1*np.cumsum(dl)
xn_50 = len(Xlist_50)
yn_50 = len(Ylist_50)
xn_25 = len(Xlist_25)
yn_25 = len(Ylist_25)
xn_10 = len(Xlist_10)
yn_10 = len(Ylist_10)
zn = len(Zlist)
dx_50 = np.gradient(Xlist_50)
dx_25 = np.gradient(Xlist_25)
dx_10 = np.gradient(Xlist_10)
dz_50 = np.gradient(Zlist)
dz_25 = np.gradient(Zlist)
dz_10 = np.gradient(Zlist)
time = range(dayi,dayf,days)
FW_50 = np.zeros((yn_50,xn_50,zn,len(range(dayi,dayf,days))))
FW_25 = np.zeros((yn_25,xn_25,zn,len(range(dayi,dayf,days))))
FW_10 = np.zeros((yn_10,xn_10,zn,len(range(dayi,dayf,days))))
mld_50 = np.zeros(len(range(dayi,dayf,days)))
mld_25 = np.zeros(len(range(dayi,dayf,days)))
mld_10 = np.zeros(len(range(dayi,dayf,days)))
for t in range(len(time)):
print 'time:', time[t]
tlabel = str(time[t])
while len(tlabel) < 3: tlabel = '0'+tlabel
#Velocity_CG_m_50_6e_9.csv
file0_50 = path+'Velocity_CG_0_'+label_50+'_'+str(time[t])+'.csv'
file0_25 = path+'Velocity_CG_0_'+label_25+'_'+str(time[t])+'.csv'
file0_10 = path+'Velocity_CG_0_'+label_10+'_'+str(time[t])+'.csv'
file1 = 'drate_'+label+'_'+str(time[t])
file1_50 = 'drate_'+label_50
file1_25 = 'drate_'+label_25
file1_10 = 'drate_'+label_10
W_50 = lagrangian_stats.read_Scalar(file0_50,zn,yn_50,xn_50)
W_25 = lagrangian_stats.read_Scalar(file0_25,zn,yn_25,xn_25)
W_10 = lagrangian_stats.read_Scalar(file0_10,zn,yn_10,xn_10)
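 # Squared vertical shear (dU/dz)**2 of the velocity component loaded above,
 # later scaled and plotted as a proxy for the dissipation rate epsilon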
for i in range(len(Xlist_50)):
for j in range(len(Ylist_50)):
FW_50[j,i,:,t] = (np.gradient(W_50[:,j,i])/dz_50)**2
for i in range(len(Xlist_25)):
for j in range(len(Ylist_25)):
FW_25[j,i,:,t] = (np.gradient(W_25[:,j,i])/dz_25)**2
for i in range(len(Xlist_10)):
for j in range(len(Ylist_10)):
FW_10[j,i,:,t] = (np.gradient(W_10[:,j,i])/dz_10)**2
plt.figure(figsize=(4,8))
FW_t50 = np.mean(np.mean(FW_50,0),0)
FW_t25 = np.mean(np.mean(FW_25,0),0)
FW_t10 = np.mean(np.mean(FW_10,0),0)
p50, = plt.semilogx(7.5*0.05*FW_t50[:,t],Zlist,'k-',linewidth=2)
p25, = plt.semilogx(7.5*0.05*FW_t25[:,t],Zlist,'k--',linewidth=2)
p10, = plt.semilogx(7.5*0.05*FW_t10[:,t],Zlist,'k.-',linewidth=2)
plt.legend([p50,p25,p10],['50m','25m','10m'],loc=4)
plt.ylabel(r'depth $[m]$',fontsize=18)
plt.xlabel(r'$\epsilon [m^2s^{-2}]$',fontsize=18)
plt.yticks(fontsize=16)
plt.xticks(fontsize=16)
plt.savefig('./plot/'+label+'/'+file1+'.eps',bbox_inches='tight')
print './plot/'+label+'/'+file1+'.eps'
plt.close()
#
path_T = '../RST/Temperature_CG/'
file0_50 = path_T+'Temperature_CG_'+label_50+'_'+str(time[t])+'.csv'
file0_25 = path_T+'Temperature_CG_'+label_25+'_'+str(time[t])+'.csv'
file0_10 = path_T+'Temperature_CG_'+label_10+'_'+str(time[t])+'.csv'
T_10 = lagrangian_stats.read_Scalar(file0_10,zn,yn_10,xn_10)
T_25 = lagrangian_stats.read_Scalar(file0_25,zn,yn_25,xn_25)
T_50 = lagrangian_stats.read_Scalar(file0_50,zn,yn_50,xn_50)
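 # Mixed-layer depth estimate: take the deepest level at which the running
 # surface-to-depth mean temperature still exceeds the local temperature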
mld_t = []
for x in range(len(Xlist_50)):
for y in range(len(Ylist_50)):
ml = T_50[:,x,y]
mls = np.cumsum(ml)/range(1,len(ml)+1)
mlst, = np.where(mls>=ml)
mld_t.append(Zlist[mlst[len(mlst)-1]])
mld_50[t] = np.mean(mld_t)
mld_t = []
for x in range(len(Xlist_25)):
for y in range(len(Ylist_25)):
ml = T_25[:,x,y]
mls = np.cumsum(ml)/range(1,len(ml)+1)
mlst, = np.where(mls>=ml)
mld_t.append(Zlist[mlst[len(mlst)-1]])
mld_25[t] = np.mean(mld_t)
mld_t = []
for x in range(len(Xlist_10)):
for y in range(len(Ylist_10)):
ml = T_10[:,x,y]
mls = np.cumsum(ml)/range(1,len(ml)+1)
mlst, = np.where(mls>=ml)
mld_t.append(Zlist[mlst[len(mlst)-1]])
mld_10[t] = np.mean(mld_t)
FW_m = -9
FW_M = -5
plt.figure(figsize=(8,4))
plt.contourf(time,Zlist,np.log10(FW_t50),np.linspace(FW_m,FW_M,30),extend='both')
plt.colorbar()
plt.plot(time,mld_50,'k')
plt.xlabel('Time',fontsize=18)
plt.ylabel('Depth',fontsize=18)
plt.xlim([24,48])
#plt.xticks(np.linspace(np.min(w[w>0]),np.max(w[w>0]),7),np.round(np.linspace(np.min(w[w>0]),np.max(w[w>0]),7)*360000)/100,fontsize=16)
#plt.yticks(fontsize=16)
plt.savefig('./plot/'+label+'/'+file1_50+'.eps',bbox_inches='tight')
print './plot/'+label+'/'+file1_50+'.eps'
plt.close()
###
plt.figure(figsize=(8,4))
plt.contourf(time,Zlist,np.log10(FW_t25),np.linspace(FW_m,FW_M,30),extend='both')
plt.colorbar()
plt.plot(time,mld_25,'k')
plt.xlabel('Time',fontsize=18)
plt.ylabel('Depth',fontsize=18)
plt.xlim([24,48])
#plt.xticks(np.linspace(np.min(w[w>0]),np.max(w[w>0]),7),np.round(np.linspace(np.min(w[w>0]),np.max(w[w>0]),7)*360000)/100,fontsize=16)
#plt.yticks(fontsize=16)
plt.savefig('./plot/'+label+'/'+file1_25+'.eps',bbox_inches='tight')
print './plot/'+label+'/'+file1_25+'.eps'
plt.close()
plt.figure(figsize=(8,4))
plt.contourf(time,Zlist,np.log10(FW_t10),np.linspace(FW_m,FW_M,30),extend='both')
plt.colorbar()
plt.plot(time,mld_10,'k')
plt.xlabel('Time',fontsize=18)
plt.ylabel('Depth',fontsize=18)
plt.xlim([24,48])
#plt.xticks(np.linspace(np.min(w[w>0]),np.max(w[w>0]),7),np.round(np.linspace(np.min(w[w>0]),np.max(w[w>0]),7)*360000)/100,fontsize=16)
#plt.yticks(fontsize=16)
plt.savefig('./plot/'+label+'/'+file1_10+'.eps',bbox_inches='tight')
print './plot/'+label+'/'+file1_10+'.eps'
plt.close()
| gpl-2.0 |
Adai0808/scikit-learn | sklearn/manifold/tests/test_t_sne.py | 162 | 9771 | import sys
from sklearn.externals.six.moves import cStringIO as StringIO
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_raises_regexp
from sklearn.utils import check_random_state
from sklearn.manifold.t_sne import _joint_probabilities
from sklearn.manifold.t_sne import _kl_divergence
from sklearn.manifold.t_sne import _gradient_descent
from sklearn.manifold.t_sne import trustworthiness
from sklearn.manifold.t_sne import TSNE
from sklearn.manifold._utils import _binary_search_perplexity
from scipy.optimize import check_grad
from scipy.spatial.distance import pdist
from scipy.spatial.distance import squareform
def test_gradient_descent_stops():
# Test stopping conditions of gradient descent.
class ObjectiveSmallGradient:
def __init__(self):
self.it = -1
def __call__(self, _):
self.it += 1
return (10 - self.it) / 10.0, np.array([1e-5])
def flat_function(_):
return 0.0, np.ones(1)
# Gradient norm
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
_, error, it = _gradient_descent(
ObjectiveSmallGradient(), np.zeros(1), 0, n_iter=100,
n_iter_without_progress=100, momentum=0.0, learning_rate=0.0,
min_gain=0.0, min_grad_norm=1e-5, min_error_diff=0.0, verbose=2)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
assert_equal(error, 1.0)
assert_equal(it, 0)
assert("gradient norm" in out)
# Error difference
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
_, error, it = _gradient_descent(
ObjectiveSmallGradient(), np.zeros(1), 0, n_iter=100,
n_iter_without_progress=100, momentum=0.0, learning_rate=0.0,
min_gain=0.0, min_grad_norm=0.0, min_error_diff=0.2, verbose=2)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
assert_equal(error, 0.9)
assert_equal(it, 1)
assert("error difference" in out)
# Maximum number of iterations without improvement
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
_, error, it = _gradient_descent(
flat_function, np.zeros(1), 0, n_iter=100,
n_iter_without_progress=10, momentum=0.0, learning_rate=0.0,
min_gain=0.0, min_grad_norm=0.0, min_error_diff=-1.0, verbose=2)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
assert_equal(error, 0.0)
assert_equal(it, 11)
assert("did not make any progress" in out)
# Maximum number of iterations
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
_, error, it = _gradient_descent(
ObjectiveSmallGradient(), np.zeros(1), 0, n_iter=11,
n_iter_without_progress=100, momentum=0.0, learning_rate=0.0,
min_gain=0.0, min_grad_norm=0.0, min_error_diff=0.0, verbose=2)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
assert_equal(error, 0.0)
assert_equal(it, 10)
assert("Iteration 10" in out)
def test_binary_search():
# Test if the binary search finds Gaussians with desired perplexity.
random_state = check_random_state(0)
distances = random_state.randn(50, 2)
distances = distances.dot(distances.T)
np.fill_diagonal(distances, 0.0)
desired_perplexity = 25.0
P = _binary_search_perplexity(distances, desired_perplexity, verbose=0)
P = np.maximum(P, np.finfo(np.double).eps)
mean_perplexity = np.mean([np.exp(-np.sum(P[i] * np.log(P[i])))
for i in range(P.shape[0])])
assert_almost_equal(mean_perplexity, desired_perplexity, decimal=3)
def test_gradient():
# Test gradient of Kullback-Leibler divergence.
random_state = check_random_state(0)
n_samples = 50
n_features = 2
n_components = 2
alpha = 1.0
distances = random_state.randn(n_samples, n_features)
distances = distances.dot(distances.T)
np.fill_diagonal(distances, 0.0)
X_embedded = random_state.randn(n_samples, n_components)
P = _joint_probabilities(distances, desired_perplexity=25.0,
verbose=0)
fun = lambda params: _kl_divergence(params, P, alpha, n_samples,
n_components)[0]
grad = lambda params: _kl_divergence(params, P, alpha, n_samples,
n_components)[1]
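    # check_grad returns the norm of the difference between the analytical
    # gradient and a finite-difference approximation; it should be close to zero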
assert_almost_equal(check_grad(fun, grad, X_embedded.ravel()), 0.0,
decimal=5)
def test_trustworthiness():
# Test trustworthiness score.
random_state = check_random_state(0)
# Affine transformation
X = random_state.randn(100, 2)
assert_equal(trustworthiness(X, 5.0 + X / 10.0), 1.0)
# Randomly shuffled
X = np.arange(100).reshape(-1, 1)
X_embedded = X.copy()
random_state.shuffle(X_embedded)
assert_less(trustworthiness(X, X_embedded), 0.6)
# Completely different
X = np.arange(5).reshape(-1, 1)
X_embedded = np.array([[0], [2], [4], [1], [3]])
assert_almost_equal(trustworthiness(X, X_embedded, n_neighbors=1), 0.2)
def test_preserve_trustworthiness_approximately():
# Nearest neighbors should be preserved approximately.
random_state = check_random_state(0)
X = random_state.randn(100, 2)
for init in ('random', 'pca'):
tsne = TSNE(n_components=2, perplexity=10, learning_rate=100.0,
init=init, random_state=0)
X_embedded = tsne.fit_transform(X)
assert_almost_equal(trustworthiness(X, X_embedded, n_neighbors=1), 1.0,
decimal=1)
def test_fit_csr_matrix():
# X can be a sparse matrix.
random_state = check_random_state(0)
X = random_state.randn(100, 2)
X[(np.random.randint(0, 100, 50), np.random.randint(0, 2, 50))] = 0.0
X_csr = sp.csr_matrix(X)
tsne = TSNE(n_components=2, perplexity=10, learning_rate=100.0,
random_state=0)
X_embedded = tsne.fit_transform(X_csr)
assert_almost_equal(trustworthiness(X_csr, X_embedded, n_neighbors=1), 1.0,
decimal=1)
def test_preserve_trustworthiness_approximately_with_precomputed_distances():
# Nearest neighbors should be preserved approximately.
random_state = check_random_state(0)
X = random_state.randn(100, 2)
D = squareform(pdist(X), "sqeuclidean")
tsne = TSNE(n_components=2, perplexity=10, learning_rate=100.0,
metric="precomputed", random_state=0)
X_embedded = tsne.fit_transform(D)
assert_almost_equal(trustworthiness(D, X_embedded, n_neighbors=1,
precomputed=True), 1.0, decimal=1)
def test_early_exaggeration_too_small():
# Early exaggeration factor must be >= 1.
tsne = TSNE(early_exaggeration=0.99)
assert_raises_regexp(ValueError, "early_exaggeration .*",
tsne.fit_transform, np.array([[0.0]]))
def test_too_few_iterations():
# Number of gradient descent iterations must be at least 200.
tsne = TSNE(n_iter=199)
assert_raises_regexp(ValueError, "n_iter .*", tsne.fit_transform,
np.array([[0.0]]))
def test_non_square_precomputed_distances():
# Precomputed distance matrices must be square matrices.
tsne = TSNE(metric="precomputed")
assert_raises_regexp(ValueError, ".* square distance matrix",
tsne.fit_transform, np.array([[0.0], [1.0]]))
def test_init_not_available():
# 'init' must be 'pca' or 'random'.
assert_raises_regexp(ValueError, "'init' must be either 'pca' or 'random'",
TSNE, init="not available")
def test_distance_not_available():
# 'metric' must be valid.
tsne = TSNE(metric="not available")
assert_raises_regexp(ValueError, "Unknown metric not available.*",
tsne.fit_transform, np.array([[0.0], [1.0]]))
def test_pca_initialization_not_compatible_with_precomputed_kernel():
# Precomputed distance matrices must be square matrices.
tsne = TSNE(metric="precomputed", init="pca")
assert_raises_regexp(ValueError, "The parameter init=\"pca\" cannot be "
"used with metric=\"precomputed\".",
tsne.fit_transform, np.array([[0.0], [1.0]]))
def test_verbose():
random_state = check_random_state(0)
tsne = TSNE(verbose=2)
X = random_state.randn(5, 2)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
tsne.fit_transform(X)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
assert("[t-SNE]" in out)
assert("Computing pairwise distances" in out)
assert("Computed conditional probabilities" in out)
assert("Mean sigma" in out)
assert("Finished" in out)
assert("early exaggeration" in out)
assert("Finished" in out)
def test_chebyshev_metric():
# t-SNE should allow metrics that cannot be squared (issue #3526).
random_state = check_random_state(0)
tsne = TSNE(metric="chebyshev")
X = random_state.randn(5, 2)
tsne.fit_transform(X)
def test_reduction_to_one_component():
# t-SNE should allow reduction to one component (issue #4154).
random_state = check_random_state(0)
tsne = TSNE(n_components=1)
X = random_state.randn(5, 2)
X_embedded = tsne.fit(X).embedding_
assert(np.all(np.isfinite(X_embedded)))
| bsd-3-clause |
badele/SDRHunter | SDRHunter/SDRHunter.py | 2 | 28783 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
__authors__ = 'Bruno Adelé <[email protected]>'
__copyright__ = 'Copyright (C) 2014 Bruno Adelé'
__description__ = """Tools for searching the radio of signal"""
__license__ = 'GPL'
__version__ = '0.0.1'
import os
import sys
import json
import shlex
import time
import pprint
import argparse
import subprocess
#from collections import OrderedDict
import matplotlib.pyplot as plt  # used by executeSpectre below
import numpy as np
import scipy.signal as signal
from tabulate import tabulate
import commons
# Todo: In searchstations, save after Nb Loop
# TODO: rename range into freqs_range
# TODO: search best bandwith for windows and 1s
# TODO: Optimise the calling functions, e.g. scan, zoomedscan, gensummaries, etc.
# TODO: Analyse if zoomedscan must merge with scan function
# Unit conversion
HzUnities = {'M': 1e6, 'k': 1e3}
secUnities = {'s': 1, 'm': 60, 'h': 3600}
# Class for terminal Color
class tcolor:
DEFAULT = "\033[0m"
BOLD = "\033[1m"
RED = "\033[0;1;31;40m"
GREEN = "\033[0;1;32;40m"
BLUE = "\033[0;1;36;40m"
ORANGE = "\033[0;1;33;40m"
MAGENTA = "\033[0;1;36;40m"
RESET = "\033[2J\033[H"
BELL = "\a"
def showVerbose(config, mess):
if config['global']['verbose']:
print mess
def loadJSON(filename):
exists = os.path.isfile(filename)
if exists:
configlines = open(filename).read()
content = json.loads(configlines)
return content
return None
def saveJSON(filename,content):
with open(filename, 'w') as f:
jsontext = json.dumps(
content, sort_keys=True,
indent=4, separators=(',', ': ')
)
f.write(jsontext)
f.close()
def loadStations(filename):
stations = loadJSON(filename)
if not stations:
stations = {'stations': []}
return stations
def calcFilename(scanlevel, start, gain):
filename = "%sHz-%sHz-%07.2fdB-%sHz-%s-%s" % (
commons.float2Hz(start, 3, True),
commons.float2Hz(start + scanlevel['windows'], 3, True),
gain,
commons.float2Hz(scanlevel['binsize'], 3, True),
commons.float2Sec(scanlevel['interval']),
commons.float2Sec(scanlevel['quitafter'])
)
fullname = os.path.join(scanlevel['scandir'], filename)
return fullname
def createScanInfoFile(cmdargs, config, scanlevel, start, gain):
filename = calcFilename(scanlevel, start, gain)
scaninfofilename = "%s.scaninfo" % filename
scaninfo = {}
scaninfo['arguments'] = config['arguments']
scaninfo['global'] = config['global']
scaninfo['scanlevel'] = scanlevel
saveJSON(scaninfofilename, scaninfo)
def executeShell(cmd, directory=None):
p = subprocess.Popen(cmd, shell=True, cwd=directory, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
output, errors = p.communicate()
if p.returncode:
print 'Failed running %s' % cmd
raise Exception(output)
else:
pass
return output
def executeRTLPower(cmdargs, config, scanlevel, start):
# Create directory if not exists
if not os.path.isdir(scanlevel['scandir']):
print "executeRTLPower SCANDIR: %s" % scanlevel['scandir']
os.makedirs(scanlevel['scandir'])
for gain in scanlevel['gains']:
filename = calcFilename(scanlevel, start, gain)
# Ignore call rtl_power if file already exist
csv_filename = "%s.csv" % filename
exists = os.path.isfile(csv_filename)
if exists:
showVerbose(
config,
"%sScan '%s' : %shz-%shz already exists%s" % (
tcolor.GREEN,
scanlevel['name'],
commons.float2Hz(start), commons.float2Hz(start + scanlevel['windows']),
tcolor.DEFAULT
)
)
return
else:
running_filename = "%s.running" % filename
exists = os.path.isfile(running_filename)
if exists:
print "%sScan '%s' : delete old running file %shz-%shz" % (
tcolor.DEFAULT,
scanlevel['name'],
commons.float2Hz(start), commons.float2Hz(start + scanlevel['windows']),
)
os.remove(running_filename)
print "%sScan '%s' : %shz-%shz with %s gain / Begin: %s / Finish in: ~%s" % (
tcolor.DEFAULT,
scanlevel['name'],
commons.float2Hz(start), commons.float2Hz(start + scanlevel['windows']),
gain,
time.strftime("%H:%M:%S", time.localtime()),
commons.float2Sec(scanlevel['quitafter']),
)
cmddir = None
if os.name == "nt":
cmddir = "C:\\SDRHunter\\rtl-sdr-release\\x32"
cmd = "rtl_power -p %s -g %s -f %s:%s:%s -i %s -e %s \"%s\"" % (
config['global']['ppm'],
gain,
start,
start + scanlevel['windows'],
scanlevel['binsize'],
scanlevel['interval'],
scanlevel['quitafter'],
running_filename
)
# Create Scan info file
createScanInfoFile(cmdargs, config, scanlevel, start, gain)
# Call rtl_power shell command
executeShell(cmd, cmddir)
# Rename file
os.rename(running_filename, csv_filename)
def loadOrGenerateSummaryFile(csv_filename):
(filename, ext) = os.path.splitext(csv_filename)
summary_filename = '%s%s' % (filename, '.summary')
sdrdatas = commons.SDRDatas(csv_filename)
sdrdatas.genSummarizeSignal()
saveJSON(summary_filename, sdrdatas.summaries)
def executeSumarizeSignals(cmdargs, config, scanlevel, start):
for gain in scanlevel['gains']:
filename = calcFilename(scanlevel, start, gain)
# ignore if rtl_power file not exists
csv_filename = "%s.csv" % filename
exists = os.path.isfile(csv_filename)
if not exists:
showVerbose(
config,
"%s %s not exist%s" % (
tcolor.RED,
csv_filename,
tcolor.DEFAULT,
)
)
continue
# Ignore call summary if file already exist
summary_filename = "%s.summary" % filename
exists = os.path.isfile(summary_filename)
if exists:
showVerbose(
config,
"%sSummarize '%s' : %shz-%shz%s for %s gain" % (
tcolor.GREEN,
scanlevel['name'], commons.float2Hz(start), commons.float2Hz(start + scanlevel['windows']),
gain,
tcolor.DEFAULT,
)
)
continue
print "%sSummarize '%s' : %shz-%shz for %s gain" % (
tcolor.DEFAULT,
scanlevel['name'], commons.float2Hz(start), commons.float2Hz(start + scanlevel['windows']),
gain
)
sdrdatas = commons.SDRDatas(csv_filename)
sdrdatas.genSummarizeSignal()
saveJSON(summary_filename, sdrdatas.summaries)
def executeSearchStations(config, stations, scanlevel, start):
    # calcFilename requires a gain; assume the first configured gain for the station search
    filename = calcFilename(scanlevel, start, scanlevel['gains'][0])
# ignore if rtl_power file not exists
csv_filename = "%s.csv" % filename
exists = os.path.isfile(csv_filename)
if not exists:
showVerbose(
config,
"%s %s not exist%s" % (
tcolor.RED,
csv_filename,
tcolor.DEFAULT,
)
)
return
# Ignore if call summary not exist
summary_filename = "%s.summary" % filename
exists = os.path.isfile(summary_filename)
if not exists:
showVerbose(
config,
"%s %s not exist%s" % (
tcolor.RED,
summary_filename,
tcolor.DEFAULT,
)
)
return
summaries = loadJSON(summary_filename)
print "%sFind stations '%s' : %shz-%shz" % (
tcolor.DEFAULT,
scanlevel['name'], commons.float2Hz(start), commons.float2Hz(start + scanlevel['windows']),
)
smooth_max = commons.smooth(np.array(summaries['max']['signal']),10, 'flat')
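    # Detection band: limitmin approximates the noise floor (from the minimum-signal
    # statistics), limitmax the typical peak level (mean + std of the maxima)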
limitmin = summaries['min']['peak']['min']['mean'] - summaries['min']['peak']['min']['std']
limitmax = summaries['max']['mean'] + summaries['max']['std']
searchStation(scanlevel, stations, summaries, smooth_max, limitmin, limitmax)
def executeHeatmapParameters(cmdargs, config, scanlevel, start):
for gain in scanlevel['gains']:
filename = calcFilename(scanlevel, start, gain)
# Ignore if summary file not exists
summary_filename = "%s.summary" % filename
exists = os.path.isfile(summary_filename)
if not exists:
showVerbose(
config,
"%s %s not exist%s" % (
tcolor.RED,
summary_filename,
tcolor.DEFAULT,
)
)
continue
summaries = loadJSON(summary_filename)
params_filename = "%s.hparam" % filename
exists = os.path.isfile(params_filename)
if exists:
showVerbose(
config,
"%sHeatmap Parameter '%s' : %shz-%shz%s" % (
tcolor.GREEN,
scanlevel['name'], commons.float2Hz(start), commons.float2Hz(start + scanlevel['windows']),
tcolor.DEFAULT,
)
)
continue
print "%sHeatmap Parameter '%s' : %shz-%shz for % gain" % (
tcolor.DEFAULT,
scanlevel['name'], commons.float2Hz(start), commons.float2Hz(start + scanlevel['windows']),
gain,
)
parameters = {}
parameters['reversetextorder'] = True
# Db
#parameters['db'] = {}
##parameters['db']['mean'] = summaries['avg']['mean']
#parameters['db']['min'] = summaries['avg']['min']
#parameters['db']['max'] = summaries['avg']['max']
# Text
parameters['texts'] = []
parameters['texts'].append({'text': "Min signal: %.2f" % summaries['avg']['min']})
parameters['texts'].append({'text': "Max signal: %.2f" % summaries['avg']['max']})
parameters['texts'].append({'text': "Mean signal: %.2f" % summaries['avg']['mean']})
parameters['texts'].append({'text': "Std signal: %.2f" % summaries['avg']['std']})
parameters['texts'].append({'text': ""})
parameters['texts'].append({'text': "avg min %.2f" % summaries['avg']['min']})
parameters['texts'].append({'text': "std min %.2f" % summaries['avg']['std']})
# Add sscanlevel stations name in legends
if 'stationsfilename' in scanlevel or 'heatmap' in config['global']:
parameters['legends'] = []
if 'stationsfilename' in scanlevel:
parameters['legends'].append(scanlevel['stationsfilename'])
if 'heatmap' in config['global']:
# Add global stations name in legends
if 'heatmap' in config['global'] and "stationsfilenames" in config['global']['heatmap']:
for stationsfilename in config['global']['heatmap']['stationsfilenames']:
parameters['legends'].append(stationsfilename)
saveJSON(params_filename, parameters)
def executeHeatmap(cmdargs, config, scanlevel, start):
for gain in scanlevel['gains']:
filename = calcFilename(scanlevel, start, gain)
csv_filename = "%s.csv" % filename
exists = os.path.isfile(csv_filename)
if not exists:
showVerbose(
config,
"%s %s not exist%s" % (
tcolor.RED,
csv_filename,
tcolor.DEFAULT,
)
)
continue
params_filename = "%s.hparam" % filename
exists = os.path.isfile(params_filename)
if not exists:
showVerbose(
config,
"%s %s not exist%s" % (
tcolor.RED,
params_filename,
tcolor.DEFAULT,
)
)
continue
# Check if scan exist
img_filename = "%s_heatmap.png" % filename
exists = os.path.isfile(img_filename)
if exists:
showVerbose(
config,
"%sHeatmap '%s' : %shz-%shz%s" % (
tcolor.GREEN,
scanlevel['name'], commons.float2Hz(start), commons.float2Hz(start + scanlevel['windows']),
tcolor.DEFAULT
)
)
continue
print "%sHeatmap '%s' : %shz-%shz for %s gain" % (
tcolor.DEFAULT,
scanlevel['name'], commons.float2Hz(start), commons.float2Hz(start + scanlevel['windows']),
gain,
)
print "CSV: %s" % csv_filename
datas = commons.SDRDatas(csv_filename)
for line in datas.samples:
print len(line)
print ""
# # Check calc or check if Heatmap paramters exists
# cmd = "python heatmap.py --parameters %s %s %s" % (
# params_filename,
# csv_filename,
# img_filename
# )
# print cmd
#
# # Call heatmap.py shell command
# executeShell(cmd, config['global']['heatmap']['dirname'])
def executeSpectre(cmdargs, config, scanlevel, start):
for gain in scanlevel['gains']:
filename = calcFilename(scanlevel, start, gain)
csv_filename = "%s.csv" % filename
exists = os.path.isfile(csv_filename)
if not exists:
showVerbose(
config,
"%s %s not exist%s" % (
tcolor.RED,
csv_filename,
tcolor.DEFAULT,
)
)
return
# Ignore if summary file not exists
summary_filename = "%s.summary" % filename
exists = os.path.isfile(summary_filename)
if not exists:
showVerbose(
config,
"%s %s not exist%s" % (
tcolor.RED,
summary_filename,
tcolor.DEFAULT,
)
)
return
summaries = loadJSON(summary_filename)
# Check if scan exist
img_filename = "%s_spectre.png" % filename
exists = os.path.isfile(img_filename)
if exists:
showVerbose(
config,
"%sSpectre '%s' : %shz-%shz%s" % (
tcolor.GREEN,
scanlevel['name'], commons.float2Hz(start), commons.float2Hz(start + scanlevel['windows']),
tcolor.DEFAULT
)
)
return
print "%sSpectre '%s' : %shz-%shz" % (
tcolor.DEFAULT,
scanlevel['name'], commons.float2Hz(start), commons.float2Hz(start + scanlevel['windows']),
)
plt.figure(figsize=(15,10))
plt.grid()
freqs = np.linspace(summaries['freq']['start'], summaries['freq']['end'], num=summaries['samples']['nbsamplescolumn'])
limitmin = summaries['min']['peak']['min']['mean'] - summaries['min']['peak']['min']['std']
limitmax = summaries['max']['mean'] + summaries['max']['std']
limits = np.linspace(limitmin, limitmax, 5)
# Max
for limit in limits:
plt.axhline(limit, color='blue')
smooth_max = commons.smooth(np.array(summaries['max']['signal']),10, 'flat')
plt.plot(freqs, smooth_max[:len(freqs)],color='red')
# Set X Limit
locs, labels = plt.xticks()
for idx in range(len(labels)):
labels[idx] = commons.float2Hz(locs[idx])
plt.xticks(locs, labels)
plt.xlabel('Freq in Hz')
# Set Y Limit
# plt.ylim(summary['groundsignal'], summary['maxsignal'])
plt.ylabel('Power density in dB')
plt.savefig(img_filename)
plt.close()
def showInfo(config, args):
# Show config
result_scan = []
if 'configs' in config:
for configname in config['configs']:
result_scan.append(
[
configname,
config['configs'][configname]['location'],
config['configs'][configname]['antenna'],
]
)
header = ['Config name', 'Location','Antenna']
print tabulate(result_scan, headers=header, stralign="right")
print ""
# Show the scan information table
result_scan = []
if 'scans' in config:
for scanlevel in config['scans']:
result_scan.append(
[
"%sHz" % commons.float2Hz(scanlevel['freq_start']),
"%sHz" % commons.float2Hz(scanlevel['freq_end']),
"%sHz" % commons.float2Hz(scanlevel['windows']),
commons.float2Sec(scanlevel['interval']),
scanlevel['nbsamples_lines'],
commons.float2Sec(commons.sec2Float(scanlevel['interval']) * scanlevel['nbsamples_lines']),
# scanlevel['quitafter'],
scanlevel['maxlevel_legend'],
]
)
header = [
'Freq. Start', 'Freq. End', 'Windows', 'Interval', 'Nb lines', 'Total time', 'Max legend level'
]
print tabulate(result_scan, headers=header, stralign="right")
# Show global config
if 'global' in config:
pprint.pprint(config['global'],indent=2)
def searchStation(scanlevel, stations, summaries, samples, limitmin, limitmax):
#search_limit = sorted(limit_list)
freqstep = summaries['freq']['step']
stations['stations'] = sorted(stations['stations'], key=lambda x: commons.hz2Float(x['freq_center']) - commons.hz2Float((x['bw'])))
bwmin = commons.hz2Float(scanlevel['minscanbw'])
bwmax = commons.hz2Float(scanlevel['maxscanbw'])
limits = np.linspace(limitmin, limitmax, 5)
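    # Sweep several power thresholds between the noise-floor and peak estimates;
    # for each threshold, contiguous runs of bins above it become candidate stations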
for limit in limits:
# Search peak upper than limit
startup = -1
foundlower = False
for idx in np.arange(len(samples)):
powerdb = samples[idx]
isup = powerdb > limit
# Search first lower limit signal
if not foundlower:
if not isup:
foundlower = True
else:
continue
# Find first upper
if startup == -1:
if isup:
startup = idx
maxidx = startup
maxdb = powerdb
else:
# If upper, check if db is upper
if isup:
if powerdb > maxdb:
maxdb = powerdb
maxidx = idx
# If lower, calc bandwidth and max db
else:
endup = idx - 1
bw_nbstep = endup - startup
bw = bw_nbstep * freqstep
freqidx = startup + int(bw_nbstep / 2)
# TODO: compare with freqidx, set % error ?
freq_center = summaries['freq']['start'] + (maxidx * freqstep)
freq_center = summaries['freq']['start'] + (freqidx * freqstep)
freq_left = freq_center - bw
deltadb = (maxdb - limit)
if bwmin <= bw <= bwmax and deltadb > scanlevel['minrelativedb']:
print "Freq:%s / Bw:%s / Abs: %s dB / From ground:%.2f dB" % (commons.float2Hz(freq_center), commons.float2Hz(bw), maxdb, maxdb - limitmax)
found = False
for station in stations['stations']:
if freq_center >= commons.hz2Float(station['freq_center']) - bw and freq_center <= commons.hz2Float(station['freq_center']) + bw:
found = True
break
if not found:
stations['stations'].append(
{'freq_center': commons.float2Hz(freq_center),
'bw': commons.float2Hz(bw),
'powerdb': float("%.2f" % maxdb),
'relativedb': float("%.2f" % (maxdb - limitmin))
}
)
stations['stations'] = sorted(stations['stations'], key=lambda x: commons.hz2Float(x['freq_center']) - commons.hz2Float(x['bw']))
startup = -1
def scan(config, args):
if 'scans' in config:
for scanlevel in config['scans']:
if not scanlevel['scanfromstations']:
range = np.linspace(scanlevel['freq_start'],scanlevel['freq_end'], num=scanlevel['nbstep'], endpoint=False)
for left_freq in range:
executeRTLPower(args, config, scanlevel, left_freq)
def zoomedscan(config, args):
if 'scans' in config:
for scanlevel in config['scans']:
if scanlevel['scanfromstations']:
stations = loadJSON(scanlevel['stationsfilename'])
confirmed_station = []
for station in stations['stations']:
if 'name' in station:
confirmed_station.append(station)
for station in confirmed_station:
freq_left = commons.hz2Float(station['freq_center']) - commons.hz2Float(scanlevel['windows'] / 2)
executeRTLPower(args, config, scanlevel, freq_left)
def generateSummaries(config, args):
if 'scans' in config:
for scanlevel in config['scans']:
range = np.linspace(scanlevel['freq_start'],scanlevel['freq_end'], num=scanlevel['nbstep'], endpoint=False)
for left_freq in range:
executeSumarizeSignals(args, config, scanlevel, left_freq)
# For scanlevel with stationsfilename
if 'stationsfilename' in scanlevel:
stations = loadJSON(scanlevel['stationsfilename'])
if stations:
confirmed_station = []
for station in stations['stations']:
if 'name' in station:
confirmed_station.append(station)
for station in confirmed_station:
freq_left = commons.hz2Float(station['freq_center']) - commons.hz2Float(scanlevel['windows'] / 2)
executeSumarizeSignals(args, config, scanlevel, freq_left)
def searchStations(config, args):
if 'scans' in config:
for scanlevel in config['scans']:
stations_filename = os.path.join(config['global']['rootdir'], args.location, "scanresult.json")
stations = loadStations(stations_filename)
range = np.linspace(scanlevel['freq_start'],scanlevel['freq_end'], num=scanlevel['nbstep'], endpoint=False)
for left_freq in range:
executeSearchStations(config, stations, scanlevel, left_freq)
saveJSON(stations_filename, stations)
def generateHeatmapParameters(config, args):
if 'scans' in config:
for scanlevel in config['scans']:
range = np.linspace(scanlevel['freq_start'],scanlevel['freq_end'], num=scanlevel['nbstep'], endpoint=False)
for left_freq in range:
executeHeatmapParameters(args, config, scanlevel, left_freq)
# For scanlevel with stationsfilename
if 'stationsfilename' in scanlevel:
stations = loadJSON(scanlevel['stationsfilename'])
confirmed_station = []
for station in stations['stations']:
if 'name' in station:
confirmed_station.append(station)
for station in confirmed_station:
freq_left = commons.hz2Float(station['freq_center']) - commons.hz2Float(scanlevel['windows'] / 2)
executeHeatmapParameters(args, config, scanlevel, freq_left)
def generateHeatmaps(config, args):
if 'scans' in config:
for scanlevel in config['scans']:
range = np.linspace(scanlevel['freq_start'],scanlevel['freq_end'], num=scanlevel['nbstep'], endpoint=False)
for left_freq in range:
executeHeatmap(args, config, scanlevel, left_freq)
# For scanlevel with stationsfilename
if 'stationsfilename' in scanlevel:
stations = loadJSON(scanlevel['stationsfilename'])
confirmed_station = []
for station in stations['stations']:
if 'name' in station:
confirmed_station.append(station)
for station in confirmed_station:
freq_left = commons.hz2Float(station['freq_center']) - commons.hz2Float(scanlevel['windows'] / 2)
executeHeatmap(args, config, scanlevel, freq_left)
def generateSpectres(config, args):
if 'scans' in config:
for scanlevel in config['scans']:
range = np.linspace(scanlevel['freq_start'],scanlevel['freq_end'], num=scanlevel['nbstep'], endpoint=False)
for left_freq in range:
executeSpectre(args, config, scanlevel, left_freq)
# For scanlevel with stationsfilename
if 'stationsfilename' in scanlevel:
stations = loadJSON(scanlevel['stationsfilename'])
confirmed_station = []
for station in stations['stations']:
if 'name' in station:
confirmed_station.append(station)
for station in confirmed_station:
freq_left = commons.hz2Float(station['freq_center']) - commons.hz2Float(scanlevel['windows'] / 2)
executeSpectre(args, config, scanlevel, freq_left)
def parse_arguments(cmdline=""):
"""Parse the arguments"""
parser = argparse.ArgumentParser(
description=__description__,
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument(
'-a', '--action',
action='store',
dest='action',
default='infos',
choices=[
'infos',
'scan',
'zoomedscan',
'gensummaries',
'searchstations',
'genheatmapparameters',
'genheatmaps',
'genspectres'
],
help='Action'
)
parser.add_argument(
'-l', '--location',
action='store',
dest='location',
required=True,
help='Scan location'
)
parser.add_argument(
'-f', '--filename',
action='store',
dest='filename',
default='sdrhunter.json',
help='JSON config filename'
)
parser.add_argument(
'-c', '--configname',
action='store',
dest='configname',
default=None,
help='Config name'
)
parser.add_argument(
'-v', '--version',
action='version',
version='%(prog)s {version}'.format(version=__version__)
)
a = parser.parse_args(cmdline)
return a
def main():
# Parse arguments
args = parse_arguments(sys.argv[1:]) # pragma: no cover
# Load JSON config
config = commons.loadConfigFile(commons.getJSONConfigFilename(), args)
if not config:
raise Exception("No infos found in %s" % args.filename)
# Execute successive action
if args.action:
if 'infos' == args.action:
showInfo(config, args)
if 'scan' == args.action:
scan(config, args)
if 'zoomedscan' == args.action:
zoomedscan(config, args)
if 'gensummaries' == args.action:
generateSummaries(config, args)
if 'searchstations' == args.action:
searchStations(config, args)
if 'genheatmapparameters' == args.action:
generateHeatmapParameters(config, args)
if 'genheatmaps' == args.action:
generateHeatmaps(config, args)
if 'genspectres' == args.action:
generateSpectres(config, args)
if __name__ == '__main__':
main() # pragma: no cover
| gpl-3.0 |
pratyakshs/pgmpy | pgmpy/tests/test_models/test_BayesianModel.py | 3 | 15775 | import unittest
import networkx as nx
import pandas as pd
import numpy as np
import numpy.testing as np_test
from pgmpy.models import BayesianModel
import pgmpy.tests.help_functions as hf
from pgmpy.factors import TabularCPD
class TestBaseModelCreation(unittest.TestCase):
def setUp(self):
self.G = BayesianModel()
def test_class_init_without_data(self):
self.assertIsInstance(self.G, nx.DiGraph)
def test_class_init_with_data_string(self):
self.g = BayesianModel([('a', 'b'), ('b', 'c')])
self.assertListEqual(sorted(self.g.nodes()), ['a', 'b', 'c'])
self.assertListEqual(hf.recursive_sorted(self.g.edges()),
[['a', 'b'], ['b', 'c']])
def test_class_init_with_data_nonstring(self):
BayesianModel([(1, 2), (2, 3)])
def test_add_node_string(self):
self.G.add_node('a')
self.assertListEqual(self.G.nodes(), ['a'])
def test_add_node_nonstring(self):
self.G.add_node(1)
def test_add_nodes_from_string(self):
self.G.add_nodes_from(['a', 'b', 'c', 'd'])
self.assertListEqual(sorted(self.G.nodes()), ['a', 'b', 'c', 'd'])
def test_add_nodes_from_non_string(self):
self.G.add_nodes_from([1, 2, 3, 4])
def test_add_edge_string(self):
self.G.add_edge('d', 'e')
self.assertListEqual(sorted(self.G.nodes()), ['d', 'e'])
self.assertListEqual(self.G.edges(), [('d', 'e')])
self.G.add_nodes_from(['a', 'b', 'c'])
self.G.add_edge('a', 'b')
self.assertListEqual(hf.recursive_sorted(self.G.edges()),
[['a', 'b'], ['d', 'e']])
def test_add_edge_nonstring(self):
self.G.add_edge(1, 2)
def test_add_edge_selfloop(self):
self.assertRaises(ValueError, self.G.add_edge, 'a', 'a')
def test_add_edge_result_cycle(self):
self.G.add_edges_from([('a', 'b'), ('a', 'c')])
self.assertRaises(ValueError, self.G.add_edge, 'c', 'a')
def test_add_edges_from_string(self):
self.G.add_edges_from([('a', 'b'), ('b', 'c')])
self.assertListEqual(sorted(self.G.nodes()), ['a', 'b', 'c'])
self.assertListEqual(hf.recursive_sorted(self.G.edges()),
[['a', 'b'], ['b', 'c']])
self.G.add_nodes_from(['d', 'e', 'f'])
self.G.add_edges_from([('d', 'e'), ('e', 'f')])
self.assertListEqual(sorted(self.G.nodes()),
['a', 'b', 'c', 'd', 'e', 'f'])
self.assertListEqual(hf.recursive_sorted(self.G.edges()),
hf.recursive_sorted([('a', 'b'), ('b', 'c'),
('d', 'e'), ('e', 'f')]))
def test_add_edges_from_nonstring(self):
self.G.add_edges_from([(1, 2), (2, 3)])
def test_add_edges_from_self_loop(self):
self.assertRaises(ValueError, self.G.add_edges_from,
[('a', 'a')])
def test_add_edges_from_result_cycle(self):
self.assertRaises(ValueError, self.G.add_edges_from,
[('a', 'b'), ('b', 'c'), ('c', 'a')])
def test_update_node_parents_bm_constructor(self):
self.g = BayesianModel([('a', 'b'), ('b', 'c')])
self.assertListEqual(self.g.predecessors('a'), [])
self.assertListEqual(self.g.predecessors('b'), ['a'])
self.assertListEqual(self.g.predecessors('c'), ['b'])
def test_update_node_parents(self):
self.G.add_nodes_from(['a', 'b', 'c'])
self.G.add_edges_from([('a', 'b'), ('b', 'c')])
self.assertListEqual(self.G.predecessors('a'), [])
self.assertListEqual(self.G.predecessors('b'), ['a'])
self.assertListEqual(self.G.predecessors('c'), ['b'])
def tearDown(self):
del self.G
class TestBayesianModelMethods(unittest.TestCase):
def setUp(self):
self.G = BayesianModel([('a', 'd'), ('b', 'd'),
('d', 'e'), ('b', 'c')])
def test_moral_graph(self):
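        # Moralization connects ('marries') parents that share a child and drops edge directions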
moral_graph = self.G.moralize()
self.assertListEqual(sorted(moral_graph.nodes()), ['a', 'b', 'c', 'd', 'e'])
for edge in moral_graph.edges():
self.assertTrue(edge in [('a', 'b'), ('a', 'd'), ('b', 'c'), ('d', 'b'), ('e', 'd')] or
(edge[1], edge[0]) in [('a', 'b'), ('a', 'd'), ('b', 'c'), ('d', 'b'), ('e', 'd')])
def test_moral_graph_with_edge_present_over_parents(self):
G = BayesianModel([('a', 'd'), ('d', 'e'), ('b', 'd'), ('b', 'c'), ('a', 'b')])
moral_graph = G.moralize()
self.assertListEqual(sorted(moral_graph.nodes()), ['a', 'b', 'c', 'd', 'e'])
for edge in moral_graph.edges():
self.assertTrue(edge in [('a', 'b'), ('c', 'b'), ('d', 'a'), ('d', 'b'), ('d', 'e')] or
(edge[1], edge[0]) in [('a', 'b'), ('c', 'b'), ('d', 'a'), ('d', 'b'), ('d', 'e')])
def tearDown(self):
del self.G
class TestBayesianModelCPD(unittest.TestCase):
def setUp(self):
self.G = BayesianModel([('d', 'g'), ('i', 'g'), ('g', 'l'),
('i', 's')])
def test_active_trail_nodes(self):
self.assertEqual(sorted(self.G.active_trail_nodes('d')), ['d', 'g', 'l'])
self.assertEqual(sorted(self.G.active_trail_nodes('i')), ['g', 'i', 'l', 's'])
def test_active_trail_nodes_args(self):
self.assertEqual(sorted(self.G.active_trail_nodes('d', observed='g')), ['d', 'i', 's'])
self.assertEqual(sorted(self.G.active_trail_nodes('l', observed='g')), ['l'])
self.assertEqual(sorted(self.G.active_trail_nodes('s', observed=['i', 'l'])), ['s'])
self.assertEqual(sorted(self.G.active_trail_nodes('s', observed=['d', 'l'])), ['g', 'i', 's'])
def test_is_active_trail_triplets(self):
self.assertTrue(self.G.is_active_trail('d', 'l'))
self.assertTrue(self.G.is_active_trail('g', 's'))
self.assertFalse(self.G.is_active_trail('d', 'i'))
self.assertTrue(self.G.is_active_trail('d', 'i', observed='g'))
self.assertFalse(self.G.is_active_trail('d', 'l', observed='g'))
self.assertFalse(self.G.is_active_trail('i', 'l', observed='g'))
self.assertTrue(self.G.is_active_trail('d', 'i', observed='l'))
self.assertFalse(self.G.is_active_trail('g', 's', observed='i'))
def test_is_active_trail(self):
self.assertFalse(self.G.is_active_trail('d', 's'))
self.assertTrue(self.G.is_active_trail('s', 'l'))
self.assertTrue(self.G.is_active_trail('d', 's', observed='g'))
self.assertFalse(self.G.is_active_trail('s', 'l', observed='g'))
def test_is_active_trail_args(self):
self.assertFalse(self.G.is_active_trail('s', 'l', 'i'))
self.assertFalse(self.G.is_active_trail('s', 'l', 'g'))
self.assertTrue(self.G.is_active_trail('d', 's', 'l'))
self.assertFalse(self.G.is_active_trail('d', 's', ['i', 'l']))
def test_get_cpds(self):
cpd_d = TabularCPD('d', 2, np.random.rand(2, 1))
cpd_i = TabularCPD('i', 2, np.random.rand(2, 1))
cpd_g = TabularCPD('g', 2, np.random.rand(2, 4), ['d', 'i'], [2, 2])
cpd_l = TabularCPD('l', 2, np.random.rand(2, 2), ['g'], 2)
cpd_s = TabularCPD('s', 2, np.random.rand(2, 2), ['i'], 2)
self.G.add_cpds(cpd_d, cpd_i, cpd_g, cpd_l, cpd_s)
self.assertEqual(self.G.get_cpds('d').variable, 'd')
def test_get_cpds1(self):
self.model = BayesianModel([('A', 'AB')])
cpd_a = TabularCPD('A', 2, np.random.rand(2, 1))
cpd_ab = TabularCPD('AB', 2, np.random.rand(2, 2), evidence=['A'],
evidence_card=[2])
self.model.add_cpds(cpd_a, cpd_ab)
self.assertEqual(self.model.get_cpds('A').variable, 'A')
self.assertEqual(self.model.get_cpds('AB').variable, 'AB')
def test_add_single_cpd(self):
from pgmpy.factors import TabularCPD
cpd_s = TabularCPD('s', 2, np.random.rand(2, 2), ['i'], 2)
self.G.add_cpds(cpd_s)
self.assertListEqual(self.G.get_cpds(), [cpd_s])
def test_add_multiple_cpds(self):
from pgmpy.factors import TabularCPD
cpd_d = TabularCPD('d', 2, np.random.rand(2, 1))
cpd_i = TabularCPD('i', 2, np.random.rand(2, 1))
cpd_g = TabularCPD('g', 2, np.random.rand(2, 4), ['d', 'i'], [2, 2])
cpd_l = TabularCPD('l', 2, np.random.rand(2, 2), ['g'], 2)
cpd_s = TabularCPD('s', 2, np.random.rand(2, 2), ['i'], 2)
self.G.add_cpds(cpd_d, cpd_i, cpd_g, cpd_l, cpd_s)
self.assertEqual(self.G.get_cpds('d'), cpd_d)
self.assertEqual(self.G.get_cpds('i'), cpd_i)
self.assertEqual(self.G.get_cpds('g'), cpd_g)
self.assertEqual(self.G.get_cpds('l'), cpd_l)
self.assertEqual(self.G.get_cpds('s'), cpd_s)
def tearDown(self):
del self.G
class TestBayesianModelFitPredict(unittest.TestCase):
def setUp(self):
self.model_disconnected = BayesianModel()
self.model_disconnected.add_nodes_from(['A', 'B', 'C', 'D', 'E'])
self.model_connected = BayesianModel([('A', 'B'), ('C', 'B'), ('C', 'D'), ('B', 'E')])
def test_disconnected_fit(self):
values = pd.DataFrame(np.random.randint(low=0, high=2, size=(1000, 5)),
columns=['A', 'B', 'C', 'D', 'E'])
self.model_disconnected.fit(values)
for node in ['A', 'B', 'C', 'D', 'E']:
cpd = self.model_disconnected.get_cpds(node)
self.assertEqual(cpd.variable, node)
np_test.assert_array_equal(cpd.cardinality, np.array([2]))
value = (values.ix[:, node].value_counts() /
values.ix[:, node].value_counts().sum()).values
np_test.assert_array_equal(cpd.values, value)
def test_connected_predict(self):
np.random.seed(42)
values = pd.DataFrame(np.random.randint(low=0, high=2, size=(1000, 5)),
columns=['A', 'B', 'C', 'D', 'E'])
fit_data = values[:800]
predict_data = values[800:].copy()
self.model_connected.fit(fit_data)
self.assertRaises(ValueError, self.model_connected.predict, predict_data)
predict_data.drop('E', axis=1, inplace=True)
e_predict = self.model_connected.predict(predict_data)
np_test.assert_array_equal(e_predict.values.ravel(),
np.array([1, 0, 1, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1,
1, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0,
0, 1, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 0, 0,
0, 0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 1, 0, 1,
0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1,
1, 0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 1,
1, 1, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 0, 0,
1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1,
0, 0, 0, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 1,
1, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1,
1, 0, 1, 0, 1, 1, 1, 1, 0, 0, 1, 0, 1, 1,
0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 1, 0,
1, 1, 0, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1,
1, 1, 1, 0]))
def tearDown(self):
del self.model_connected
del self.model_disconnected
class TestDirectedGraphCPDOperations(unittest.TestCase):
def setUp(self):
self.graph = BayesianModel()
def test_add_single_cpd(self):
cpd = TabularCPD('grade', 2, np.random.rand(2, 4),
['diff', 'intel'], [2, 2])
self.graph.add_edges_from([('diff', 'grade'), ('intel', 'grade')])
self.graph.add_cpds(cpd)
self.assertListEqual(self.graph.get_cpds(), [cpd])
def test_add_multiple_cpds(self):
cpd1 = TabularCPD('diff', 2, np.random.rand(2, 1))
cpd2 = TabularCPD('intel', 2, np.random.rand(2, 1))
cpd3 = TabularCPD('grade', 2, np.random.rand(2, 4),
['diff', 'intel'], [2, 2])
self.graph.add_edges_from([('diff', 'grade'), ('intel', 'grade')])
self.graph.add_cpds(cpd1, cpd2, cpd3)
self.assertListEqual(self.graph.get_cpds(), [cpd1, cpd2, cpd3])
def test_remove_single_cpd(self):
cpd1 = TabularCPD('diff', 2, np.random.rand(2, 1))
cpd2 = TabularCPD('intel', 2, np.random.rand(2, 1))
cpd3 = TabularCPD('grade', 2, np.random.rand(2, 4),
['diff', 'intel'], [2, 2])
self.graph.add_edges_from([('diff', 'grade'), ('intel', 'grade')])
self.graph.add_cpds(cpd1, cpd2, cpd3)
self.graph.remove_cpds(cpd1)
self.assertListEqual(self.graph.get_cpds(), [cpd2, cpd3])
def test_remove_multiple_cpds(self):
cpd1 = TabularCPD('diff', 2, np.random.rand(2, 1))
cpd2 = TabularCPD('intel', 2, np.random.rand(2, 1))
cpd3 = TabularCPD('grade', 2, np.random.rand(2, 4),
['diff', 'intel'], [2, 2])
self.graph.add_edges_from([('diff', 'grade'), ('intel', 'grade')])
self.graph.add_cpds(cpd1, cpd2, cpd3)
self.graph.remove_cpds(cpd1, cpd3)
self.assertListEqual(self.graph.get_cpds(), [cpd2])
def test_remove_single_cpd_string(self):
cpd1 = TabularCPD('diff', 2, np.random.rand(2, 1))
cpd2 = TabularCPD('intel', 2, np.random.rand(2, 1))
cpd3 = TabularCPD('grade', 2, np.random.rand(2, 4),
['diff', 'intel'], [2, 2])
self.graph.add_edges_from([('diff', 'grade'), ('intel', 'grade')])
self.graph.add_cpds(cpd1, cpd2, cpd3)
self.graph.remove_cpds('diff')
self.assertListEqual(self.graph.get_cpds(), [cpd2, cpd3])
def test_remove_multiple_cpds_string(self):
cpd1 = TabularCPD('diff', 2, np.random.rand(2, 1))
cpd2 = TabularCPD('intel', 2, np.random.rand(2, 1))
cpd3 = TabularCPD('grade', 2, np.random.rand(2, 4),
['diff', 'intel'], [2, 2])
self.graph.add_edges_from([('diff', 'grade'), ('intel', 'grade')])
self.graph.add_cpds(cpd1, cpd2, cpd3)
self.graph.remove_cpds('diff', 'grade')
self.assertListEqual(self.graph.get_cpds(), [cpd2])
def test_get_cpd_for_node(self):
cpd1 = TabularCPD('diff', 2, np.random.rand(2, 1))
cpd2 = TabularCPD('intel', 2, np.random.rand(2, 1))
cpd3 = TabularCPD('grade', 2, np.random.rand(2, 4),
['diff', 'intel'], [2, 2])
self.graph.add_edges_from([('diff', 'grade'), ('intel', 'grade')])
self.graph.add_cpds(cpd1, cpd2, cpd3)
self.assertEqual(self.graph.get_cpds('diff'), cpd1)
self.assertEqual(self.graph.get_cpds('intel'), cpd2)
self.assertEqual(self.graph.get_cpds('grade'), cpd3)
def test_get_cpd_raises_error(self):
cpd1 = TabularCPD('diff', 2, np.random.rand(2, 1))
cpd2 = TabularCPD('intel', 2, np.random.rand(2, 1))
cpd3 = TabularCPD('grade', 2, np.random.rand(2, 4),
['diff', 'intel'], [2, 2])
self.graph.add_edges_from([('diff', 'grade'), ('intel', 'grade')])
self.graph.add_cpds(cpd1, cpd2, cpd3)
self.assertRaises(ValueError, self.graph.get_cpds, 'sat')
def tearDown(self):
del self.graph
| mit |
jdmcbr/geopandas | geopandas/io/tests/test_file.py | 1 | 25477 | from collections import OrderedDict
import datetime
import io
import os
import pathlib
import tempfile
import numpy as np
import pandas as pd
import fiona
from shapely.geometry import Point, Polygon, box
import geopandas
from geopandas import GeoDataFrame, read_file
from geopandas.io.file import fiona_env
from geopandas.testing import assert_geodataframe_equal, assert_geoseries_equal
from geopandas.tests.util import PACKAGE_DIR, validate_boro_df
import pytest
_CRS = "epsg:4326"
@pytest.fixture
def df_nybb():
nybb_path = geopandas.datasets.get_path("nybb")
df = read_file(nybb_path)
return df
@pytest.fixture
def df_null():
return read_file(
os.path.join(PACKAGE_DIR, "geopandas", "tests", "data", "null_geom.geojson")
)
@pytest.fixture
def file_path():
return os.path.join(PACKAGE_DIR, "geopandas", "tests", "data", "null_geom.geojson")
@pytest.fixture
def df_points():
N = 10
crs = _CRS
df = GeoDataFrame(
[
{"geometry": Point(x, y), "value1": x + y, "value2": x * y}
for x, y in zip(range(N), range(N))
],
crs=crs,
)
return df
# -----------------------------------------------------------------------------
# to_file tests
# -----------------------------------------------------------------------------
driver_ext_pairs = [("ESRI Shapefile", "shp"), ("GeoJSON", "geojson"), ("GPKG", "gpkg")]
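# Each (OGR driver, file extension) pair parametrizes the to_file round-trip tests below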
@pytest.mark.parametrize("driver,ext", driver_ext_pairs)
def test_to_file(tmpdir, df_nybb, df_null, driver, ext):
""" Test to_file and from_file """
tempfilename = os.path.join(str(tmpdir), "boros." + ext)
df_nybb.to_file(tempfilename, driver=driver)
# Read layer back in
df = GeoDataFrame.from_file(tempfilename)
assert "geometry" in df
assert len(df) == 5
assert np.alltrue(df["BoroName"].values == df_nybb["BoroName"])
# Write layer with null geometry out to file
tempfilename = os.path.join(str(tmpdir), "null_geom." + ext)
df_null.to_file(tempfilename, driver=driver)
# Read layer back in
df = GeoDataFrame.from_file(tempfilename)
assert "geometry" in df
assert len(df) == 2
assert np.alltrue(df["Name"].values == df_null["Name"])
@pytest.mark.parametrize("driver,ext", driver_ext_pairs)
def test_to_file_pathlib(tmpdir, df_nybb, df_null, driver, ext):
""" Test to_file and from_file """
temppath = pathlib.Path(os.path.join(str(tmpdir), "boros." + ext))
df_nybb.to_file(temppath, driver=driver)
# Read layer back in
df = GeoDataFrame.from_file(temppath)
assert "geometry" in df
assert len(df) == 5
assert np.alltrue(df["BoroName"].values == df_nybb["BoroName"])
@pytest.mark.parametrize("driver,ext", driver_ext_pairs)
def test_to_file_bool(tmpdir, driver, ext):
"""Test error raise when writing with a boolean column (GH #437)."""
tempfilename = os.path.join(str(tmpdir), "temp.{0}".format(ext))
df = GeoDataFrame(
{
"a": [1, 2, 3],
"b": [True, False, True],
"geometry": [Point(0, 0), Point(1, 1), Point(2, 2)],
}
)
df.to_file(tempfilename, driver=driver)
result = read_file(tempfilename)
if driver == "GeoJSON":
# geojson by default assumes epsg:4326
result.crs = None
if driver == "ESRI Shapefile":
# Shapefile does not support boolean, so is read back as int
df["b"] = df["b"].astype("int64")
assert_geodataframe_equal(result, df)
def test_to_file_datetime(tmpdir):
"""Test writing a data file with the datetime column type"""
tempfilename = os.path.join(str(tmpdir), "test_datetime.gpkg")
point = Point(0, 0)
now = datetime.datetime.now()
df = GeoDataFrame({"a": [1, 2], "b": [now, now]}, geometry=[point, point], crs={})
df.to_file(tempfilename, driver="GPKG")
df_read = read_file(tempfilename)
assert_geoseries_equal(df.geometry, df_read.geometry)
@pytest.mark.parametrize("driver,ext", driver_ext_pairs)
def test_to_file_with_point_z(tmpdir, ext, driver):
"""Test that 3D geometries are retained in writes (GH #612)."""
tempfilename = os.path.join(str(tmpdir), "test_3Dpoint." + ext)
point3d = Point(0, 0, 500)
point2d = Point(1, 1)
df = GeoDataFrame({"a": [1, 2]}, geometry=[point3d, point2d], crs=_CRS)
df.to_file(tempfilename, driver=driver)
df_read = GeoDataFrame.from_file(tempfilename)
assert_geoseries_equal(df.geometry, df_read.geometry)
@pytest.mark.parametrize("driver,ext", driver_ext_pairs)
def test_to_file_with_poly_z(tmpdir, ext, driver):
"""Test that 3D geometries are retained in writes (GH #612)."""
tempfilename = os.path.join(str(tmpdir), "test_3Dpoly." + ext)
poly3d = Polygon([[0, 0, 5], [0, 1, 5], [1, 1, 5], [1, 0, 5]])
poly2d = Polygon([[0, 0], [0, 1], [1, 1], [1, 0]])
df = GeoDataFrame({"a": [1, 2]}, geometry=[poly3d, poly2d], crs=_CRS)
df.to_file(tempfilename, driver=driver)
df_read = GeoDataFrame.from_file(tempfilename)
assert_geoseries_equal(df.geometry, df_read.geometry)
def test_to_file_types(tmpdir, df_points):
""" Test various integer type columns (GH#93) """
tempfilename = os.path.join(str(tmpdir), "int.shp")
int_types = [
np.int8,
np.int16,
np.int32,
np.int64,
np.intp,
np.uint8,
np.uint16,
np.uint32,
np.uint64,
]
geometry = df_points.geometry
data = dict(
(str(i), np.arange(len(geometry), dtype=dtype))
for i, dtype in enumerate(int_types)
)
df = GeoDataFrame(data, geometry=geometry)
df.to_file(tempfilename)
def test_to_file_int64(tmpdir, df_points):
tempfilename = os.path.join(str(tmpdir), "int64.shp")
geometry = df_points.geometry
df = GeoDataFrame(geometry=geometry)
df["data"] = pd.array([1, np.nan] * 5, dtype=pd.Int64Dtype())
df.to_file(tempfilename)
df_read = GeoDataFrame.from_file(tempfilename)
assert_geodataframe_equal(df_read, df, check_dtype=False, check_like=True)
def test_to_file_empty(tmpdir):
input_empty_df = GeoDataFrame()
tempfilename = os.path.join(str(tmpdir), "test.shp")
with pytest.raises(ValueError, match="Cannot write empty DataFrame to file."):
input_empty_df.to_file(tempfilename)
def test_to_file_privacy(tmpdir, df_nybb):
tempfilename = os.path.join(str(tmpdir), "test.shp")
with pytest.warns(DeprecationWarning):
geopandas.io.file.to_file(df_nybb, tempfilename)
def test_to_file_schema(tmpdir, df_nybb):
"""
Ensure that the file is written according to the schema
if it is specified
"""
tempfilename = os.path.join(str(tmpdir), "test.shp")
properties = OrderedDict(
[
("Shape_Leng", "float:19.11"),
("BoroName", "str:40"),
("BoroCode", "int:10"),
("Shape_Area", "float:19.11"),
]
)
schema = {"geometry": "Polygon", "properties": properties}
# Take the first 2 features to speed things up a bit
df_nybb.iloc[:2].to_file(tempfilename, schema=schema)
with fiona.open(tempfilename) as f:
result_schema = f.schema
assert result_schema == schema
def test_to_file_column_len(tmpdir, df_points):
"""
Ensure that a warning about truncation is given when a geodataframe with
column names longer than 10 characters is saved to shapefile
"""
tempfilename = os.path.join(str(tmpdir), "test.shp")
df = df_points.iloc[:1].copy()
df["0123456789A"] = ["the column name is 11 characters"]
with pytest.warns(
UserWarning, match="Column names longer than 10 characters will be truncated"
):
df.to_file(tempfilename, driver="ESRI Shapefile")
@pytest.mark.parametrize("driver,ext", driver_ext_pairs)
def test_append_file(tmpdir, df_nybb, df_null, driver, ext):
""" Test to_file with append mode and from_file """
from fiona import supported_drivers
if "a" not in supported_drivers[driver]:
return None
tempfilename = os.path.join(str(tmpdir), "boros." + ext)
df_nybb.to_file(tempfilename, driver=driver)
df_nybb.to_file(tempfilename, mode="a", driver=driver)
# Read layer back in
df = GeoDataFrame.from_file(tempfilename)
assert "geometry" in df
assert len(df) == (5 * 2)
expected = pd.concat([df_nybb] * 2, ignore_index=True)
assert_geodataframe_equal(df, expected, check_less_precise=True)
# Write layer with null geometry out to file
tempfilename = os.path.join(str(tmpdir), "null_geom." + ext)
df_null.to_file(tempfilename, driver=driver)
df_null.to_file(tempfilename, mode="a", driver=driver)
# Read layer back in
df = GeoDataFrame.from_file(tempfilename)
assert "geometry" in df
assert len(df) == (2 * 2)
expected = pd.concat([df_null] * 2, ignore_index=True)
assert_geodataframe_equal(df, expected, check_less_precise=True)
# -----------------------------------------------------------------------------
# read_file tests
# -----------------------------------------------------------------------------
with fiona.open(geopandas.datasets.get_path("nybb")) as f:
CRS = f.crs["init"] if "init" in f.crs else f.crs_wkt
NYBB_COLUMNS = list(f.meta["schema"]["properties"].keys())
def test_read_file(df_nybb):
df = df_nybb.rename(columns=lambda x: x.lower())
validate_boro_df(df)
assert df.crs == CRS
# get lower case columns, and exclude geometry column from comparison
lower_columns = [c.lower() for c in NYBB_COLUMNS]
assert (df.columns[:-1] == lower_columns).all()
@pytest.mark.web
def test_read_file_remote_geojson_url():
url = (
"https://raw.githubusercontent.com/geopandas/geopandas/"
"master/geopandas/tests/data/null_geom.geojson"
)
gdf = read_file(url)
assert isinstance(gdf, geopandas.GeoDataFrame)
@pytest.mark.web
def test_read_file_remote_zipfile_url():
url = (
"https://raw.githubusercontent.com/geopandas/geopandas/"
"master/geopandas/datasets/nybb_16a.zip"
)
gdf = read_file(url)
assert isinstance(gdf, geopandas.GeoDataFrame)
def test_read_file_textio(file_path):
file_text_stream = open(file_path)
file_stringio = io.StringIO(open(file_path).read())
gdf_text_stream = read_file(file_text_stream)
gdf_stringio = read_file(file_stringio)
assert isinstance(gdf_text_stream, geopandas.GeoDataFrame)
assert isinstance(gdf_stringio, geopandas.GeoDataFrame)
def test_read_file_bytesio(file_path):
file_binary_stream = open(file_path, "rb")
file_bytesio = io.BytesIO(open(file_path, "rb").read())
gdf_binary_stream = read_file(file_binary_stream)
gdf_bytesio = read_file(file_bytesio)
assert isinstance(gdf_binary_stream, geopandas.GeoDataFrame)
assert isinstance(gdf_bytesio, geopandas.GeoDataFrame)
def test_read_file_raw_stream(file_path):
file_raw_stream = open(file_path, "rb", buffering=0)
gdf_raw_stream = read_file(file_raw_stream)
assert isinstance(gdf_raw_stream, geopandas.GeoDataFrame)
def test_read_file_pathlib(file_path):
path_object = pathlib.Path(file_path)
gdf_path_object = read_file(path_object)
assert isinstance(gdf_path_object, geopandas.GeoDataFrame)
def test_read_file_tempfile():
temp = tempfile.TemporaryFile()
temp.write(
b"""
{
"type": "Feature",
"geometry": {
"type": "Point",
"coordinates": [0, 0]
},
"properties": {
"name": "Null Island"
}
}
"""
)
temp.seek(0)
gdf_tempfile = geopandas.read_file(temp)
assert isinstance(gdf_tempfile, geopandas.GeoDataFrame)
temp.close()
def test_read_binary_file_fsspec():
fsspec = pytest.importorskip("fsspec")
    # Remove the zip scheme so fsspec doesn't open it as a zipped file;
    # instead we want to read the raw bytes and let fiona decode them.
path = geopandas.datasets.get_path("nybb")[6:]
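    # (Editorial note, assumption) get_path("nybb") is expected to return a
    # "zip://"-prefixed path here, so slicing off the first 6 characters
    # ("zip://") leaves the plain filesystem path to the zip archive.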
with fsspec.open(path, "rb") as f:
gdf = read_file(f)
assert isinstance(gdf, geopandas.GeoDataFrame)
def test_read_text_file_fsspec(file_path):
fsspec = pytest.importorskip("fsspec")
with fsspec.open(file_path, "r") as f:
gdf = read_file(f)
assert isinstance(gdf, geopandas.GeoDataFrame)
def test_infer_zipped_file():
# Remove the zip scheme so that the test for a zipped file can
# check it and add it back.
path = geopandas.datasets.get_path("nybb")[6:]
gdf = read_file(path)
assert isinstance(gdf, geopandas.GeoDataFrame)
    # Check that it can successfully add a zip scheme to a path that already has a scheme
gdf = read_file("file+file://" + path)
assert isinstance(gdf, geopandas.GeoDataFrame)
# Check that it can add a zip scheme for a path that includes a subpath
# within the archive.
gdf = read_file(path + "!nybb.shp")
assert isinstance(gdf, geopandas.GeoDataFrame)
def test_allow_legacy_gdal_path():
# Construct a GDAL-style zip path.
path = "/vsizip/" + geopandas.datasets.get_path("nybb")[6:]
gdf = read_file(path)
assert isinstance(gdf, geopandas.GeoDataFrame)
def test_read_file_filtered(df_nybb):
full_df_shape = df_nybb.shape
nybb_filename = geopandas.datasets.get_path("nybb")
bbox = (
1031051.7879884212,
224272.49231459625,
1047224.3104931959,
244317.30894023244,
)
filtered_df = read_file(nybb_filename, bbox=bbox)
filtered_df_shape = filtered_df.shape
assert full_df_shape != filtered_df_shape
assert filtered_df_shape == (2, 5)
def test_read_file_filtered__rows(df_nybb):
full_df_shape = df_nybb.shape
nybb_filename = geopandas.datasets.get_path("nybb")
filtered_df = read_file(nybb_filename, rows=1)
filtered_df_shape = filtered_df.shape
assert full_df_shape != filtered_df_shape
assert filtered_df_shape == (1, 5)
def test_read_file_filtered__rows_bbox(df_nybb):
full_df_shape = df_nybb.shape
nybb_filename = geopandas.datasets.get_path("nybb")
bbox = (
1031051.7879884212,
224272.49231459625,
1047224.3104931959,
244317.30894023244,
)
filtered_df = read_file(nybb_filename, bbox=bbox, rows=slice(-1, None))
filtered_df_shape = filtered_df.shape
assert full_df_shape != filtered_df_shape
assert filtered_df_shape == (1, 5)
def test_read_file_filtered__rows_bbox__polygon(df_nybb):
full_df_shape = df_nybb.shape
nybb_filename = geopandas.datasets.get_path("nybb")
bbox = box(
1031051.7879884212, 224272.49231459625, 1047224.3104931959, 244317.30894023244
)
filtered_df = read_file(nybb_filename, bbox=bbox, rows=slice(-1, None))
filtered_df_shape = filtered_df.shape
assert full_df_shape != filtered_df_shape
assert filtered_df_shape == (1, 5)
def test_read_file_filtered_rows_invalid():
with pytest.raises(TypeError):
read_file(geopandas.datasets.get_path("nybb"), rows="not_a_slice")
def test_read_file__ignore_geometry():
pdf = geopandas.read_file(
geopandas.datasets.get_path("naturalearth_lowres"), ignore_geometry=True
)
assert "geometry" not in pdf.columns
assert isinstance(pdf, pd.DataFrame) and not isinstance(pdf, geopandas.GeoDataFrame)
def test_read_file__ignore_all_fields():
gdf = geopandas.read_file(
geopandas.datasets.get_path("naturalearth_lowres"),
ignore_fields=["pop_est", "continent", "name", "iso_a3", "gdp_md_est"],
)
assert gdf.columns.tolist() == ["geometry"]
def test_read_file_filtered_with_gdf_boundary(df_nybb):
full_df_shape = df_nybb.shape
nybb_filename = geopandas.datasets.get_path("nybb")
bbox = geopandas.GeoDataFrame(
geometry=[
box(
1031051.7879884212,
224272.49231459625,
1047224.3104931959,
244317.30894023244,
)
],
crs=CRS,
)
filtered_df = read_file(nybb_filename, bbox=bbox)
filtered_df_shape = filtered_df.shape
assert full_df_shape != filtered_df_shape
assert filtered_df_shape == (2, 5)
def test_read_file_filtered_with_gdf_boundary__mask(df_nybb):
gdf_mask = geopandas.read_file(geopandas.datasets.get_path("naturalearth_lowres"))
gdf = geopandas.read_file(
geopandas.datasets.get_path("naturalearth_cities"),
mask=gdf_mask[gdf_mask.continent == "Africa"],
)
filtered_df_shape = gdf.shape
assert filtered_df_shape == (50, 2)
def test_read_file_filtered_with_gdf_boundary__mask__polygon(df_nybb):
full_df_shape = df_nybb.shape
nybb_filename = geopandas.datasets.get_path("nybb")
mask = box(
1031051.7879884212, 224272.49231459625, 1047224.3104931959, 244317.30894023244
)
filtered_df = read_file(nybb_filename, mask=mask)
filtered_df_shape = filtered_df.shape
assert full_df_shape != filtered_df_shape
assert filtered_df_shape == (2, 5)
def test_read_file_filtered_with_gdf_boundary_mismatched_crs(df_nybb):
full_df_shape = df_nybb.shape
nybb_filename = geopandas.datasets.get_path("nybb")
bbox = geopandas.GeoDataFrame(
geometry=[
box(
1031051.7879884212,
224272.49231459625,
1047224.3104931959,
244317.30894023244,
)
],
crs=CRS,
)
bbox.to_crs(epsg=4326, inplace=True)
filtered_df = read_file(nybb_filename, bbox=bbox)
filtered_df_shape = filtered_df.shape
assert full_df_shape != filtered_df_shape
assert filtered_df_shape == (2, 5)
def test_read_file_filtered_with_gdf_boundary_mismatched_crs__mask(df_nybb):
full_df_shape = df_nybb.shape
nybb_filename = geopandas.datasets.get_path("nybb")
mask = geopandas.GeoDataFrame(
geometry=[
box(
1031051.7879884212,
224272.49231459625,
1047224.3104931959,
244317.30894023244,
)
],
crs=CRS,
)
mask.to_crs(epsg=4326, inplace=True)
filtered_df = read_file(nybb_filename, mask=mask.geometry)
filtered_df_shape = filtered_df.shape
assert full_df_shape != filtered_df_shape
assert filtered_df_shape == (2, 5)
def test_read_file_empty_shapefile(tmpdir):
# create empty shapefile
meta = {
"crs": {},
"crs_wkt": "",
"driver": "ESRI Shapefile",
"schema": {
"geometry": "Point",
"properties": OrderedDict([("A", "int:9"), ("Z", "float:24.15")]),
},
}
fname = str(tmpdir.join("test_empty.shp"))
with fiona_env():
with fiona.open(fname, "w", **meta) as _: # noqa
pass
empty = read_file(fname)
assert isinstance(empty, geopandas.GeoDataFrame)
assert all(empty.columns == ["A", "Z", "geometry"])
def test_read_file_privacy(tmpdir, df_nybb):
with pytest.warns(DeprecationWarning):
geopandas.io.file.read_file(geopandas.datasets.get_path("nybb"))
class FileNumber(object):
def __init__(self, tmpdir, base, ext):
self.tmpdir = str(tmpdir)
self.base = base
self.ext = ext
self.fileno = 0
def __repr__(self):
filename = "{0}{1:02d}.{2}".format(self.base, self.fileno, self.ext)
return os.path.join(self.tmpdir, filename)
def __next__(self):
self.fileno += 1
return repr(self)
@pytest.mark.parametrize(
"driver,ext", [("ESRI Shapefile", "shp"), ("GeoJSON", "geojson")]
)
def test_write_index_to_file(tmpdir, df_points, driver, ext):
fngen = FileNumber(tmpdir, "check", ext)
def do_checks(df, index_is_used):
# check combinations of index=None|True|False on GeoDataFrame/GeoSeries
other_cols = list(df.columns)
other_cols.remove("geometry")
if driver == "ESRI Shapefile":
# ESRI Shapefile will add FID if no other columns exist
driver_col = ["FID"]
else:
driver_col = []
if index_is_used:
index_cols = list(df.index.names)
else:
index_cols = [None] * len(df.index.names)
# replicate pandas' default index names for regular and MultiIndex
if index_cols == [None]:
index_cols = ["index"]
elif len(index_cols) > 1 and not all(index_cols):
for level, index_col in enumerate(index_cols):
if index_col is None:
index_cols[level] = "level_" + str(level)
# check GeoDataFrame with default index=None to autodetect
tempfilename = next(fngen)
df.to_file(tempfilename, driver=driver, index=None)
df_check = read_file(tempfilename)
if len(other_cols) == 0:
expected_cols = driver_col[:]
else:
expected_cols = []
if index_is_used:
expected_cols += index_cols
expected_cols += other_cols + ["geometry"]
assert list(df_check.columns) == expected_cols
# similar check on GeoSeries with index=None
tempfilename = next(fngen)
df.geometry.to_file(tempfilename, driver=driver, index=None)
df_check = read_file(tempfilename)
if index_is_used:
expected_cols = index_cols + ["geometry"]
else:
expected_cols = driver_col + ["geometry"]
assert list(df_check.columns) == expected_cols
# check GeoDataFrame with index=True
tempfilename = next(fngen)
df.to_file(tempfilename, driver=driver, index=True)
df_check = read_file(tempfilename)
assert list(df_check.columns) == index_cols + other_cols + ["geometry"]
# similar check on GeoSeries with index=True
tempfilename = next(fngen)
df.geometry.to_file(tempfilename, driver=driver, index=True)
df_check = read_file(tempfilename)
assert list(df_check.columns) == index_cols + ["geometry"]
# check GeoDataFrame with index=False
tempfilename = next(fngen)
df.to_file(tempfilename, driver=driver, index=False)
df_check = read_file(tempfilename)
if len(other_cols) == 0:
expected_cols = driver_col + ["geometry"]
else:
expected_cols = other_cols + ["geometry"]
assert list(df_check.columns) == expected_cols
# similar check on GeoSeries with index=False
tempfilename = next(fngen)
df.geometry.to_file(tempfilename, driver=driver, index=False)
df_check = read_file(tempfilename)
assert list(df_check.columns) == driver_col + ["geometry"]
return
#
# Checks where index is not used/saved
#
# index is a default RangeIndex
df_p = df_points.copy()
df = GeoDataFrame(df_p["value1"], geometry=df_p.geometry)
do_checks(df, index_is_used=False)
# index is a RangeIndex, starting from 1
df.index += 1
do_checks(df, index_is_used=False)
# index is a Int64Index regular sequence from 1
df_p.index = list(range(1, len(df) + 1))
df = GeoDataFrame(df_p["value1"], geometry=df_p.geometry)
do_checks(df, index_is_used=False)
# index was a default RangeIndex, but delete one row to make an Int64Index
df_p = df_points.copy()
df = GeoDataFrame(df_p["value1"], geometry=df_p.geometry).drop(5, axis=0)
do_checks(df, index_is_used=False)
# no other columns (except geometry)
df = GeoDataFrame(geometry=df_p.geometry)
do_checks(df, index_is_used=False)
#
# Checks where index is used/saved
#
# named index
df_p = df_points.copy()
df = GeoDataFrame(df_p["value1"], geometry=df_p.geometry)
df.index.name = "foo_index"
do_checks(df, index_is_used=True)
# named index, same as pandas' default name after .reset_index(drop=False)
df.index.name = "index"
do_checks(df, index_is_used=True)
# named MultiIndex
df_p = df_points.copy()
df_p["value3"] = df_p["value2"] - df_p["value1"]
df_p.set_index(["value1", "value2"], inplace=True)
df = GeoDataFrame(df_p, geometry=df_p.geometry)
do_checks(df, index_is_used=True)
# partially unnamed MultiIndex
df.index.names = ["first", None]
do_checks(df, index_is_used=True)
# unnamed MultiIndex
df.index.names = [None, None]
do_checks(df, index_is_used=True)
# unnamed Float64Index
df_p = df_points.copy()
df = GeoDataFrame(df_p["value1"], geometry=df_p.geometry)
df.index = df_p.index.astype(float) / 10
do_checks(df, index_is_used=True)
# named Float64Index
df.index.name = "centile"
do_checks(df, index_is_used=True)
# index as string
df_p = df_points.copy()
df = GeoDataFrame(df_p["value1"], geometry=df_p.geometry)
df.index = pd.TimedeltaIndex(range(len(df)), "days")
# TODO: TimedeltaIndex is an invalid field type
df.index = df.index.astype(str)
do_checks(df, index_is_used=True)
# unnamed DatetimeIndex
df_p = df_points.copy()
df = GeoDataFrame(df_p["value1"], geometry=df_p.geometry)
df.index = pd.TimedeltaIndex(range(len(df)), "days") + pd.DatetimeIndex(
["1999-12-27"] * len(df)
)
if driver == "ESRI Shapefile":
# Shapefile driver does not support datetime fields
df.index = df.index.astype(str)
do_checks(df, index_is_used=True)
# named DatetimeIndex
df.index.name = "datetime"
do_checks(df, index_is_used=True)
| bsd-3-clause |
MJuddBooth/pandas | pandas/tests/util/test_validate_kwargs.py | 2 | 2040 | # -*- coding: utf-8 -*-
from collections import OrderedDict
import pytest
from pandas.util._validators import validate_bool_kwarg, validate_kwargs
_fname = "func"
def test_bad_kwarg():
good_arg = "f"
bad_arg = good_arg + "o"
compat_args = OrderedDict()
compat_args[good_arg] = "foo"
compat_args[bad_arg + "o"] = "bar"
kwargs = {good_arg: "foo", bad_arg: "bar"}
msg = (r"{fname}\(\) got an unexpected "
r"keyword argument '{arg}'".format(fname=_fname, arg=bad_arg))
with pytest.raises(TypeError, match=msg):
validate_kwargs(_fname, kwargs, compat_args)
@pytest.mark.parametrize("i", range(1, 3))
def test_not_all_none(i):
bad_arg = "foo"
msg = (r"the '{arg}' parameter is not supported "
r"in the pandas implementation of {func}\(\)".
format(arg=bad_arg, func=_fname))
compat_args = OrderedDict()
compat_args["foo"] = 1
compat_args["bar"] = "s"
compat_args["baz"] = None
kwarg_keys = ("foo", "bar", "baz")
kwarg_vals = (2, "s", None)
kwargs = dict(zip(kwarg_keys[:i], kwarg_vals[:i]))
with pytest.raises(ValueError, match=msg):
validate_kwargs(_fname, kwargs, compat_args)
def test_validation():
# No exceptions should be raised.
compat_args = OrderedDict()
compat_args["f"] = None
compat_args["b"] = 1
compat_args["ba"] = "s"
kwargs = dict(f=None, b=1)
validate_kwargs(_fname, kwargs, compat_args)
@pytest.mark.parametrize("name", ["inplace", "copy"])
@pytest.mark.parametrize("value", [1, "True", [1, 2, 3], 5.0])
def test_validate_bool_kwarg_fail(name, value):
msg = ("For argument \"%s\" expected type bool, received type %s" %
(name, type(value).__name__))
with pytest.raises(ValueError, match=msg):
validate_bool_kwarg(value, name)
@pytest.mark.parametrize("name", ["inplace", "copy"])
@pytest.mark.parametrize("value", [True, False, None])
def test_validate_bool_kwarg(name, value):
assert validate_bool_kwarg(value, name) == value
| bsd-3-clause |
NelisVerhoef/scikit-learn | sklearn/manifold/tests/test_spectral_embedding.py | 216 | 8091 | from nose.tools import assert_true
from nose.tools import assert_equal
from scipy.sparse import csr_matrix
from scipy.sparse import csc_matrix
import numpy as np
from numpy.testing import assert_array_almost_equal, assert_array_equal
from nose.tools import assert_raises
from nose.plugins.skip import SkipTest
from sklearn.manifold.spectral_embedding_ import SpectralEmbedding
from sklearn.manifold.spectral_embedding_ import _graph_is_connected
from sklearn.manifold import spectral_embedding
from sklearn.metrics.pairwise import rbf_kernel
from sklearn.metrics import normalized_mutual_info_score
from sklearn.cluster import KMeans
from sklearn.datasets.samples_generator import make_blobs
# non centered, sparse centers to check the
centers = np.array([
[0.0, 5.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 4.0, 0.0, 0.0],
[1.0, 0.0, 0.0, 5.0, 1.0],
])
n_samples = 1000
n_clusters, n_features = centers.shape
S, true_labels = make_blobs(n_samples=n_samples, centers=centers,
cluster_std=1., random_state=42)
def _check_with_col_sign_flipping(A, B, tol=0.0):
""" Check array A and B are equal with possible sign flipping on
each columns"""
sign = True
for column_idx in range(A.shape[1]):
sign = sign and ((((A[:, column_idx] -
B[:, column_idx]) ** 2).mean() <= tol ** 2) or
(((A[:, column_idx] +
B[:, column_idx]) ** 2).mean() <= tol ** 2))
if not sign:
return False
return True
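# Illustrative sketch (editorial addition, not part of the original test suite):
# the helper above accepts two embeddings as equal even when some columns have
# opposite signs, which is the natural invariance of eigenvector-based embeddings.
# It only assumes numpy, imported above as np.
def _demo_sign_flip_tolerance():
    rng = np.random.RandomState(0)
    A = rng.randn(20, 3)
    B = A.copy()
    B[:, 1] *= -1  # flip the sign of one column
    same = _check_with_col_sign_flipping(A, B, tol=1e-12)
    different = _check_with_col_sign_flipping(A, rng.randn(20, 3), tol=1e-12)
    return same, different  # expected: (True, False)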
def test_spectral_embedding_two_components(seed=36):
# Test spectral embedding with two components
random_state = np.random.RandomState(seed)
n_sample = 100
affinity = np.zeros(shape=[n_sample * 2,
n_sample * 2])
# first component
affinity[0:n_sample,
0:n_sample] = np.abs(random_state.randn(n_sample, n_sample)) + 2
# second component
affinity[n_sample::,
n_sample::] = np.abs(random_state.randn(n_sample, n_sample)) + 2
# connection
affinity[0, n_sample + 1] = 1
affinity[n_sample + 1, 0] = 1
affinity.flat[::2 * n_sample + 1] = 0
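    # (Editorial note) the flat slice above, with stride 2 * n_sample + 1, walks the
    # main diagonal of the (2 * n_sample, 2 * n_sample) matrix, so this zeroes out
    # the self-affinities before the matrix is symmetrised below.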
affinity = 0.5 * (affinity + affinity.T)
true_label = np.zeros(shape=2 * n_sample)
true_label[0:n_sample] = 1
se_precomp = SpectralEmbedding(n_components=1, affinity="precomputed",
random_state=np.random.RandomState(seed))
embedded_coordinate = se_precomp.fit_transform(affinity)
# Some numpy versions are touchy with types
embedded_coordinate = \
se_precomp.fit_transform(affinity.astype(np.float32))
# thresholding on the first components using 0.
label_ = np.array(embedded_coordinate.ravel() < 0, dtype="float")
assert_equal(normalized_mutual_info_score(true_label, label_), 1.0)
def test_spectral_embedding_precomputed_affinity(seed=36):
# Test spectral embedding with precomputed kernel
gamma = 1.0
se_precomp = SpectralEmbedding(n_components=2, affinity="precomputed",
random_state=np.random.RandomState(seed))
se_rbf = SpectralEmbedding(n_components=2, affinity="rbf",
gamma=gamma,
random_state=np.random.RandomState(seed))
embed_precomp = se_precomp.fit_transform(rbf_kernel(S, gamma=gamma))
embed_rbf = se_rbf.fit_transform(S)
assert_array_almost_equal(
se_precomp.affinity_matrix_, se_rbf.affinity_matrix_)
assert_true(_check_with_col_sign_flipping(embed_precomp, embed_rbf, 0.05))
def test_spectral_embedding_callable_affinity(seed=36):
# Test spectral embedding with callable affinity
gamma = 0.9
kern = rbf_kernel(S, gamma=gamma)
se_callable = SpectralEmbedding(n_components=2,
affinity=(
lambda x: rbf_kernel(x, gamma=gamma)),
gamma=gamma,
random_state=np.random.RandomState(seed))
se_rbf = SpectralEmbedding(n_components=2, affinity="rbf",
gamma=gamma,
random_state=np.random.RandomState(seed))
embed_rbf = se_rbf.fit_transform(S)
embed_callable = se_callable.fit_transform(S)
assert_array_almost_equal(
se_callable.affinity_matrix_, se_rbf.affinity_matrix_)
assert_array_almost_equal(kern, se_rbf.affinity_matrix_)
assert_true(
_check_with_col_sign_flipping(embed_rbf, embed_callable, 0.05))
def test_spectral_embedding_amg_solver(seed=36):
# Test spectral embedding with amg solver
try:
from pyamg import smoothed_aggregation_solver
except ImportError:
raise SkipTest("pyamg not available.")
se_amg = SpectralEmbedding(n_components=2, affinity="nearest_neighbors",
eigen_solver="amg", n_neighbors=5,
random_state=np.random.RandomState(seed))
se_arpack = SpectralEmbedding(n_components=2, affinity="nearest_neighbors",
eigen_solver="arpack", n_neighbors=5,
random_state=np.random.RandomState(seed))
embed_amg = se_amg.fit_transform(S)
embed_arpack = se_arpack.fit_transform(S)
assert_true(_check_with_col_sign_flipping(embed_amg, embed_arpack, 0.05))
def test_pipeline_spectral_clustering(seed=36):
# Test using pipeline to do spectral clustering
random_state = np.random.RandomState(seed)
se_rbf = SpectralEmbedding(n_components=n_clusters,
affinity="rbf",
random_state=random_state)
se_knn = SpectralEmbedding(n_components=n_clusters,
affinity="nearest_neighbors",
n_neighbors=5,
random_state=random_state)
for se in [se_rbf, se_knn]:
km = KMeans(n_clusters=n_clusters, random_state=random_state)
km.fit(se.fit_transform(S))
assert_array_almost_equal(
normalized_mutual_info_score(
km.labels_,
true_labels), 1.0, 2)
def test_spectral_embedding_unknown_eigensolver(seed=36):
# Test that SpectralClustering fails with an unknown eigensolver
se = SpectralEmbedding(n_components=1, affinity="precomputed",
random_state=np.random.RandomState(seed),
eigen_solver="<unknown>")
assert_raises(ValueError, se.fit, S)
def test_spectral_embedding_unknown_affinity(seed=36):
# Test that SpectralClustering fails with an unknown affinity type
se = SpectralEmbedding(n_components=1, affinity="<unknown>",
random_state=np.random.RandomState(seed))
assert_raises(ValueError, se.fit, S)
def test_connectivity(seed=36):
# Test that graph connectivity test works as expected
graph = np.array([[1, 0, 0, 0, 0],
[0, 1, 1, 0, 0],
[0, 1, 1, 1, 0],
[0, 0, 1, 1, 1],
[0, 0, 0, 1, 1]])
assert_equal(_graph_is_connected(graph), False)
assert_equal(_graph_is_connected(csr_matrix(graph)), False)
assert_equal(_graph_is_connected(csc_matrix(graph)), False)
graph = np.array([[1, 1, 0, 0, 0],
[1, 1, 1, 0, 0],
[0, 1, 1, 1, 0],
[0, 0, 1, 1, 1],
[0, 0, 0, 1, 1]])
assert_equal(_graph_is_connected(graph), True)
assert_equal(_graph_is_connected(csr_matrix(graph)), True)
assert_equal(_graph_is_connected(csc_matrix(graph)), True)
def test_spectral_embedding_deterministic():
# Test that Spectral Embedding is deterministic
random_state = np.random.RandomState(36)
data = random_state.randn(10, 30)
sims = rbf_kernel(data)
embedding_1 = spectral_embedding(sims)
embedding_2 = spectral_embedding(sims)
assert_array_almost_equal(embedding_1, embedding_2)
| bsd-3-clause |
crichardson17/starburst_atlas | Low_resolution_sims/Dusty_LowRes/Geneva_inst_NoRot/Geneva_inst_NoRot_2/fullgrid/IR.py | 30 | 9364 | import csv
import matplotlib.pyplot as plt
from numpy import *
import scipy.interpolate
import math
from pylab import *
from matplotlib.ticker import MultipleLocator, FormatStrFormatter
import matplotlib.patches as patches
from matplotlib.path import Path
import os
# ------------------------------------------------------------------------------------------------------
#inputs
for file in os.listdir('.'):
if file.endswith("1.grd"):
gridfile1 = file
for file in os.listdir('.'):
if file.endswith("2.grd"):
gridfile2 = file
for file in os.listdir('.'):
if file.endswith("3.grd"):
gridfile3 = file
# ------------------------
for file in os.listdir('.'):
if file.endswith("1.txt"):
Elines1 = file
for file in os.listdir('.'):
if file.endswith("2.txt"):
Elines2 = file
for file in os.listdir('.'):
if file.endswith("3.txt"):
Elines3 = file
# ------------------------------------------------------------------------------------------------------
#Patches data
#for the Kewley and Levesque data
verts = [
(1., 7.97712125471966000000), # left, bottom
(1., 9.57712125471966000000), # left, top
(2., 10.57712125471970000000), # right, top
(2., 8.97712125471966000000), # right, bottom
(0., 0.), # ignored
]
codes = [Path.MOVETO,
Path.LINETO,
Path.LINETO,
Path.LINETO,
Path.CLOSEPOLY,
]
path = Path(verts, codes)
# ------------------------
#for the Kewley 01 data
verts2 = [
(2.4, 9.243038049), # left, bottom
(2.4, 11.0211893), # left, top
(2.6, 11.0211893), # right, top
(2.6, 9.243038049), # right, bottom
(0, 0.), # ignored
]
path = Path(verts, codes)
path2 = Path(verts2, codes)
# -------------------------
#for the Moy et al data
verts3 = [
(1., 6.86712125471966000000), # left, bottom
(1., 10.18712125471970000000), # left, top
(3., 12.18712125471970000000), # right, top
(3., 8.86712125471966000000), # right, bottom
(0., 0.), # ignored
]
path = Path(verts, codes)
path3 = Path(verts3, codes)
# ------------------------------------------------------------------------------------------------------
#the routine to add patches for others peoples' data onto our plots.
def add_patches(ax):
patch3 = patches.PathPatch(path3, facecolor='yellow', lw=0)
patch2 = patches.PathPatch(path2, facecolor='green', lw=0)
patch = patches.PathPatch(path, facecolor='red', lw=0)
ax1.add_patch(patch3)
ax1.add_patch(patch2)
ax1.add_patch(patch)
# ------------------------------------------------------------------------------------------------------
#the subplot routine; will be called later
numplots = 12
def add_sub_plot(sub_num):
plt.subplot(3,4,sub_num)
rbf = scipy.interpolate.Rbf(x, y, z[:,sub_num-1], function='linear')
zi = rbf(xi, yi)
contour = plt.contour(xi,yi,zi, levels, colors='c', linestyles = 'dashed')
contour2 = plt.contour(xi,yi,zi, levels2, colors='k', linewidths=1.5)
plt.scatter(max_values[line[sub_num-1],2], max_values[line[sub_num-1],3], c ='k',marker = '*')
plt.annotate(headers[line[sub_num-1]], xy=(8,11), xytext=(6,8.5), fontsize = 10)
plt.annotate(max_values[line[sub_num-1],0], xy= (max_values[line[sub_num-1],2], max_values[line[sub_num-1],3]), xytext = (0, -10), textcoords = 'offset points', ha = 'right', va = 'bottom', fontsize=10)
if sub_num == numplots / 2.:
print "half the plots are complete"
#axis limits
yt_min = 8
yt_max = 23
xt_min = 0
xt_max = 12
plt.ylim(yt_min,yt_max)
plt.xlim(xt_min,xt_max)
plt.yticks(arange(yt_min+1,yt_max,1),fontsize=10)
plt.xticks(arange(xt_min+1,xt_max,1), fontsize = 10)
if sub_num in [2,3,4,6,7,8,10,11,12]:
plt.tick_params(labelleft = 'off')
else:
plt.tick_params(labelleft = 'on')
plt.ylabel('Log ($ \phi _{\mathrm{H}} $)')
if sub_num in [1,2,3,4,5,6,7,8]:
plt.tick_params(labelbottom = 'off')
else:
plt.tick_params(labelbottom = 'on')
plt.xlabel('Log($n _{\mathrm{H}} $)')
if sub_num == 1:
plt.yticks(arange(yt_min+1,yt_max+1,1),fontsize=10)
if sub_num == 9:
plt.yticks(arange(yt_min,yt_max,1),fontsize=10)
plt.xticks(arange(xt_min,xt_max,1), fontsize = 10)
if sub_num == 12:
plt.xticks(arange(xt_min+1,xt_max+1,1), fontsize = 10)
# ---------------------------------------------------
#this is where the grid information (phi and hdens) is read in and saved to grid.
grid1 = [];
grid2 = [];
grid3 = [];
with open(gridfile1, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
for row in csvReader:
grid1.append(row);
grid1 = asarray(grid1)
with open(gridfile2, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
for row in csvReader:
grid2.append(row);
grid2 = asarray(grid2)
with open(gridfile3, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
for row in csvReader:
grid3.append(row);
grid3 = asarray(grid3)
#here is where the data for each line is read in and saved to dataEmissionlines
dataEmissionlines1 = [];
dataEmissionlines2 = [];
dataEmissionlines3 = [];
with open(Elines1, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
headers = csvReader.next()
for row in csvReader:
dataEmissionlines1.append(row);
dataEmissionlines1 = asarray(dataEmissionlines1)
with open(Elines2, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
headers2 = csvReader.next()
for row in csvReader:
dataEmissionlines2.append(row);
dataEmissionlines2 = asarray(dataEmissionlines2)
with open(Elines3, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
headers3 = csvReader.next()
for row in csvReader:
dataEmissionlines3.append(row);
dataEmissionlines3 = asarray(dataEmissionlines3)
print "import files complete"
# ---------------------------------------------------
#for concatenating grid
#pull the phi and hdens values from each of the runs. exclude header lines
grid1new = zeros((len(grid1[:,0])-1,2))
grid1new[:,0] = grid1[1:,6]
grid1new[:,1] = grid1[1:,7]
grid2new = zeros((len(grid2[:,0])-1,2))
x = array(17.00000)
grid2new[:,0] = repeat(x,len(grid2[:,0])-1)
grid2new[:,1] = grid2[1:,6]
grid3new = zeros((len(grid3[:,0])-1,2))
grid3new[:,0] = grid3[1:,6]
grid3new[:,1] = grid3[1:,7]
grid = concatenate((grid1new,grid2new,grid3new))
hdens_values = grid[:,1]
phi_values = grid[:,0]
# ---------------------------------------------------
#for concatenating Emission lines data
Emissionlines = concatenate((dataEmissionlines1[:,1:],dataEmissionlines2[:,1:],dataEmissionlines3[:,1:]))
#for lines
headers = headers[1:]
concatenated_data = zeros((len(Emissionlines),len(Emissionlines[0])))
max_values = zeros((len(concatenated_data[0]),4))
# ---------------------------------------------------
#constructing grid by scaling
#select the scaling factor
#for 1215
#incident = Emissionlines[1:,4]
#for 4860
incident = concatenated_data[:,57]
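#(editorial note, tentative) 4860 and 1215 appear to refer to the H-beta 4861 A and
#Lyman-alpha 1216 A reference lines; the loop below stores the log of each line's
#strength relative to the chosen reference line, scaled by that reference wavelength.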
#take the ratio of incident and all the lines and put it all in an array concatenated_data
for i in range(len(Emissionlines)):
for j in range(len(Emissionlines[0])):
if math.log(4860.*(float(Emissionlines[i,j])/float(Emissionlines[i,57])), 10) > 0:
concatenated_data[i,j] = math.log(4860.*(float(Emissionlines[i,j])/float(Emissionlines[i,57])), 10)
else:
            concatenated_data[i,j] = 0
# for 1215
#for i in range(len(Emissionlines)):
# for j in range(len(Emissionlines[0])):
# if math.log(1215.*(float(Emissionlines[i,j])/float(Emissionlines[i,4])), 10) > 0:
# concatenated_data[i,j] = math.log(1215.*(float(Emissionlines[i,j])/float(Emissionlines[i,4])), 10)
# else:
# concatenated_data[i,j] == 0
# ---------------------------------------------------
#find the maxima to plot onto the contour plots
for j in range(len(concatenated_data[0])):
max_values[j,0] = max(concatenated_data[:,j])
max_values[j,1] = argmax(concatenated_data[:,j], axis = 0)
max_values[j,2] = hdens_values[max_values[j,1]]
max_values[j,3] = phi_values[max_values[j,1]]
#to round off the maxima
max_values[:,0] = [ '%.1f' % elem for elem in max_values[:,0] ]
print "data arranged"
# ---------------------------------------------------
#Creating the grid to interpolate with for contours.
gridarray = zeros((len(concatenated_data),2))
gridarray[:,0] = hdens_values
gridarray[:,1] = phi_values
x = gridarray[:,0]
y = gridarray[:,1]
# ---------------------------------------------------
#change desired lines here!
line = [75, #AR 3 7135
76, #TOTL 7325
78, #AR 3 7751
79, #6LEV 8446
80, #CA2X 8498
81, #CA2Y 8542
82, #CA2Z 8662
83, #CA 2 8579A
84, #S 3 9069
85, #H 1 9229
86, #S 3 9532
87] #H 1 9546
#create z array for this plot with given lines
z = concatenated_data[:,line[:]]
# ---------------------------------------------------
# Interpolate
print "starting interpolation"
xi, yi = linspace(x.min(), x.max(), 10), linspace(y.min(), y.max(), 10)
xi, yi = meshgrid(xi, yi)
# ---------------------------------------------------
print "interpolatation complete; now plotting"
#plot
plt.subplots_adjust(wspace=0, hspace=0) #remove space between plots
levels = arange(10**-1,10, .2)
levels2 = arange(10**-2,10**2, 1)
plt.suptitle("Dusty IR Lines", fontsize=14)
# ---------------------------------------------------
for i in range(12):
    add_sub_plot(i+1)
ax1 = plt.subplot(3,4,1)
add_patches(ax1)
print "figure complete"
plt.savefig('Dusty_Near_IR.pdf')
plt.clf()
print "figure saved"
| gpl-2.0 |
abhishekgahlot/scikit-learn | examples/cross_decomposition/plot_compare_cross_decomposition.py | 142 | 4761 | """
===================================
Compare cross decomposition methods
===================================
Simple usage of various cross decomposition algorithms:
- PLSCanonical
- PLSRegression, with multivariate response, a.k.a. PLS2
- PLSRegression, with univariate response, a.k.a. PLS1
- CCA
Given 2 multivariate covarying two-dimensional datasets, X, and Y,
PLS extracts the 'directions of covariance', i.e. the components of each
dataset that explain the most shared variance between both datasets.
This is apparent on the **scatterplot matrix** display: components 1 in
dataset X and dataset Y are maximally correlated (points lie around the
first diagonal). This is also true for components 2 in both datasets,
however, the correlation across datasets for different components is
weak: the point cloud is very spherical.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cross_decomposition import PLSCanonical, PLSRegression, CCA
###############################################################################
# Dataset based latent variables model
n = 500
# 2 latents vars:
l1 = np.random.normal(size=n)
l2 = np.random.normal(size=n)
latents = np.array([l1, l1, l2, l2]).T
X = latents + np.random.normal(size=4 * n).reshape((n, 4))
Y = latents + np.random.normal(size=4 * n).reshape((n, 4))
X_train = X[:n // 2]
Y_train = Y[:n // 2]
X_test = X[n // 2:]
Y_test = Y[n // 2:]
print("Corr(X)")
print(np.round(np.corrcoef(X.T), 2))
print("Corr(Y)")
print(np.round(np.corrcoef(Y.T), 2))
###############################################################################
# Canonical (symmetric) PLS
# Transform data
# ~~~~~~~~~~~~~~
plsca = PLSCanonical(n_components=2)
plsca.fit(X_train, Y_train)
X_train_r, Y_train_r = plsca.transform(X_train, Y_train)
X_test_r, Y_test_r = plsca.transform(X_test, Y_test)
# Scatter plot of scores
# ~~~~~~~~~~~~~~~~~~~~~~
# 1) On diagonal plot X vs Y scores on each components
plt.figure(figsize=(12, 8))
plt.subplot(221)
plt.plot(X_train_r[:, 0], Y_train_r[:, 0], "ob", label="train")
plt.plot(X_test_r[:, 0], Y_test_r[:, 0], "or", label="test")
plt.xlabel("x scores")
plt.ylabel("y scores")
plt.title('Comp. 1: X vs Y (test corr = %.2f)' %
np.corrcoef(X_test_r[:, 0], Y_test_r[:, 0])[0, 1])
plt.xticks(())
plt.yticks(())
plt.legend(loc="best")
plt.subplot(224)
plt.plot(X_train_r[:, 1], Y_train_r[:, 1], "ob", label="train")
plt.plot(X_test_r[:, 1], Y_test_r[:, 1], "or", label="test")
plt.xlabel("x scores")
plt.ylabel("y scores")
plt.title('Comp. 2: X vs Y (test corr = %.2f)' %
np.corrcoef(X_test_r[:, 1], Y_test_r[:, 1])[0, 1])
plt.xticks(())
plt.yticks(())
plt.legend(loc="best")
# 2) Off diagonal plot components 1 vs 2 for X and Y
plt.subplot(222)
plt.plot(X_train_r[:, 0], X_train_r[:, 1], "*b", label="train")
plt.plot(X_test_r[:, 0], X_test_r[:, 1], "*r", label="test")
plt.xlabel("X comp. 1")
plt.ylabel("X comp. 2")
plt.title('X comp. 1 vs X comp. 2 (test corr = %.2f)'
% np.corrcoef(X_test_r[:, 0], X_test_r[:, 1])[0, 1])
plt.legend(loc="best")
plt.xticks(())
plt.yticks(())
plt.subplot(223)
plt.plot(Y_train_r[:, 0], Y_train_r[:, 1], "*b", label="train")
plt.plot(Y_test_r[:, 0], Y_test_r[:, 1], "*r", label="test")
plt.xlabel("Y comp. 1")
plt.ylabel("Y comp. 2")
plt.title('Y comp. 1 vs Y comp. 2 , (test corr = %.2f)'
% np.corrcoef(Y_test_r[:, 0], Y_test_r[:, 1])[0, 1])
plt.legend(loc="best")
plt.xticks(())
plt.yticks(())
plt.show()
###############################################################################
# PLS regression, with multivariate response, a.k.a. PLS2
n = 1000
q = 3
p = 10
X = np.random.normal(size=n * p).reshape((n, p))
B = np.array([[1, 2] + [0] * (p - 2)] * q).T
# each Yj = 1*X1 + 2*X2 + noise
Y = np.dot(X, B) + np.random.normal(size=n * q).reshape((n, q)) + 5
pls2 = PLSRegression(n_components=3)
pls2.fit(X, Y)
print("True B (such that: Y = XB + Err)")
print(B)
# compare pls2.coefs with B
print("Estimated B")
print(np.round(pls2.coefs, 1))
pls2.predict(X)
###############################################################################
# PLS regression, with univariate response, a.k.a. PLS1
n = 1000
p = 10
X = np.random.normal(size=n * p).reshape((n, p))
y = X[:, 0] + 2 * X[:, 1] + np.random.normal(size=n * 1) + 5
pls1 = PLSRegression(n_components=3)
pls1.fit(X, y)
# note that the number of components exceeds 1 (the dimension of y)
print("Estimated betas")
print(np.round(pls1.coefs, 1))
###############################################################################
# CCA (PLS mode B with symmetric deflation)
cca = CCA(n_components=2)
cca.fit(X_train, Y_train)
X_train_r, Y_train_r = plsca.transform(X_train, Y_train)
X_test_r, Y_test_r = plsca.transform(X_test, Y_test)
| bsd-3-clause |
CamDavidsonPilon/StartupFiles | startup/01-plotting.py | 1 | 1243 | # nice plotting functions.
# %pyplot, %plt, %plot
@register_line_magic('plt')
@register_line_magic('plot') #this is just an alias: if I type plot, I better plot.
@register_line_magic('pyplot')
@register_line_magic('pyploy') #common mispelling I make
def _pyplot(line):
_ip.run_line_magic('matplotlib', line)
_ip.run_code("""from matplotlib import pyplot as plt""")
# use Bayesian Methods for Hackers plotting style
_ip.run_code("""plt.style.use('bmh')""")
# better hists
def hist_(*args, **kwargs):
kwargs.pop('alpha', None)
kwargs.pop('histtype', None)
kwargs.pop('normed', None)
return plt.hist(*args, histtype='stepfilled', alpha=0.85, normed=True, **kwargs)
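# Example (illustrative, not executed at startup):
# hist_(samples, bins=30) draws a filled, normalised histogram without
# repeating the styling kwargs each time.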
# <3 figsize
def figsize(sizex, sizey):
"""Set the default figure size to be [sizex, sizey].
This is just an easy to remember, convenience wrapper that sets::
matplotlib.rcParams['figure.figsize'] = [sizex, sizey]
"""
import matplotlib
matplotlib.rcParams['figure.figsize'] = [sizex, sizey]
# aliases
_ip.user_ns['hist_'] = hist_
_ip.user_ns['figsize'] = figsize
_ip.user_ns['plot'] = plt.plot
_ip.user_ns['subplot'] = plt.subplot
del _pyplot
| mit |
victorbergelin/scikit-learn | examples/datasets/plot_random_dataset.py | 348 | 2254 | """
==============================================
Plot randomly generated classification dataset
==============================================
Plot several randomly generated 2D classification datasets.
This example illustrates the :func:`datasets.make_classification`,
:func:`datasets.make_blobs` and :func:`datasets.make_gaussian_quantiles`
functions.
For ``make_classification``, three binary and two multi-class classification
datasets are generated, with different numbers of informative features and
clusters per class. """
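# (Editorial note) In the calls below, `n_informative` is the number of features that
# actually carry class signal, `n_redundant` adds linear combinations of them, and
# `n_clusters_per_class` splits each class into that many Gaussian clusters.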
print(__doc__)
import matplotlib.pyplot as plt
from sklearn.datasets import make_classification
from sklearn.datasets import make_blobs
from sklearn.datasets import make_gaussian_quantiles
plt.figure(figsize=(8, 8))
plt.subplots_adjust(bottom=.05, top=.9, left=.05, right=.95)
plt.subplot(321)
plt.title("One informative feature, one cluster per class", fontsize='small')
X1, Y1 = make_classification(n_features=2, n_redundant=0, n_informative=1,
n_clusters_per_class=1)
plt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1)
plt.subplot(322)
plt.title("Two informative features, one cluster per class", fontsize='small')
X1, Y1 = make_classification(n_features=2, n_redundant=0, n_informative=2,
n_clusters_per_class=1)
plt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1)
plt.subplot(323)
plt.title("Two informative features, two clusters per class", fontsize='small')
X2, Y2 = make_classification(n_features=2, n_redundant=0, n_informative=2)
plt.scatter(X2[:, 0], X2[:, 1], marker='o', c=Y2)
plt.subplot(324)
plt.title("Multi-class, two informative features, one cluster",
fontsize='small')
X1, Y1 = make_classification(n_features=2, n_redundant=0, n_informative=2,
n_clusters_per_class=1, n_classes=3)
plt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1)
plt.subplot(325)
plt.title("Three blobs", fontsize='small')
X1, Y1 = make_blobs(n_features=2, centers=3)
plt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1)
plt.subplot(326)
plt.title("Gaussian divided into three quantiles", fontsize='small')
X1, Y1 = make_gaussian_quantiles(n_features=2, n_classes=3)
plt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1)
plt.show()
| bsd-3-clause |
openfisca/openfisca-france-indirect-taxation | openfisca_france_indirect_taxation/param/preprocessing.py | 4 | 25251 | # -*- coding: utf-8 -*-
# OpenFisca -- A versatile microsimulation software
# By: OpenFisca Team <[email protected]>
#
# Copyright (C) 2011, 2012, 2013, 2014, 2015 OpenFisca Team
# https://github.com/openfisca
#
# This file is part of OpenFisca.
#
# OpenFisca is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# OpenFisca is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from openfisca_core import reforms
def preprocess_legislation(legislation_json):
'''
Preprocess the legislation parameters to add prices and amounts from national accounts
'''
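    # (Editorial note) Every block below follows the same pattern: read a CSV of
    # yearly values, build a "Node" dict whose "children" are "Parameter" dicts
    # holding {'start', 'stop', 'value'} periods, then attach the node under
    # legislation_json['children']['imposition_indirecte']['children'].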
import os
import pkg_resources
import pandas as pd
# Add fuel prices to the tree
default_config_files_directory = os.path.join(
pkg_resources.get_distribution('openfisca_france_indirect_taxation').location)
prix_annuel_carburants = pd.read_csv(
os.path.join(
default_config_files_directory,
'openfisca_france_indirect_taxation',
'assets',
'prix',
'prix_annuel_carburants.csv'
), sep =';'
)
prix_annuel_carburants['Date'] = prix_annuel_carburants['Date'].astype(int)
prix_annuel_carburants = prix_annuel_carburants.set_index('Date')
all_values = {}
prix_carburants = {
"@type": "Node",
"description": "prix des carburants en euros par hectolitre",
"children": {},
}
    # For super_95_e10, we use the price of super_95 from 2009 to 2012 inclusive,
    # because we don't have the data. Super_95 is very close in price, so this should not affect the results much.
prix_annuel = prix_annuel_carburants['super_95_e10_ttc']
all_values['super_95_e10_ttc'] = []
for year in range(1990, 2009):
values1 = dict()
values1['start'] = u'{}-01-01'.format(year)
values1['stop'] = u'{}-12-31'.format(year)
values1['value'] = prix_annuel.loc[year] * 100
all_values['super_95_e10_ttc'].append(values1)
prix_annuel = prix_annuel_carburants['super_95_ttc']
for year in range(2009, 2013):
values2 = dict()
values2['start'] = u'{}-01-01'.format(year)
values2['stop'] = u'{}-12-31'.format(year)
values2['value'] = prix_annuel.loc[year] * 100
all_values['super_95_e10_ttc'].append(values2)
prix_annuel = prix_annuel_carburants['super_95_e10_ttc']
for year in range(2013, 2015):
values3 = dict()
values3['start'] = u'{}-01-01'.format(year)
values3['stop'] = u'{}-12-31'.format(year)
values3['value'] = prix_annuel.loc[year] * 100
all_values['super_95_e10_ttc'].append(values3)
prix_carburants['children']['super_95_e10_ttc'] = {
"@type": "Parameter",
"description": 'super_95_e10_ttc'.replace('_', ' '),
"format": "float",
"values": all_values['super_95_e10_ttc']
}
for element in ['diesel_ht', 'diesel_ttc', 'super_95_ht', 'super_95_ttc', 'super_98_ht', 'super_98_ttc',
'super_95_e10_ht', 'gplc_ht', 'gplc_ttc', 'super_plombe_ht', 'super_plombe_ttc']:
assert element in prix_annuel_carburants.columns
prix_annuel = prix_annuel_carburants[element]
all_values[element] = []
for year in range(1990, 2015):
values = dict()
values['start'] = u'{}-01-01'.format(year)
values['stop'] = u'{}-12-31'.format(year)
values['value'] = prix_annuel.loc[year] * 100
all_values[element].append(values)
prix_carburants['children'][element] = {
"@type": "Parameter",
"description": element.replace('_', ' '),
"format": "float",
"values": all_values[element]
}
legislation_json['children']['imposition_indirecte']['children']['prix_carburants'] = prix_carburants
# Add the number of vehicle in circulation to the tree
default_config_files_directory = os.path.join(
pkg_resources.get_distribution('openfisca_france_indirect_taxation').location)
parc_annuel_moyen_vp = pd.read_csv(
os.path.join(
default_config_files_directory,
'openfisca_france_indirect_taxation',
'assets',
'quantites',
'parc_annuel_moyen_vp.csv'
), sep =';'
)
parc_annuel_moyen_vp = parc_annuel_moyen_vp.set_index('Unnamed: 0')
values_parc = {}
parc_vp = {
"@type": "Node",
"description": "taille moyenne du parc automobile en France métropolitaine en milliers de véhicules",
"children": {},
}
for element in ['diesel', 'essence']:
taille_parc = parc_annuel_moyen_vp[element]
values_parc[element] = []
for year in range(1990, 2014):
values = dict()
values['start'] = u'{}-01-01'.format(year)
values['stop'] = u'{}-12-31'.format(year)
values['value'] = taille_parc.loc[year]
values_parc[element].append(values)
parc_vp['children'][element] = {
"@type": "Parameter",
"description": "nombre de véhicules particuliers immatriculés en France à motorisation " + element,
"format": "float",
"values": values_parc[element]
}
legislation_json['children']['imposition_indirecte']['children']['parc_vp'] = parc_vp
# Add the total quantity of fuel consumed per year to the tree
default_config_files_directory = os.path.join(
pkg_resources.get_distribution('openfisca_france_indirect_taxation').location)
quantite_carbu_vp_france = pd.read_csv(
os.path.join(
default_config_files_directory,
'openfisca_france_indirect_taxation',
'assets',
'quantites',
'quantite_carbu_vp_france.csv'
), sep =';'
)
quantite_carbu_vp_france = quantite_carbu_vp_france.set_index('Unnamed: 0')
values_quantite = {}
quantite_carbu_vp = {
"@type": "Node",
"description": "quantite de carburants consommés en France métropolitaine",
"children": {},
}
for element in ['diesel', 'essence']:
quantite_carburants = quantite_carbu_vp_france[element]
values_quantite[element] = []
for year in range(1990, 2014):
values = dict()
values['start'] = u'{}-01-01'.format(year)
values['stop'] = u'{}-12-31'.format(year)
values['value'] = quantite_carburants.loc[year]
values_quantite[element].append(values)
quantite_carbu_vp['children'][element] = {
"@type": "Parameter",
"description": "consommation totale de " + element + " en France",
"format": "float",
"values": values_quantite[element]
}
legislation_json['children']['imposition_indirecte']['children']['quantite_carbu_vp'] = quantite_carbu_vp
    # Add the shares of each type of supercarburant (SP95, SP98, E10, etc.) among supercarburants
default_config_files_directory = os.path.join(
pkg_resources.get_distribution('openfisca_france_indirect_taxation').location)
part_des_types_de_supercarburants = pd.read_csv(
os.path.join(
default_config_files_directory,
'openfisca_france_indirect_taxation',
'assets',
'part_des_types_de_supercarburants.csv'
), sep =';'
)
del part_des_types_de_supercarburants['Source']
part_des_types_de_supercarburants = \
part_des_types_de_supercarburants[part_des_types_de_supercarburants['annee'] > 0].copy()
part_des_types_de_supercarburants['annee'] = part_des_types_de_supercarburants['annee'].astype(int)
part_des_types_de_supercarburants = part_des_types_de_supercarburants.set_index('annee')
# delete share of e_85 because we have no data for its price
# When the sum of all shares is not one, need to multiply each share by the same coefficient
cols = part_des_types_de_supercarburants.columns
for element in cols:
part_des_types_de_supercarburants[element] = (
part_des_types_de_supercarburants[element] /
(part_des_types_de_supercarburants['somme'] - part_des_types_de_supercarburants['sp_e85'])
)
del part_des_types_de_supercarburants['sp_e85']
del part_des_types_de_supercarburants['somme']
cols = part_des_types_de_supercarburants.columns
part_des_types_de_supercarburants['somme'] = 0
for element in cols:
part_des_types_de_supercarburants['somme'] += part_des_types_de_supercarburants[element]
assert (part_des_types_de_supercarburants['somme'] == 1).any(), "The weighting of the shares did not work"
values_part_supercarburants = {}
part_type_supercaburant = {
"@type": "Node",
"description": "part de la consommation totale d'essence de chaque type supercarburant",
"children": {},
}
for element in ['super_plombe', 'sp_95', 'sp_98', 'sp_e10']:
part_par_carburant = part_des_types_de_supercarburants[element]
values_part_supercarburants[element] = []
for year in range(2000, 2015):
values = dict()
values['start'] = u'{}-01-01'.format(year)
values['stop'] = u'{}-12-31'.format(year)
values['value'] = part_par_carburant.loc[year]
values_part_supercarburants[element].append(values)
part_type_supercaburant['children'][element] = {
"@type": "Parameter",
"description": "part de " + element + " dans la consommation totale d'essences",
"format": "float",
"values": values_part_supercarburants[element]
}
legislation_json['children']['imposition_indirecte']['children']['part_type_supercarburants'] = \
part_type_supercaburant
# Add data from comptabilite national about alcohol
alcool_conso_et_vin = {
"@type": "Node",
"description": "alcools",
"children": {},
}
alcool_conso_et_vin['children']['vin'] = {
"@type": "Node",
"description": "Pour calculer le taux de taxation implicite sur le vin",
"children": {
"droit_cn_vin": {
"@type": "Parameter",
"description": u"Masse droit vin, vin mousseux, cidres et poirés selon comptabilité nationale",
"format": "float",
"values": [
{'start': u'1995-01-01', 'stop': u'1995-12-31', 'value': 129},
{'start': u'1996-01-01', 'stop': u'1996-12-31', 'value': 130},
{'start': u'1997-01-01', 'stop': u'1997-12-31', 'value': 129},
{'start': u'1998-01-01', 'stop': u'1998-12-31', 'value': 132},
{'start': u'1999-01-01', 'stop': u'1999-12-31', 'value': 133},
{'start': u'2000-01-01', 'stop': u'2000-12-31', 'value': 127},
{'start': u'2001-01-01', 'stop': u'2001-12-31', 'value': 127},
{'start': u'2002-01-01', 'stop': u'2002-12-31', 'value': 127},
{'start': u'2003-01-01', 'stop': u'2003-12-31', 'value': 127},
{'start': u'2004-01-01', 'stop': u'2004-12-31', 'value': 125},
{'start': u'2005-01-01', 'stop': u'2005-12-31', 'value': 117},
{'start': u'2006-01-01', 'stop': u'2006-12-31', 'value': 119},
{'start': u'2007-01-01', 'stop': u'2007-12-31', 'value': 117},
{'start': u'2008-01-01', 'stop': u'2008-12-31', 'value': 114},
{'start': u'2009-01-01', 'stop': u'2009-12-31', 'value': 117},
{'start': u'2010-01-01', 'stop': u'2010-12-31', 'value': 119},
{'start': u'2011-01-01', 'stop': u'2011-12-31', 'value': 118},
{'start': u'2012-01-01', 'stop': u'2012-12-31', 'value': 120},
{'start': u'2013-01-01', 'stop': u'2013-12-31', 'value': 122},
# {'start': u'2014-01-01', 'stop': u'2014-12-31', 'value': },
],
},
"masse_conso_cn_vin": {
"@type": "Parameter",
"description": u"Masse consommation vin, vin mousseux, cidres et poirés selon comptabilité nationale",
"format": "float",
"values": [
{'start': u'1995-01-01', 'stop': u'1995-12-31', 'value': 7191},
{'start': u'1996-01-01', 'stop': u'1996-12-31', 'value': 7419},
{'start': u'1997-01-01', 'stop': u'1997-12-31', 'value': 7636},
{'start': u'1998-01-01', 'stop': u'1998-12-31', 'value': 8025},
{'start': u'1999-01-01', 'stop': u'1999-12-31', 'value': 8451},
{'start': u'2000-01-01', 'stop': u'2000-12-31', 'value': 8854},
{'start': u'2001-01-01', 'stop': u'2001-12-31', 'value': 9168},
{'start': u'2002-01-01', 'stop': u'2002-12-31', 'value': 9476},
{'start': u'2003-01-01', 'stop': u'2003-12-31', 'value': 9695},
{'start': u'2004-01-01', 'stop': u'2004-12-31', 'value': 9985},
{'start': u'2005-01-01', 'stop': u'2005-12-31', 'value': 9933},
{'start': u'2006-01-01', 'stop': u'2006-12-31', 'value': 10002},
{'start': u'2007-01-01', 'stop': u'2007-12-31', 'value': 10345},
{'start': u'2008-01-01', 'stop': u'2008-12-31', 'value': 10461},
{'start': u'2009-01-01', 'stop': u'2009-12-31', 'value': 10728},
{'start': u'2010-01-01', 'stop': u'2010-12-31', 'value': 11002},
{'start': u'2011-01-01', 'stop': u'2011-12-31', 'value': 11387},
{'start': u'2012-01-01', 'stop': u'2012-12-31', 'value': 11407},
{'start': u'2013-01-01', 'stop': u'2013-12-31', 'value': 11515},
# {'start': u'2014-01-01', 'stop': u'2014-12-31', 'value': },
],
},
},
}
alcool_conso_et_vin['children']['biere'] = {
"@type": "Node",
"description": "Pour calculer le taux de taxation implicite sur la bière",
"children": {
"droit_cn_biere": {
"@type": "Parameter",
"description": "Masse droit biere selon comptabilité nationale",
"format": "float",
"values": [
{'start': u'1995-01-01', 'stop': u'1995-12-31', 'value': 361},
{'start': u'1996-01-01', 'stop': u'1996-12-31', 'value': 366},
{'start': u'1997-01-01', 'stop': u'1997-12-31', 'value': 364},
{'start': u'1998-01-01', 'stop': u'1998-12-31', 'value': 365},
{'start': u'1999-01-01', 'stop': u'1999-12-31', 'value': 380},
{'start': u'2000-01-01', 'stop': u'2000-12-31', 'value': 359},
{'start': u'2001-01-01', 'stop': u'2001-12-31', 'value': 364},
{'start': u'2002-01-01', 'stop': u'2002-12-31', 'value': 361},
{'start': u'2003-01-01', 'stop': u'2003-12-31', 'value': 370},
{'start': u'2004-01-01', 'stop': u'2004-12-31', 'value': 378},
{'start': u'2005-01-01', 'stop': u'2005-12-31', 'value': 364},
{'start': u'2006-01-01', 'stop': u'2006-12-31', 'value': 396},
{'start': u'2007-01-01', 'stop': u'2007-12-31', 'value': 382},
                    {'start': u'2008-01-01', 'stop': u'2008-12-31', 'value': 375},
                    {'start': u'2009-01-01', 'stop': u'2009-12-31', 'value': 376},
{'start': u'2010-01-01', 'stop': u'2010-12-31', 'value': 375},
{'start': u'2011-01-01', 'stop': u'2011-12-31', 'value': 393},
{'start': u'2012-01-01', 'stop': u'2012-12-31', 'value': 783},
{'start': u'2013-01-01', 'stop': u'2013-12-31', 'value': 897},
# {'start': u'2014-01-01', 'stop': u'2014-12-31', 'value': },
],
},
"masse_conso_cn_biere": {
"@type": "Parameter",
"description": u"Masse consommation biere selon comptabilité nationale",
"format": "float",
"values": [
{'start': u'1995-01-01', 'stop': u'1995-12-31', 'value': 2111},
{'start': u'1996-01-01', 'stop': u'1996-12-31', 'value': 2144},
{'start': u'1997-01-01', 'stop': u'1997-12-31', 'value': 2186},
{'start': u'1998-01-01', 'stop': u'1998-12-31', 'value': 2291},
{'start': u'1999-01-01', 'stop': u'1999-12-31', 'value': 2334},
{'start': u'2000-01-01', 'stop': u'2000-12-31', 'value': 2290},
{'start': u'2001-01-01', 'stop': u'2001-12-31', 'value': 2327},
{'start': u'2002-01-01', 'stop': u'2002-12-31', 'value': 2405},
{'start': u'2003-01-01', 'stop': u'2003-12-31', 'value': 2554},
{'start': u'2004-01-01', 'stop': u'2004-12-31', 'value': 2484},
{'start': u'2005-01-01', 'stop': u'2005-12-31', 'value': 2466},
{'start': u'2006-01-01', 'stop': u'2006-12-31', 'value': 2486},
{'start': u'2007-01-01', 'stop': u'2007-12-31', 'value': 2458},
{'start': u'2008-01-01', 'stop': u'2008-12-31', 'value': 2287},
{'start': u'2009-01-01', 'stop': u'2009-12-31', 'value': 2375},
{'start': u'2010-01-01', 'stop': u'2010-12-31', 'value': 2461},
{'start': u'2011-01-01', 'stop': u'2011-12-31', 'value': 2769},
{'start': u'2012-01-01', 'stop': u'2012-12-31', 'value': 2868},
{'start': u'2013-01-01', 'stop': u'2013-12-31', 'value': 3321},
# {'start': u'2014-01-01', 'stop': u'2014-12-31', 'value': },
],
},
},
}
alcool_conso_et_vin['children']['alcools_forts'] = {
"@type": "Node",
"description": "Pour calculer le taux de taxation implicite sur alcools forts",
"children": {
"droit_cn_alcools": {
"@type": "Parameter",
"description": "Masse droit alcool selon comptabilité nationale sans droits sur les produits intermediaires et cotisation spéciale alcool fort",
"format": "float",
"values": [
{'start': u'2000-01-01', 'stop': u'2000-12-31', 'value': 1872},
{'start': u'2001-01-01', 'stop': u'2001-12-31', 'value': 1957},
{'start': u'2002-01-01', 'stop': u'2002-12-31', 'value': 1932},
{'start': u'2003-01-01', 'stop': u'2003-12-31', 'value': 1891},
{'start': u'2004-01-01', 'stop': u'2004-12-31', 'value': 1908},
{'start': u'2005-01-01', 'stop': u'2005-12-31', 'value': 1842},
{'start': u'2006-01-01', 'stop': u'2006-12-31', 'value': 1954},
{'start': u'2007-01-01', 'stop': u'2007-12-31', 'value': 1990},
{'start': u'2008-01-01', 'stop': u'2008-12-31', 'value': 2005},
{'start': u'2009-01-01', 'stop': u'2009-12-31', 'value': 2031},
{'start': u'2010-01-01', 'stop': u'2010-12-31', 'value': 2111},
{'start': u'2011-01-01', 'stop': u'2011-12-31', 'value': 2150},
{'start': u'2012-01-01', 'stop': u'2012-12-31', 'value': 2225},
                    # TODO: Problem for spirits: different figures between the two Excel databases!
],
},
"droit_cn_alcools_total": {
"@type": "Parameter",
"description": u"Masse droit alcool selon comptabilité nationale avec les differents droits",
"format": "float",
"values": [
{'start': u'1995-01-01', 'stop': u'1995-12-31', 'value': 2337},
{'start': u'1996-01-01', 'stop': u'1996-12-31', 'value': 2350},
{'start': u'1997-01-01', 'stop': u'1997-12-31', 'value': 2366},
{'start': u'1998-01-01', 'stop': u'1998-12-31', 'value': 2369},
{'start': u'1999-01-01', 'stop': u'1999-12-31', 'value': 2385},
                    {'start': u'2000-01-01', 'stop': u'2000-12-31', 'value': 2416},
                    {'start': u'2001-01-01', 'stop': u'2001-12-31', 'value': 2514},
{'start': u'2002-01-01', 'stop': u'2002-12-31', 'value': 2503},
{'start': u'2003-01-01', 'stop': u'2003-12-31', 'value': 2453},
{'start': u'2004-01-01', 'stop': u'2004-12-31', 'value': 2409},
{'start': u'2005-01-01', 'stop': u'2005-12-31', 'value': 2352},
{'start': u'2006-01-01', 'stop': u'2006-12-31', 'value': 2477},
{'start': u'2007-01-01', 'stop': u'2007-12-31', 'value': 2516},
{'start': u'2008-01-01', 'stop': u'2008-12-31', 'value': 2528},
{'start': u'2009-01-01', 'stop': u'2009-12-31', 'value': 2629},
{'start': u'2010-01-01', 'stop': u'2010-12-31', 'value': 2734},
{'start': u'2011-01-01', 'stop': u'2011-12-31', 'value': 3078},
{'start': u'2012-01-01', 'stop': u'2012-12-31', 'value': 2718},
{'start': u'2013-01-01', 'stop': u'2013-12-31', 'value': 3022},
# {'start': u'2014-01-01', 'stop': u'2014-12-31', 'value': },
],
},
"masse_conso_cn_alcools": {
"@type": "Parameter",
"description": u"Masse consommation alcool selon comptabilité nationale",
"format": "float",
"values": [
{'start': u'1995-01-01', 'stop': u'1995-12-31', 'value': 4893},
{'start': u'1996-01-01', 'stop': u'1996-12-31', 'value': 5075},
{'start': u'1997-01-01', 'stop': u'1997-12-31', 'value': 5065},
{'start': u'1998-01-01', 'stop': u'1998-12-31', 'value': 5123},
{'start': u'1999-01-01', 'stop': u'1999-12-31', 'value': 5234},
{'start': u'2000-01-01', 'stop': u'2000-12-31', 'value': 5558},
{'start': u'2001-01-01', 'stop': u'2001-12-31', 'value': 5721},
{'start': u'2002-01-01', 'stop': u'2002-12-31', 'value': 5932},
{'start': u'2003-01-01', 'stop': u'2003-12-31', 'value': 5895},
{'start': u'2004-01-01', 'stop': u'2004-12-31', 'value': 5967},
{'start': u'2005-01-01', 'stop': u'2005-12-31', 'value': 5960},
{'start': u'2006-01-01', 'stop': u'2006-12-31', 'value': 6106},
{'start': u'2007-01-01', 'stop': u'2007-12-31', 'value': 6142},
{'start': u'2008-01-01', 'stop': u'2008-12-31', 'value': 6147},
{'start': u'2009-01-01', 'stop': u'2009-12-31', 'value': 6342},
{'start': u'2010-01-01', 'stop': u'2010-12-31', 'value': 6618},
{'start': u'2011-01-01', 'stop': u'2011-12-31', 'value': 6680},
{'start': u'2012-01-01', 'stop': u'2012-12-31', 'value': 6996},
{'start': u'2013-01-01', 'stop': u'2013-12-31', 'value': 7022},
],
},
},
}
legislation_json['children']['imposition_indirecte']['children']['alcool_conso_et_vin'] = alcool_conso_et_vin
# Make the change from francs to euros for excise taxes in ticpe
keys_ticpe = legislation_json['children']['imposition_indirecte']['children']['ticpe']['children'].keys()
for element in keys_ticpe:
get_values = \
legislation_json['children']['imposition_indirecte']['children']['ticpe']['children'][element]['values']
for each_value in get_values:
get_character = '{}'.format(each_value['start'])
year = int(get_character[:4])
if year < 2002:
each_value['value'] = each_value['value'] / 6.55957
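                # Worked example (editor's note, hedged): 1 euro = 6.55957 francs, so a
                # pre-2002 excise value of, say, 3.928 F becomes roughly 0.5988 EUR here.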
return legislation_json
| agpl-3.0 |
vhte/cefetdiss | hipervolume.py | 1 | 3179 | # -*- coding: utf-8 -*-
# Copyright (C) 2010 Simon Wessing
# TU Dortmund University
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#http://pythonhosted.org/inspyred/reference.html
# set cover metric - % by which one front is better than the other
import inspyred
import numpy as np
import matplotlib.pyplot as plt
from pylab import *
from copy import copy
# Which group of non-dominated solutions
grupoNSGAII = 'multi_nsgaII_SED_'
grupoSPEA2 = 'multi_spea2_SED_'
# ic 0: NSGA-II 1: SPEA2
x = [[],[]]
# preco 0: NSGA-II 1: SPEA2
y = [[],[]]
for i in range(1,31):
    # Opening the NSGA-II files
with open(grupoNSGAII+str(i), 'r') as f:
searchlines = f.readlines()
for j,line in enumerate(searchlines):
if "ic = " in line:
exec(line)
if "preco = " in line:
exec(line)
x[0] = x[0] + ic
y[0] = y[0] + preco
with open(grupoSPEA2+str(i), 'r') as f:
searchlines = f.readlines()
for j,line in enumerate(searchlines):
if "ic = " in line:
exec(line)
if "custo = " in line:
exec(line)
x[1] = x[1] + ic
y[1] = y[1] + custo
xReal = copy(x)
yReal = copy(y)
############################ There are 30x300 (population/file)
#print len(x[0])
i = 0
j = 0
y[0] = map(lambda d: d*(-1),y[0]) # Because inspyred's hypervolume is for max()
y[1] = map(lambda d: d*(-1),y[1])
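# Hedged illustration added for clarity (not part of the original analysis):
# inspyred's hypervolume indicator assumes every objective is maximized, which is
# why the cost values are negated above. The toy front below is invented for the
# sketch and the helper is never called by this script.
def _hypervolume_on_negated_costs_example():
    toy_front = [(0.9, -100.0), (0.8, -80.0), (0.7, -60.0)]  # (ic, -cost) pairs
    return inspyred.ec.analysis.hypervolume(toy_front)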
smetric = [[],[]] # 0 NSGA 1 SPEA
for i in range(0,30): # 9000 (30x300)
    # Build the (x, y) combination for each run i
c0 = zip(x[0][j:j+299],y[0][j:j+299])
c1 = zip(x[1][j:j+299],y[1][j:j+299])
j = j+300
smetric[0].append(inspyred.ec.analysis.hypervolume(c0))
smetric[1].append(inspyred.ec.analysis.hypervolume(c1))
data_to_plot = [smetric[0],smetric[1]]
# Create a figure instance
boxplot(data_to_plot)
#grid(True)
xticks([1, 2], ['NSGA-II', 'SPEA2'])
"""
frontNSGA = 1
frontSPEA = 1
referencePointNSGA = [16.7282,-614.12282223 ]
referencePointSPEA = [5,2836, -1078.5961772]
#AlimGra
#referencePoint = [10, 1205]
#AlimSED
#referencePoint = [460, 3135]
print inspyred.ec.analysis.hypervolume(frontNSGA)
print inspyred.ec.analysis.hypervolume(frontSPEA)
print len(frontNSGA)
"""
############ SET COVER METRIC ###########
# Question: in [0,1], how many NSGA points are dominated by SPEA
cont = 0
for i in range(0,len(x[1])):
print 'Verificando solucao %d/%d' % (i,len(x[1]))
for j in range(0,len(x[0])):
if x[1][i] >= x[0][j] and y[1][i] <= y[0][j]:
cont = cont + 1
break
res = cont/float(len(x[0]))
print 'C(SPEA2, NSGA-II) = %f' % (res) | mit |
zorroblue/scikit-learn | examples/model_selection/plot_validation_curve.py | 141 | 1931 | """
==========================
Plotting Validation Curves
==========================
In this plot you can see the training scores and validation scores of an SVM
for different values of the kernel parameter gamma. For very low values of
gamma, you can see that both the training score and the validation score are
low. This is called underfitting. Medium values of gamma will result in high
values for both scores, i.e. the classifier is performing fairly well. If gamma
is too high, the classifier will overfit, which means that the training score
is good but the validation score is poor.
"""
print(__doc__)
import matplotlib.pyplot as plt
import numpy as np
from sklearn.datasets import load_digits
from sklearn.svm import SVC
from sklearn.model_selection import validation_curve
digits = load_digits()
X, y = digits.data, digits.target
param_range = np.logspace(-6, -1, 5)
train_scores, test_scores = validation_curve(
SVC(), X, y, param_name="gamma", param_range=param_range,
cv=10, scoring="accuracy", n_jobs=1)
train_scores_mean = np.mean(train_scores, axis=1)
train_scores_std = np.std(train_scores, axis=1)
test_scores_mean = np.mean(test_scores, axis=1)
test_scores_std = np.std(test_scores, axis=1)
plt.title("Validation Curve with SVM")
plt.xlabel(r"$\gamma$")
plt.ylabel("Score")
plt.ylim(0.0, 1.1)
lw = 2
plt.semilogx(param_range, train_scores_mean, label="Training score",
color="darkorange", lw=lw)
plt.fill_between(param_range, train_scores_mean - train_scores_std,
train_scores_mean + train_scores_std, alpha=0.2,
color="darkorange", lw=lw)
plt.semilogx(param_range, test_scores_mean, label="Cross-validation score",
color="navy", lw=lw)
plt.fill_between(param_range, test_scores_mean - test_scores_std,
test_scores_mean + test_scores_std, alpha=0.2,
color="navy", lw=lw)
plt.legend(loc="best")
plt.show()
| bsd-3-clause |
ricsatjr/mplstereonet | examples/contour_angelier_data.py | 2 | 2304 | """
Reproduce Figure 5 from Vollmer, 1995 to illustrate different density contouring
methods.
"""
import matplotlib.pyplot as plt
import mplstereonet
import parse_angelier_data
def plot(ax, strike, dip, rake, **kwargs):
ax.rake(strike, dip, rake, 'ko', markersize=2)
ax.density_contour(strike, dip, rake, measurement='rakes', **kwargs)
# Load data from Angelier, 1979
strike, dip, rake = parse_angelier_data.load()
# Setup a subplot grid
fig, axes = mplstereonet.subplots(nrows=3, ncols=4)
# Hide azimuth tick labels
for ax in axes.flat:
ax.set_azimuth_ticks([])
contours = [range(2, 18, 2), range(1,21,2), range(1,22,2)]
# "Standard" Kamb contouring with different confidence levels.
for sigma, ax, contour in zip([3, 2, 1], axes[:,0], contours):
# We're reducing the gridsize to more closely match a traditional
# hand-contouring grid, similar to Kamb's original work and Vollmer's
# Figure 5. `gridsize=10` produces a 10x10 grid of density estimates.
plot(ax, strike, dip, rake, method='kamb', sigma=sigma,
levels=contour, gridsize=10)
# Kamb contouring with inverse-linear smoothing (after Vollmer, 1995)
for sigma, ax, contour in zip([3, 2, 1], axes[:,1], contours):
plot(ax, strike, dip, rake, method='linear_kamb', sigma=sigma,
levels=contour)
template = r'$E={}\sigma$ Contours: ${}\sigma,{}\sigma,\ldots$'
ax.set_xlabel(template.format(sigma, *contour[:2]))
# Kamb contouring with exponential smoothing (after Vollmer, 1995)
for sigma, ax, contour in zip([3, 2, 1], axes[:,2], contours):
plot(ax, strike, dip, rake, method='exponential_kamb', sigma=sigma,
levels=contour)
# Title the different methods
methods = ['Kamb', 'Linear\nSmoothing', 'Exponential\nSmoothing']
for ax, title in zip(axes[0,:], methods):
ax.set_title(title)
# Hide top-right axis... (Need to implement Diggle & Fisher's method)
axes[0,-1].set_visible(False)
# Schmidt contouring (a.k.a. 1%)
plot(axes[1,-1], strike, dip, rake, method='schmidt', gridsize=25,
levels=range(3,20,3))
axes[1,-1].set_title('Schmidt')
axes[1, -1].set_xlabel(r'Contours: $3\%,6\%,\ldots$')
# Raw data.
axes[-1,-1].set_azimuth_ticks([])
axes[-1,-1].rake(strike, dip, rake, 'ko', markersize=2)
axes[-1,-1].set_xlabel('N={}'.format(len(strike)))
plt.show()
| mit |
LumPenPacK/NetworkExtractionFromImages | osx_build/nefi2_osx_amd64_xcode_2015/site-packages/networkx/convert_matrix.py | 10 | 33329 | """Functions to convert NetworkX graphs to and from numpy/scipy matrices.
The preferred way of converting data to a NetworkX graph is through the
graph constuctor. The constructor calls the to_networkx_graph() function
which attempts to guess the input type and convert it automatically.
Examples
--------
Create a 10 node random graph from a numpy matrix
>>> import numpy
>>> a = numpy.reshape(numpy.random.random_integers(0,1,size=100),(10,10))
>>> D = nx.DiGraph(a)
or equivalently
>>> D = nx.to_networkx_graph(a,create_using=nx.DiGraph())
See Also
--------
nx_agraph, nx_pydot
"""
# Copyright (C) 2006-2014 by
# Aric Hagberg <[email protected]>
# Dan Schult <[email protected]>
# Pieter Swart <[email protected]>
# All rights reserved.
# BSD license.
import warnings
import itertools
import networkx as nx
from networkx.convert import _prep_create_using
from networkx.utils import not_implemented_for
__author__ = """\n""".join(['Aric Hagberg <[email protected]>',
'Pieter Swart ([email protected])',
'Dan Schult([email protected])'])
__all__ = ['from_numpy_matrix', 'to_numpy_matrix',
'from_pandas_dataframe', 'to_pandas_dataframe',
'to_numpy_recarray',
'from_scipy_sparse_matrix', 'to_scipy_sparse_matrix']
def to_pandas_dataframe(G, nodelist=None, multigraph_weight=sum, weight='weight', nonedge=0.0):
"""Return the graph adjacency matrix as a Pandas DataFrame.
Parameters
----------
G : graph
The NetworkX graph used to construct the Pandas DataFrame.
nodelist : list, optional
The rows and columns are ordered according to the nodes in `nodelist`.
If `nodelist` is None, then the ordering is produced by G.nodes().
multigraph_weight : {sum, min, max}, optional
An operator that determines how weights in multigraphs are handled.
The default is to sum the weights of the multiple edges.
weight : string or None, optional
The edge attribute that holds the numerical value used for
the edge weight. If an edge does not have that attribute, then the
value 1 is used instead.
nonedge : float, optional
The matrix values corresponding to nonedges are typically set to zero.
However, this could be undesirable if there are matrix values
corresponding to actual edges that also have the value zero. If so,
one might prefer nonedges to have some other value, such as nan.
Returns
-------
df : Pandas DataFrame
Graph adjacency matrix
Notes
-----
The DataFrame entries are assigned to the weight edge attribute. When
an edge does not have a weight attribute, the value of the entry is set to
the number 1. For multiple (parallel) edges, the values of the entries
are determined by the 'multigraph_weight' parameter. The default is to
sum the weight attributes for each of the parallel edges.
When `nodelist` does not contain every node in `G`, the matrix is built
from the subgraph of `G` that is induced by the nodes in `nodelist`.
The convention used for self-loop edges in graphs is to assign the
diagonal matrix entry value to the weight attribute of the edge
(or the number 1 if the edge has no weight attribute). If the
alternate convention of doubling the edge weight is desired the
resulting Pandas DataFrame can be modified as follows:
>>> import pandas as pd
>>> import numpy as np
>>> G = nx.Graph([(1,1)])
>>> df = nx.to_pandas_dataframe(G)
>>> df
       1
    1  1
>>> df.values[np.diag_indices_from(df)] *= 2
>>> df
       1
    1  2
Examples
--------
>>> G = nx.MultiDiGraph()
>>> G.add_edge(0,1,weight=2)
>>> G.add_edge(1,0)
>>> G.add_edge(2,2,weight=3)
>>> G.add_edge(2,2)
>>> nx.to_pandas_dataframe(G, nodelist=[0,1,2])
       0  1  2
    0  0  2  0
    1  1  0  0
    2  0  0  4
"""
import pandas as pd
M = to_numpy_matrix(G, nodelist, None, None, multigraph_weight, weight, nonedge)
if nodelist is None:
nodelist = G.nodes()
nodeset = set(nodelist)
df = pd.DataFrame(data=M, index = nodelist ,columns = nodelist)
return df
def from_pandas_dataframe(df, source, target, edge_attr=None,
create_using=None):
"""Return a graph from Pandas DataFrame.
The Pandas DataFrame should contain at least two columns of node names and
zero or more columns of node attributes. Each row will be processed as one
edge instance.
Note: This function iterates over DataFrame.values, which is not
guaranteed to retain the data type across columns in the row. This is only
a problem if your row is entirely numeric and a mix of ints and floats. In
that case, all values will be returned as floats. See the
DataFrame.iterrows documentation for an example.
Parameters
----------
df : Pandas DataFrame
An edge list representation of a graph
source : str or int
    A valid column name (string or integer) for the source nodes (for the
directed case).
target : str or int
    A valid column name (string or integer) for the target nodes (for the
directed case).
edge_attr : str or int, iterable, True
A valid column name (str or integer) or list of column names that will
be used to retrieve items from the row and add them to the graph as edge
attributes. If `True`, all of the remaining columns will be added.
create_using : NetworkX graph
Use specified graph for result. The default is Graph()
See Also
--------
to_pandas_dataframe
Examples
--------
Simple integer weights on edges:
>>> import pandas as pd
>>> import numpy as np
>>> r = np.random.RandomState(seed=5)
>>> ints = r.random_integers(1, 10, size=(3,2))
>>> a = ['A', 'B', 'C']
>>> b = ['D', 'A', 'E']
>>> df = pd.DataFrame(ints, columns=['weight', 'cost'])
>>> df[0] = a
>>> df['b'] = b
>>> df
      weight  cost 0  b
    0       4     7 A  D
    1       7     1 B  A
    2      10     9 C  E
>>> G=nx.from_pandas_dataframe(df, 0, 'b', ['weight', 'cost'])
>>> G['E']['C']['weight']
10
>>> G['E']['C']['cost']
9
"""
g = _prep_create_using(create_using)
# Index of source and target
src_i = df.columns.get_loc(source)
tar_i = df.columns.get_loc(target)
if edge_attr:
# If all additional columns requested, build up a list of tuples
# [(name, index),...]
if edge_attr is True:
# Create a list of all columns indices, ignore nodes
edge_i = []
for i, col in enumerate(df.columns):
if col is not source and col is not target:
edge_i.append((col, i))
# If a list or tuple of name is requested
elif isinstance(edge_attr, (list, tuple)):
edge_i = [(i, df.columns.get_loc(i)) for i in edge_attr]
# If a string or int is passed
else:
edge_i = [(edge_attr, df.columns.get_loc(edge_attr)),]
# Iteration on values returns the rows as Numpy arrays
for row in df.values:
g.add_edge(row[src_i], row[tar_i], {i:row[j] for i, j in edge_i})
# If no column names are given, then just return the edges.
else:
for row in df.values:
g.add_edge(row[src_i], row[tar_i])
return g
def to_numpy_matrix(G, nodelist=None, dtype=None, order=None,
multigraph_weight=sum, weight='weight', nonedge=0.0):
"""Return the graph adjacency matrix as a NumPy matrix.
Parameters
----------
G : graph
The NetworkX graph used to construct the NumPy matrix.
nodelist : list, optional
The rows and columns are ordered according to the nodes in ``nodelist``.
If ``nodelist`` is None, then the ordering is produced by G.nodes().
dtype : NumPy data type, optional
A valid single NumPy data type used to initialize the array.
This must be a simple type such as int or numpy.float64 and
not a compound data type (see to_numpy_recarray)
If None, then the NumPy default is used.
order : {'C', 'F'}, optional
Whether to store multidimensional data in C- or Fortran-contiguous
(row- or column-wise) order in memory. If None, then the NumPy default
is used.
multigraph_weight : {sum, min, max}, optional
An operator that determines how weights in multigraphs are handled.
The default is to sum the weights of the multiple edges.
weight : string or None optional (default = 'weight')
The edge attribute that holds the numerical value used for
the edge weight. If an edge does not have that attribute, then the
value 1 is used instead.
nonedge : float (default = 0.0)
The matrix values corresponding to nonedges are typically set to zero.
However, this could be undesirable if there are matrix values
corresponding to actual edges that also have the value zero. If so,
one might prefer nonedges to have some other value, such as nan.
Returns
-------
M : NumPy matrix
Graph adjacency matrix
See Also
--------
to_numpy_recarray, from_numpy_matrix
Notes
-----
The matrix entries are assigned to the weight edge attribute. When
an edge does not have a weight attribute, the value of the entry is set to
the number 1. For multiple (parallel) edges, the values of the entries
are determined by the ``multigraph_weight`` parameter. The default is to
sum the weight attributes for each of the parallel edges.
When ``nodelist`` does not contain every node in ``G``, the matrix is built
from the subgraph of ``G`` that is induced by the nodes in ``nodelist``.
The convention used for self-loop edges in graphs is to assign the
diagonal matrix entry value to the weight attribute of the edge
(or the number 1 if the edge has no weight attribute). If the
alternate convention of doubling the edge weight is desired the
resulting Numpy matrix can be modified as follows:
>>> import numpy as np
>>> G = nx.Graph([(1, 1)])
>>> A = nx.to_numpy_matrix(G)
>>> A
matrix([[ 1.]])
>>> A.A[np.diag_indices_from(A)] *= 2
>>> A
matrix([[ 2.]])
Examples
--------
>>> G = nx.MultiDiGraph()
>>> G.add_edge(0,1,weight=2)
>>> G.add_edge(1,0)
>>> G.add_edge(2,2,weight=3)
>>> G.add_edge(2,2)
>>> nx.to_numpy_matrix(G, nodelist=[0,1,2])
matrix([[ 0., 2., 0.],
[ 1., 0., 0.],
[ 0., 0., 4.]])
"""
import numpy as np
if nodelist is None:
nodelist = G.nodes()
nodeset = set(nodelist)
if len(nodelist) != len(nodeset):
msg = "Ambiguous ordering: `nodelist` contained duplicates."
raise nx.NetworkXError(msg)
nlen=len(nodelist)
undirected = not G.is_directed()
index=dict(zip(nodelist,range(nlen)))
# Initially, we start with an array of nans. Then we populate the matrix
# using data from the graph. Afterwards, any leftover nans will be
# converted to the value of `nonedge`. Note, we use nans initially,
# instead of zero, for two reasons:
#
# 1) It can be important to distinguish a real edge with the value 0
# from a nonedge with the value 0.
#
# 2) When working with multi(di)graphs, we must combine the values of all
# edges between any two nodes in some manner. This often takes the
# form of a sum, min, or max. Using the value 0 for a nonedge would
# have undesirable effects with min and max, but using nanmin and
# nanmax with initially nan values is not problematic at all.
#
# That said, there are still some drawbacks to this approach. Namely, if
# a real edge is nan, then that value is a) not distinguishable from
# nonedges and b) is ignored by the default combinator (nansum, nanmin,
# nanmax) functions used for multi(di)graphs. If this becomes an issue,
# an alternative approach is to use masked arrays. Initially, every
# element is masked and set to some `initial` value. As we populate the
# graph, elements are unmasked (automatically) when we combine the initial
# value with the values given by real edges. At the end, we convert all
# masked values to `nonedge`. Using masked arrays fully addresses reason 1,
# but for reason 2, we would still have the issue with min and max if the
# initial values were 0.0. Note: an initial value of +inf is appropriate
# for min, while an initial value of -inf is appropriate for max. When
# working with sum, an initial value of zero is appropriate. Ideally then,
# we'd want to allow users to specify both a value for nonedges and also
# an initial value. For multi(di)graphs, the choice of the initial value
# will, in general, depend on the combinator function---sensible defaults
# can be provided.
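    # Worked illustration of the point above (editor's note): for a MultiGraph with
    # two parallel edges of weights 2 and 5 and multigraph_weight=min, starting the
    # entry at 0.0 would give min(0.0, 2, 5) == 0.0, silently hiding the edges,
    # whereas starting at nan gives np.nanmin([2, nan]) == 2 and then
    # np.nanmin([5, 2]) == 2, i.e. the intended minimum edge weight.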
if G.is_multigraph():
# Handle MultiGraphs and MultiDiGraphs
M = np.zeros((nlen, nlen), dtype=dtype, order=order) + np.nan
# use numpy nan-aware operations
operator={sum:np.nansum, min:np.nanmin, max:np.nanmax}
try:
op=operator[multigraph_weight]
except:
raise ValueError('multigraph_weight must be sum, min, or max')
for u,v,attrs in G.edges_iter(data=True):
if (u in nodeset) and (v in nodeset):
i, j = index[u], index[v]
e_weight = attrs.get(weight, 1)
M[i,j] = op([e_weight, M[i,j]])
if undirected:
M[j,i] = M[i,j]
else:
# Graph or DiGraph, this is much faster than above
M = np.zeros((nlen,nlen), dtype=dtype, order=order) + np.nan
for u,nbrdict in G.adjacency_iter():
for v,d in nbrdict.items():
try:
M[index[u],index[v]] = d.get(weight,1)
except KeyError:
# This occurs when there are fewer desired nodes than
# there are nodes in the graph: len(nodelist) < len(G)
pass
M[np.isnan(M)] = nonedge
M = np.asmatrix(M)
return M
def from_numpy_matrix(A, parallel_edges=False, create_using=None):
"""Return a graph from numpy matrix.
The numpy matrix is interpreted as an adjacency matrix for the graph.
Parameters
----------
A : numpy matrix
An adjacency matrix representation of a graph
parallel_edges : Boolean
If this is ``True``, ``create_using`` is a multigraph, and ``A`` is an
integer matrix, then entry *(i, j)* in the matrix is interpreted as the
number of parallel edges joining vertices *i* and *j* in the graph. If it
is ``False``, then the entries in the adjacency matrix are interpreted as
the weight of a single edge joining the vertices.
create_using : NetworkX graph
Use specified graph for result. The default is Graph()
Notes
-----
If ``create_using`` is an instance of :class:`networkx.MultiGraph` or
:class:`networkx.MultiDiGraph`, ``parallel_edges`` is ``True``, and the
entries of ``A`` are of type ``int``, then this function returns a multigraph
(of the same type as ``create_using``) with parallel edges.
If ``create_using`` is an undirected multigraph, then only the edges
indicated by the upper triangle of the matrix `A` will be added to the
graph.
If the numpy matrix has a single data type for each matrix entry it
will be converted to an appropriate Python data type.
If the numpy matrix has a user-specified compound data type the names
of the data fields will be used as attribute keys in the resulting
NetworkX graph.
See Also
--------
to_numpy_matrix, to_numpy_recarray
Examples
--------
Simple integer weights on edges:
>>> import numpy
>>> A=numpy.matrix([[1, 1], [2, 1]])
>>> G=nx.from_numpy_matrix(A)
If ``create_using`` is a multigraph and the matrix has only integer entries,
the entries will be interpreted as weighted edges joining the vertices
(without creating parallel edges):
>>> import numpy
>>> A = numpy.matrix([[1, 1], [1, 2]])
>>> G = nx.from_numpy_matrix(A, create_using = nx.MultiGraph())
>>> G[1][1]
{0: {'weight': 2}}
If ``create_using`` is a multigraph and the matrix has only integer entries
but ``parallel_edges`` is ``True``, then the entries will be interpreted as
the number of parallel edges joining those two vertices:
>>> import numpy
>>> A = numpy.matrix([[1, 1], [1, 2]])
>>> temp = nx.MultiGraph()
>>> G = nx.from_numpy_matrix(A, parallel_edges = True, create_using = temp)
>>> G[1][1]
{0: {'weight': 1}, 1: {'weight': 1}}
User defined compound data type on edges:
>>> import numpy
>>> dt = [('weight', float), ('cost', int)]
>>> A = numpy.matrix([[(1.0, 2)]], dtype = dt)
>>> G = nx.from_numpy_matrix(A)
>>> G.edges()
[(0, 0)]
>>> G[0][0]['cost']
2
>>> G[0][0]['weight']
1.0
"""
# This should never fail if you have created a numpy matrix with numpy...
import numpy as np
kind_to_python_type={'f':float,
'i':int,
'u':int,
'b':bool,
'c':complex,
'S':str,
'V':'void'}
try: # Python 3.x
blurb = chr(1245) # just to trigger the exception
kind_to_python_type['U']=str
except ValueError: # Python 2.6+
kind_to_python_type['U']=unicode
G=_prep_create_using(create_using)
n,m=A.shape
if n!=m:
raise nx.NetworkXError("Adjacency matrix is not square.",
"nx,ny=%s"%(A.shape,))
dt=A.dtype
try:
python_type=kind_to_python_type[dt.kind]
except:
raise TypeError("Unknown numpy data type: %s"%dt)
# Make sure we get even the isolated nodes of the graph.
G.add_nodes_from(range(n))
# Get a list of all the entries in the matrix with nonzero entries. These
# coordinates will become the edges in the graph.
edges = zip(*(np.asarray(A).nonzero()))
# handle numpy constructed data type
    if python_type == 'void':
# Sort the fields by their offset, then by dtype, then by name.
fields = sorted((offset, dtype, name) for name, (dtype, offset) in
A.dtype.fields.items())
triples = ((u, v, {name: kind_to_python_type[dtype.kind](val)
for (_, dtype, name), val in zip(fields, A[u, v])})
for u, v in edges)
# If the entries in the adjacency matrix are integers, the graph is a
# multigraph, and parallel_edges is True, then create parallel edges, each
# with weight 1, for each entry in the adjacency matrix. Otherwise, create
# one edge for each positive entry in the adjacency matrix and set the
# weight of that edge to be the entry in the matrix.
elif python_type is int and G.is_multigraph() and parallel_edges:
chain = itertools.chain.from_iterable
# The following line is equivalent to:
#
# for (u, v) in edges:
# for d in range(A[u, v]):
# G.add_edge(u, v, weight=1)
#
triples = chain(((u, v, dict(weight=1)) for d in range(A[u, v]))
for (u, v) in edges)
else: # basic data type
triples = ((u, v, dict(weight=python_type(A[u, v])))
for u, v in edges)
# If we are creating an undirected multigraph, only add the edges from the
# upper triangle of the matrix. Otherwise, add all the edges. This relies
# on the fact that the vertices created in the
# ``_generated_weighted_edges()`` function are actually the row/column
# indices for the matrix ``A``.
#
# Without this check, we run into a problem where each edge is added twice
# when ``G.add_edges_from()`` is invoked below.
if G.is_multigraph() and not G.is_directed():
triples = ((u, v, d) for u, v, d in triples if u <= v)
G.add_edges_from(triples)
return G
@not_implemented_for('multigraph')
def to_numpy_recarray(G,nodelist=None,
dtype=[('weight',float)],
order=None):
"""Return the graph adjacency matrix as a NumPy recarray.
Parameters
----------
G : graph
The NetworkX graph used to construct the NumPy matrix.
nodelist : list, optional
The rows and columns are ordered according to the nodes in `nodelist`.
If `nodelist` is None, then the ordering is produced by G.nodes().
dtype : NumPy data-type, optional
A valid NumPy named dtype used to initialize the NumPy recarray.
The data type names are assumed to be keys in the graph edge attribute
dictionary.
order : {'C', 'F'}, optional
Whether to store multidimensional data in C- or Fortran-contiguous
(row- or column-wise) order in memory. If None, then the NumPy default
is used.
Returns
-------
M : NumPy recarray
The graph with specified edge data as a Numpy recarray
Notes
-----
When `nodelist` does not contain every node in `G`, the matrix is built
from the subgraph of `G` that is induced by the nodes in `nodelist`.
Examples
--------
>>> G = nx.Graph()
>>> G.add_edge(1,2,weight=7.0,cost=5)
>>> A=nx.to_numpy_recarray(G,dtype=[('weight',float),('cost',int)])
>>> print(A.weight)
[[ 0. 7.]
[ 7. 0.]]
>>> print(A.cost)
[[0 5]
[5 0]]
"""
import numpy as np
if nodelist is None:
nodelist = G.nodes()
nodeset = set(nodelist)
if len(nodelist) != len(nodeset):
msg = "Ambiguous ordering: `nodelist` contained duplicates."
raise nx.NetworkXError(msg)
nlen=len(nodelist)
undirected = not G.is_directed()
index=dict(zip(nodelist,range(nlen)))
M = np.zeros((nlen,nlen), dtype=dtype, order=order)
names=M.dtype.names
for u,v,attrs in G.edges_iter(data=True):
if (u in nodeset) and (v in nodeset):
i,j = index[u],index[v]
values=tuple([attrs[n] for n in names])
M[i,j] = values
if undirected:
M[j,i] = M[i,j]
return M.view(np.recarray)
def to_scipy_sparse_matrix(G, nodelist=None, dtype=None,
weight='weight', format='csr'):
"""Return the graph adjacency matrix as a SciPy sparse matrix.
Parameters
----------
G : graph
The NetworkX graph used to construct the NumPy matrix.
nodelist : list, optional
The rows and columns are ordered according to the nodes in `nodelist`.
If `nodelist` is None, then the ordering is produced by G.nodes().
dtype : NumPy data-type, optional
A valid NumPy dtype used to initialize the array. If None, then the
NumPy default is used.
weight : string or None optional (default='weight')
The edge attribute that holds the numerical value used for
the edge weight. If None then all edge weights are 1.
format : str in {'bsr', 'csr', 'csc', 'coo', 'lil', 'dia', 'dok'}
The type of the matrix to be returned (default 'csr'). For
some algorithms different implementations of sparse matrices
can perform better. See [1]_ for details.
Returns
-------
M : SciPy sparse matrix
Graph adjacency matrix.
Notes
-----
The matrix entries are populated using the edge attribute held in
parameter weight. When an edge does not have that attribute, the
value of the entry is 1.
For multiple edges the matrix values are the sums of the edge weights.
When `nodelist` does not contain every node in `G`, the matrix is built
from the subgraph of `G` that is induced by the nodes in `nodelist`.
Uses coo_matrix format. To convert to other formats specify the
format= keyword.
The convention used for self-loop edges in graphs is to assign the
diagonal matrix entry value to the weight attribute of the edge
(or the number 1 if the edge has no weight attribute). If the
alternate convention of doubling the edge weight is desired the
resulting Scipy sparse matrix can be modified as follows:
>>> import scipy as sp
>>> G = nx.Graph([(1,1)])
>>> A = nx.to_scipy_sparse_matrix(G)
>>> print(A.todense())
[[1]]
>>> A.setdiag(A.diagonal()*2)
>>> print(A.todense())
[[2]]
Examples
--------
>>> G = nx.MultiDiGraph()
>>> G.add_edge(0,1,weight=2)
>>> G.add_edge(1,0)
>>> G.add_edge(2,2,weight=3)
>>> G.add_edge(2,2)
>>> S = nx.to_scipy_sparse_matrix(G, nodelist=[0,1,2])
>>> print(S.todense())
[[0 2 0]
[1 0 0]
[0 0 4]]
References
----------
.. [1] Scipy Dev. References, "Sparse Matrices",
http://docs.scipy.org/doc/scipy/reference/sparse.html
"""
from scipy import sparse
if nodelist is None:
nodelist = G
nlen = len(nodelist)
if nlen == 0:
raise nx.NetworkXError("Graph has no nodes or edges")
if len(nodelist) != len(set(nodelist)):
msg = "Ambiguous ordering: `nodelist` contained duplicates."
raise nx.NetworkXError(msg)
index = dict(zip(nodelist,range(nlen)))
if G.number_of_edges() == 0:
row,col,data=[],[],[]
else:
row,col,data = zip(*((index[u],index[v],d.get(weight,1))
for u,v,d in G.edges_iter(nodelist, data=True)
if u in index and v in index))
if G.is_directed():
M = sparse.coo_matrix((data,(row,col)),
shape=(nlen,nlen), dtype=dtype)
else:
# symmetrize matrix
d = data + data
r = row + col
c = col + row
# selfloop entries get double counted when symmetrizing
# so we subtract the data on the diagonal
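        # Worked example (editor's note): a self-loop (u, u) of weight w appears once
        # in `data`, so after the data + data / row + col / col + row symmetrization
        # it is listed twice at (u, u) and coo_matrix sums duplicates to 2*w;
        # appending -w at (u, u) below restores the intended value w.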
selfloops = G.selfloop_edges(data=True)
if selfloops:
diag_index,diag_data = zip(*((index[u],-d.get(weight,1))
for u,v,d in selfloops
if u in index and v in index))
d += diag_data
r += diag_index
c += diag_index
M = sparse.coo_matrix((d, (r, c)), shape=(nlen,nlen), dtype=dtype)
try:
return M.asformat(format)
except AttributeError:
raise nx.NetworkXError("Unknown sparse matrix format: %s"%format)
def _csr_gen_triples(A):
"""Converts a SciPy sparse matrix in **Compressed Sparse Row** format to
an iterable of weighted edge triples.
"""
nrows = A.shape[0]
data, indices, indptr = A.data, A.indices, A.indptr
for i in range(nrows):
for j in range(indptr[i], indptr[i+1]):
yield i, indices[j], data[j]
def _csc_gen_triples(A):
"""Converts a SciPy sparse matrix in **Compressed Sparse Column** format to
an iterable of weighted edge triples.
"""
ncols = A.shape[1]
data, indices, indptr = A.data, A.indices, A.indptr
for i in range(ncols):
for j in range(indptr[i], indptr[i+1]):
yield indices[j], i, data[j]
def _coo_gen_triples(A):
"""Converts a SciPy sparse matrix in **Coordinate** format to an iterable
of weighted edge triples.
"""
row, col, data = A.row, A.col, A.data
return zip(row, col, data)
def _dok_gen_triples(A):
"""Converts a SciPy sparse matrix in **Dictionary of Keys** format to an
iterable of weighted edge triples.
"""
for (r, c), v in A.items():
yield r, c, v
def _generate_weighted_edges(A):
"""Returns an iterable over (u, v, w) triples, where u and v are adjacent
vertices and w is the weight of the edge joining u and v.
`A` is a SciPy sparse matrix (in any format).
"""
if A.format == 'csr':
return _csr_gen_triples(A)
if A.format == 'csc':
return _csc_gen_triples(A)
if A.format == 'dok':
return _dok_gen_triples(A)
# If A is in any other format (including COO), convert it to COO format.
return _coo_gen_triples(A.tocoo())
def from_scipy_sparse_matrix(A, parallel_edges=False, create_using=None,
edge_attribute='weight'):
"""Creates a new graph from an adjacency matrix given as a SciPy sparse
matrix.
Parameters
----------
A: scipy sparse matrix
An adjacency matrix representation of a graph
parallel_edges : Boolean
If this is ``True``, `create_using` is a multigraph, and `A` is an
integer matrix, then entry *(i, j)* in the matrix is interpreted as the
number of parallel edges joining vertices *i* and *j* in the graph. If it
is ``False``, then the entries in the adjacency matrix are interpreted as
the weight of a single edge joining the vertices.
create_using: NetworkX graph
Use specified graph for result. The default is Graph()
edge_attribute: string
Name of edge attribute to store matrix numeric value. The data will
have the same type as the matrix entry (int, float, (real,imag)).
Notes
-----
If `create_using` is an instance of :class:`networkx.MultiGraph` or
:class:`networkx.MultiDiGraph`, `parallel_edges` is ``True``, and the
entries of `A` are of type ``int``, then this function returns a multigraph
(of the same type as `create_using`) with parallel edges. In this case,
`edge_attribute` will be ignored.
If `create_using` is an undirected multigraph, then only the edges
indicated by the upper triangle of the matrix `A` will be added to the
graph.
Examples
--------
>>> import scipy.sparse
>>> A = scipy.sparse.eye(2,2,1)
>>> G = nx.from_scipy_sparse_matrix(A)
If `create_using` is a multigraph and the matrix has only integer entries,
the entries will be interpreted as weighted edges joining the vertices
(without creating parallel edges):
>>> import scipy
>>> A = scipy.sparse.csr_matrix([[1, 1], [1, 2]])
>>> G = nx.from_scipy_sparse_matrix(A, create_using=nx.MultiGraph())
>>> G[1][1]
{0: {'weight': 2}}
If `create_using` is a multigraph and the matrix has only integer entries
but `parallel_edges` is ``True``, then the entries will be interpreted as
the number of parallel edges joining those two vertices:
>>> import scipy
>>> A = scipy.sparse.csr_matrix([[1, 1], [1, 2]])
>>> G = nx.from_scipy_sparse_matrix(A, parallel_edges=True,
... create_using=nx.MultiGraph())
>>> G[1][1]
{0: {'weight': 1}, 1: {'weight': 1}}
"""
G = _prep_create_using(create_using)
n,m = A.shape
if n != m:
raise nx.NetworkXError(\
"Adjacency matrix is not square. nx,ny=%s"%(A.shape,))
# Make sure we get even the isolated nodes of the graph.
G.add_nodes_from(range(n))
# Create an iterable over (u, v, w) triples and for each triple, add an
# edge from u to v with weight w.
triples = _generate_weighted_edges(A)
# If the entries in the adjacency matrix are integers, the graph is a
# multigraph, and parallel_edges is True, then create parallel edges, each
# with weight 1, for each entry in the adjacency matrix. Otherwise, create
# one edge for each positive entry in the adjacency matrix and set the
# weight of that edge to be the entry in the matrix.
if A.dtype.kind in ('i', 'u') and G.is_multigraph() and parallel_edges:
chain = itertools.chain.from_iterable
# The following line is equivalent to:
#
# for (u, v) in edges:
# for d in range(A[u, v]):
# G.add_edge(u, v, weight=1)
#
triples = chain(((u, v, 1) for d in range(w)) for (u, v, w) in triples)
# If we are creating an undirected multigraph, only add the edges from the
# upper triangle of the matrix. Otherwise, add all the edges. This relies
# on the fact that the vertices created in the
# ``_generated_weighted_edges()`` function are actually the row/column
# indices for the matrix ``A``.
#
# Without this check, we run into a problem where each edge is added twice
# when `G.add_weighted_edges_from()` is invoked below.
if G.is_multigraph() and not G.is_directed():
triples = ((u, v, d) for u, v, d in triples if u <= v)
G.add_weighted_edges_from(triples, weight=edge_attribute)
return G
# fixture for nose tests
def setup_module(module):
from nose import SkipTest
try:
import numpy
except:
raise SkipTest("NumPy not available")
try:
import scipy
except:
raise SkipTest("SciPy not available")
try:
import pandas
except:
raise SkipTest("Pandas not available")
| bsd-2-clause |
TheCoSMoCompany/biopredyn | Prototype/scripts/solver_comparison.py | 1 | 1947 | #!/usr/bin/env python
# coding=utf-8
import libsbml
import libsbmlsim
import numpy as np
import matplotlib.pyplot as plt
from biopredyn import result
# Simulation conditions
model_file = "FEBS_antimony.xml"
start = 0.0
end = 20.0
steps = 100.0
step = (end - start) / steps
# Open SBML file
reader = libsbml.SBMLReader()
doc = reader.readSBMLFromFile(model_file)
# Simulate model with stiff solver
r_stiff = libsbmlsim.simulateSBMLFromString(
doc.toSBML(),
end,
step,
1,
0,
libsbmlsim.MTHD_RUNGE_KUTTA,
0)
stiff_result = result.TimeSeries()
stiff_result.import_from_libsbmlsim(r_stiff, 0.0)
# Simulate model with non-stiff solver
r_non_stiff = libsbmlsim.simulateSBMLFromString(
doc.toSBML(),
end,
step,
1,
0,
libsbmlsim.MTHD_ADAMS_MOULTON_2,
0)
non_stiff_result = result.TimeSeries()
names = non_stiff_result.import_from_libsbmlsim(r_non_stiff, 0.0)
# Plot results - for each species, time series produced
# by both solvers are plotted
time = np.array(stiff_result.get_time_steps()) # Same for all plots
plt.figure(1)
for s in range(len(names)):
if not str.lower(names[s]).__contains__("time"):
plt.subplot(2,2,s)
plt.title(str(names[s]))
stiff = stiff_result.get_quantities_per_species(names[s])
non_stiff = non_stiff_result.get_quantities_per_species(names[s])
plt.plot(time, stiff, label='stiff_solver')
plt.plot(time, non_stiff, label='non_stiff_solver')
plt.legend()
# Plot difference between stiff and non-stiff solutions
plt.figure(2)
plt.title("Absolute difference between stiff and non-stiff simulations")
for s in range(len(names)):
if not str.lower(names[s]).__contains__("time"):
stiff = np.array(stiff_result.get_quantities_per_species(names[s]))
non_stiff = np.array(non_stiff_result.get_quantities_per_species(names[s]))
diff = abs(stiff - non_stiff)
plt.plot(time, diff, label=str(names[s]))
plt.legend()
plt.show()
| bsd-3-clause |
google-research/google-research | graph_embedding/watch_your_step/graph_attention_learning.py | 1 | 15473 | # coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Graph attention method to learn the correct context over random walks.
Reference implementation for the NIPS 2018 paper:
Watch Your Step: Learning Graph Embeddings Through Attention
Sami Abu-El-Haija, Bryan Perozzi, Rami Al-Rfou, Alex Alemi
https://arxiv.org/abs/1710.09599
Example Usage:
==============
1. First, install relevant requirements
# From google-research/
pip install -r graph_embedding/watch_your_step/requirements.txt
2. Second download datasets from [Abu-El-Haija et al, CIKM'17]:
# From google-research/
curl http://sami.haija.org/graph/datasets.tgz > datasets.tgz
tar zxvf datasets.tgz
export DATA_DIR=~datasets
3. Third, run the code:
# From google-research/
python -m graph_embedding.watch_your_step.graph_attention_learning --dataset_dir ${DATA_DIR}/wiki-vote #pylint: disable=line-too-long
To save the output, please use --output_dir. Consider other flags for options.
Output file will contain train/test metrics, embeddings, as well as learned
context distributions.
"""
import json
import os
import pickle
from absl import app
from absl import flags
from absl import logging
import numpy
from sklearn import metrics
import tensorflow.compat.v1 as tf
from tensorflow.contrib import slim as contrib_slim
flags.DEFINE_integer('max_number_of_steps', 100,
'The maximum number of gradient steps.')
flags.DEFINE_float('learning_rate', 0.2, 'PercentDelta learning rate.')
flags.DEFINE_string(
'dataset_dir', None,
'Directory where all dataset files live. All data files '
'must be located here. Including {train,test}.txt.npy and '
'{train,test}.neg.txt.npy. ')
flags.mark_flag_as_required('dataset_dir')
flags.DEFINE_string('output_dir', None,
'If set, output metrics will be written.')
flags.DEFINE_integer('d', 4, 'embedding dimensions')
flags.DEFINE_integer(
'transition_powers', 5,
'Highest power of normalized adjacency (transition) '
'matrix.')
flags.DEFINE_float(
'context_regularizer', 0.1,
'Regularization co-efficient to the context distribution '
'parameters.')
flags.DEFINE_string('objective', 'nlgl',
'Choices are "rmse" or "nlgl" (neg. Log Graph Likelihood)')
flags.DEFINE_bool(
'share_embeddings', False,
'If set, left and right embedding dictionary will be shared.')
FLAGS = flags.FLAGS
NUM_NODES = 0
IS_DIRECTED = None
def IsDirected():
global IS_DIRECTED
if IS_DIRECTED is not None:
return IS_DIRECTED
IS_DIRECTED = os.path.exists(
os.path.join(FLAGS.dataset_dir, 'test.directed.neg.txt.npy'))
return IS_DIRECTED
def GetNumNodes():
global NUM_NODES
if NUM_NODES == 0:
index = pickle.load(
open(os.path.join(FLAGS.dataset_dir, 'index.pkl'), 'rb'))
NUM_NODES = len(index['index'])
return NUM_NODES
def Description():
return 'ds.%s.e.%i.o.%s' % (os.path.basename(FLAGS.dataset_dir), FLAGS.d,
FLAGS.objective)
def GetOrMakeAdjacencyMatrix():
"""Creates Adjacency matrix and caches it on disk with name a.npy."""
a_file = os.path.join(FLAGS.dataset_dir, 'a.npy')
if os.path.exists(a_file):
return numpy.load(open(a_file, 'rb'))
num_nodes = GetNumNodes()
a = numpy.zeros(shape=(num_nodes, num_nodes), dtype='float32')
train_edges = numpy.load(
open(os.path.join(FLAGS.dataset_dir, 'train.txt.npy'), 'rb'))
a[train_edges[:, 0], train_edges[:, 1]] = 1.0
if not IsDirected():
a[train_edges[:, 1], train_edges[:, 0]] = 1.0
numpy.save(open(a_file, 'wb'), a)
return a
def GetPowerTransitionPairs(highest_power):
return list(IterPowerTransitionPairs(highest_power))
def IterPowerTransitionPairs(highest_power):
"""Yields powers of transition matrix (T, T*T, T*T*T, ...).
It caches them on disk as t_<i>.npy, where <i> is the power. The first power
(i = 1) is not cached as it is trivially computed from the adjacency matrix.
Args:
highest_power: integer representing the highest power of the transition
matrix. This will be the number of yields.
"""
num_nodes = GetNumNodes()
for i in range(highest_power):
if i == 0:
a = GetOrMakeAdjacencyMatrix()
transition = a.T
degree = transition.sum(axis=0)
transition /= degree + 0.0000001
power_array = transition
else:
power_filename = os.path.join(FLAGS.dataset_dir, 't_%i.npy' % (i + 1))
if os.path.exists(power_filename):
power_array = numpy.load(open(power_filename, 'rb'))
else:
power_array = power_array.dot(transition)
print('Computing T^%i ...' % (i + 1)) # pylint: disable=superfluous-parens
numpy.save(open(power_filename, 'wb'), power_array)
print(' ... Saved T^%i' % (i + 1)) # pylint: disable=superfluous-parens
placeholder = tf.placeholder(tf.float32, shape=(num_nodes, num_nodes))
yield (placeholder, power_array)
def GetParametrizedExpectation(references):
r"""Calculates E[D; q_1, q_2, ...]: a parametrized (tensor) matrix D.
Which is defined as:
E[D; q] = P_0 * (Q_1*T + Q_2*T^2 + Q_3*T^3 + ...)
where Q_1, Q_2, ... = softmax(q_1, q_2, ...)
and vector (q_1, q_2, ...) is created as a "trainable variable".
Args:
references: Dict that will be populated as key-value pairs:
'combination': \sum_j Q_j T^j (i.e. E[D] excluding P_0).
'normed': The vector Q_1, Q_2, ... (sums to 1).
'mults': The vector q_1, q_2, ... (Before softmax, does not sum to 1).
Returns:
Tuple (E[D; q], feed_dict) where the first entry contains placeholders and
the feed_dict contains is a dictionary from the placeholders to numpy arrays
of the transition powers.
"""
feed_dict = {}
n = FLAGS.transition_powers
regularizer = FLAGS.context_regularizer
a = GetOrMakeAdjacencyMatrix()
transition = a.T
degree = transition.sum(axis=0)
# transition /= degree + 0.0000001
# transition_pow_n = transition
convex_combination = []
# vector q
mults = tf.Variable(numpy.ones(shape=(n), dtype='float32'))
# vector Q (output of softmax)
normed = tf.squeeze(tf.nn.softmax(tf.expand_dims(mults, 0)), 0)
references['mults'] = mults
references['normed'] = normed
transition_powers = GetPowerTransitionPairs(n)
for k, (placeholder, transition_pow) in enumerate(transition_powers):
feed_dict[placeholder] = transition_pow
convex_combination.append(normed[k] * placeholder)
d_sum = tf.add_n(convex_combination)
d_sum *= degree
tf.losses.add_loss(tf.reduce_mean(mults**2) * regularizer)
references['combination'] = convex_combination
return tf.transpose(d_sum) * GetNumNodes() * 80, feed_dict
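# Hedged sketch (editor's note, not used by the training graph above): the same
# softmax-weighted combination of transition-matrix powers, written with plain
# numpy for readability. It only covers the Q_1*T + Q_2*T^2 + ... part; the degree
# scaling and the final constant factor applied by GetParametrizedExpectation are
# deliberately left out, and all names here are made up for the illustration.
def _softmax_combination_of_transition_powers(transition, q_logits):
  """Returns Q_1*T + Q_2*T^2 + ... for a small dense transition matrix T."""
  weights = numpy.exp(q_logits - numpy.max(q_logits))
  weights = weights / weights.sum()
  power = numpy.eye(transition.shape[0], dtype=transition.dtype)
  combination = numpy.zeros_like(transition)
  for q_j in weights:
    power = power.dot(transition)  # T, then T^2, T^3, ...
    combination = combination + q_j * power
  return combination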
# Helper function 1/3 for PercentDelta.
def GetPD(target_num_steps):
global_step = tf.train.get_or_create_global_step()
global_step = tf.cast(global_step, tf.float32)
# gs = 0, target = 1
# gs = num_steps, target = 0.01
# Solve: y = mx + c
# gives: c = 1
# m = dy / dx = (1 - 0.01) / (0 - num_steps) = - 0.99 / num_steps
# Therefore, y = 1 - (0.99/num_steps) * x
return -global_step * 0.99 / target_num_steps + 1
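# Hedged check of the schedule above (editor's note, never called): the closed
# form evaluates to 1.0 at step 0 and to 0.01 at step == target_num_steps.
def _percent_delta_target_example(step, target_num_steps):
  return 1.0 - 0.99 * float(step) / target_num_steps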
# Helper function 2/3 for PercentDelta.
def PlusEpsilon(x, eps=1e-5):
"""Returns x+epsilon, without changing element-wise sign of x."""
return x + (tf.cast(x < 0, tf.float32) * -eps) + (
tf.cast(x >= 0, tf.float32) * eps)
# Helper function 3/3 for PercentDelta.
def CreateGradMultipliers(loss):
"""Returns a gradient multiplier so that SGD becomes PercentDelta."""
variables = tf.trainable_variables() # tf.global_variables()
gradients = tf.gradients(loss, variables)
multipliers = {}
target_pd = GetPD(FLAGS.max_number_of_steps)
for v, g in zip(variables, gradients):
if g is None:
continue
multipliers[v] = target_pd / PlusEpsilon(
tf.reduce_mean(tf.abs(g / PlusEpsilon(v))))
return multipliers
def CreateEmbeddingDictionary(side, size):
num_nodes = GetNumNodes()
embeddings = numpy.array(
numpy.random.uniform(low=-0.1, high=0.1, size=(num_nodes, size)),
dtype='float32')
embeddings = tf.Variable(embeddings, name=side + 'E')
tf.losses.add_loss(tf.reduce_mean(embeddings**2) * 1e-6)
return embeddings
def CreateObjective(g, target_matrix):
"""Returns the objective function (can be nlgl or rmse)."""
if FLAGS.objective == 'nlgl': # Negative log likelihood
# target_matrix is E[D; q], which is used in the "positive part" of the
# likelihood objective. We use true adjacency for the "negative part", as
# described in our paper.
true_adjacency = tf.Variable(
GetOrMakeAdjacencyMatrix(), name='adjacency', trainable=False)
logistic = tf.sigmoid(g)
return -tf.reduce_mean(
tf.multiply(target_matrix, tf.log(PlusEpsilon(logistic))) +
tf.multiply(1 - true_adjacency, tf.log(PlusEpsilon(1 - logistic))))
elif FLAGS.objective == 'rmse': # Root mean squared error
return tf.reduce_mean((g - target_matrix)**2)
else:
logging.fatal('unknown objective "%s".', FLAGS.objective)
def CreateGFn(net_l, net_r):
return tf.matmul(net_l, tf.transpose(net_r))
def LogMsg(msg):
logging.info(msg)
def Write(eval_metrics):
if FLAGS.output_dir:
out_json = os.path.join(FLAGS.output_dir, Description() + '.json')
open(out_json, 'w').write(json.dumps(eval_metrics))
BEST_EVAL = None
BEST_TF_PARAMS = None
def RunEval(sess, g, test_pos_arr, test_neg_arr, train_pos_arr, train_neg_arr,
i, v_total_loss, v_objective_loss, eval_metrics, feed_dict):
"""Calls sess.run(g) and computes AUC metric for test and train."""
scores = sess.run(g, feed_dict=feed_dict)
# Compute test auc:
test_pos_prods = scores[test_pos_arr[:, 0], test_pos_arr[:, 1]]
test_neg_prods = scores[test_neg_arr[:, 0], test_neg_arr[:, 1]]
test_y = [0] * len(test_neg_prods) + [1] * len(test_pos_prods)
test_y_pred = numpy.concatenate([test_neg_prods, test_pos_prods], 0)
test_auc = metrics.roc_auc_score(test_y, test_y_pred)
# Compute train auc:
train_pos_prods = scores[train_pos_arr[:, 0], train_pos_arr[:, 1]]
train_neg_prods = scores[train_neg_arr[:, 0], train_neg_arr[:, 1]]
train_y = [0] * len(train_neg_prods) + [1] * len(train_pos_prods)
train_y_pred = numpy.concatenate([train_neg_prods, train_pos_prods], 0)
train_auc = metrics.roc_auc_score(train_y, train_y_pred)
LogMsg('@%i test/train auc=%f/%f obj.loss=%f total.loss=%f' %
(i, test_auc, train_auc, v_objective_loss, v_total_loss))
# Populate metrics.
eval_metrics['train auc'].append(float(train_auc))
eval_metrics['test auc'].append(float(test_auc))
eval_metrics['i'].append(i)
eval_metrics['total loss'].append(float(v_total_loss))
eval_metrics['objective loss'].append(float(v_objective_loss))
if train_auc > eval_metrics['best train auc']:
eval_metrics['best train auc'] = float(train_auc)
eval_metrics['test auc at best train'] = float(test_auc)
eval_metrics['i at best train'] = i
return train_auc
def main(argv=()):
del argv # Unused.
if FLAGS.output_dir and not os.path.exists(FLAGS.output_dir):
os.makedirs(FLAGS.output_dir)
references = {}
net_l = CreateEmbeddingDictionary('L', FLAGS.d)
if FLAGS.share_embeddings:
net_r = net_l
else:
net_r = CreateEmbeddingDictionary('R', FLAGS.d)
g = CreateGFn(net_l, net_r)
target_matrix, feed_dict = GetParametrizedExpectation(references)
if not isinstance(target_matrix, tf.Tensor):
target_matrix = tf.Variable(target_matrix, name='target', trainable=False)
objective_loss = CreateObjective(g, target_matrix)
tf.losses.add_loss(objective_loss)
loss = tf.losses.get_total_loss()
optimizer = tf.train.GradientDescentOptimizer(FLAGS.learning_rate)
# Set up training.
grad_mults = CreateGradMultipliers(loss)
train_op = contrib_slim.learning.create_train_op(
loss, optimizer, gradient_multipliers=grad_mults)
if IsDirected():
test_neg_file = os.path.join(FLAGS.dataset_dir, 'test.directed.neg.txt.npy')
test_neg_arr = numpy.load(open(test_neg_file, 'rb'))
else:
test_neg_file = os.path.join(FLAGS.dataset_dir, 'test.neg.txt.npy')
test_neg_arr = numpy.load(open(test_neg_file, 'rb'))
test_pos_file = os.path.join(FLAGS.dataset_dir, 'test.txt.npy')
test_pos_arr = numpy.load(open(test_pos_file, 'rb'))
train_pos_file = os.path.join(FLAGS.dataset_dir, 'train.txt.npy')
train_neg_file = os.path.join(FLAGS.dataset_dir, 'train.neg.txt.npy')
train_pos_arr = numpy.load(open(train_pos_file, 'rb'))
train_neg_arr = numpy.load(open(train_neg_file, 'rb'))
sess = tf.Session()
coord = tf.train.Coordinator()
tf.train.start_queue_runners(coord=coord, sess=sess)
sess.run(tf.global_variables_initializer())
eval_metrics = {
'train auc': [],
'test auc': [],
'i': [],
'i at best train': 0,
'best train auc': 0,
'test auc at best train': 0,
'total loss': [],
'objective loss': [],
'mults': [],
'normed_mults': [],
}
# IPython.embed()
all_variables = tf.trainable_variables() + (
[tf.train.get_or_create_global_step()])
best_train_values = None
best_train_auc = 0
for i in range(FLAGS.max_number_of_steps):
# import pdb; pdb.set_trace()
_, v_total_loss, v_objective_loss = sess.run(
(train_op, loss, objective_loss), feed_dict=feed_dict)
if 'update' in references:
references['update'](sess)
if i % 4 == 0: # Compute eval every 4th step.
train_auc = RunEval(sess, g, test_pos_arr, test_neg_arr, train_pos_arr,
train_neg_arr, i, v_total_loss, v_objective_loss,
eval_metrics, feed_dict)
if 'mults' in references:
mults, normed_mults = sess.run((references['mults'],
references['normed']))
eval_metrics['mults'].append(list(map(float, list(mults))))
eval_metrics['normed_mults'].append(
list(map(float, list(normed_mults))))
if train_auc > best_train_auc: # Found new best.
best_train_auc = train_auc
# Memorize variables.
best_train_values = sess.run(all_variables)
if i % 100 == 0:
Write(eval_metrics)
if i - 100 > eval_metrics['i at best train']:
LogMsg('Reached peak a while ago. Terminating...')
break
Write(eval_metrics)
if FLAGS.output_dir:
# Write trained parameters.
last_params = os.path.join(FLAGS.output_dir, Description() + '.last.pkl')
best_params = os.path.join(FLAGS.output_dir, Description() + '.best.pkl')
names = [v.name for v in all_variables]
last_train_values = sess.run(all_variables)
pickle.dump(list(zip(names, last_train_values)), open(last_params, 'wb'))
pickle.dump(list(zip(names, best_train_values)), open(best_params, 'wb'))
return 0
if __name__ == '__main__':
app.run(main)
| apache-2.0 |
ofgulban/scikit-image | skimage/io/manage_plugins.py | 14 | 10495 | """Handle image reading, writing and plotting plugins.
To improve performance, plugins are only loaded as needed. As a result, there
can be multiple states for a given plugin:
available: Defined in an *ini file located in `skimage.io._plugins`.
See also `skimage.io.available_plugins`.
partial definition: Specified in an *ini file, but not defined in the
corresponding plugin module. This will raise an error when loaded.
available but not on this system: Defined in `skimage.io._plugins`, but
a dependent library (e.g. Qt, PIL) is not available on your system.
This will raise an error when loaded.
loaded: The real availability is determined when it's explicitly loaded,
either because it's one of the default plugins, or because it's
loaded explicitly by the user.
"""
import sys
if sys.version.startswith('3'):
from configparser import ConfigParser # Python 3
else:
from ConfigParser import ConfigParser # Python 2
import os.path
from glob import glob
from .collection import imread_collection_wrapper
__all__ = ['use_plugin', 'call_plugin', 'plugin_info', 'plugin_order',
'reset_plugins', 'find_available_plugins', 'available_plugins']
# The plugin store will save a list of *loaded* io functions for each io type
# (e.g. 'imread', 'imsave', etc.). Plugins are loaded as requested.
plugin_store = None
# Dictionary mapping plugin names to a list of functions they provide.
plugin_provides = {}
# The module names for the plugins in `skimage.io._plugins`.
plugin_module_name = {}
# Meta-data about plugins provided by *.ini files.
plugin_meta_data = {}
# For each plugin type, default to the first available plugin as defined by
# the following preferences.
preferred_plugins = {
# Default plugins for all types (overridden by specific types below).
'all': ['pil', 'matplotlib', 'qt', 'freeimage'],
'imshow': ['matplotlib'],
'imshow_collection': ['matplotlib']
}
def _clear_plugins():
"""Clear the plugin state to the default, i.e., where no plugins are loaded
"""
global plugin_store
plugin_store = {'imread': [],
'imsave': [],
'imshow': [],
'imread_collection': [],
'imshow_collection': [],
'_app_show': []}
_clear_plugins()
def _load_preferred_plugins():
# Load preferred plugin for each io function.
io_types = ['imsave', 'imshow', 'imread_collection', 'imshow_collection',
'imread']
for p_type in io_types:
_set_plugin(p_type, preferred_plugins['all'])
plugin_types = (p for p in preferred_plugins.keys() if p != 'all')
for p_type in plugin_types:
_set_plugin(p_type, preferred_plugins[p_type])
def _set_plugin(plugin_type, plugin_list):
for plugin in plugin_list:
if plugin not in available_plugins:
continue
try:
use_plugin(plugin, kind=plugin_type)
break
except (ImportError, RuntimeError, OSError):
pass
def reset_plugins():
_clear_plugins()
_load_preferred_plugins()
def _parse_config_file(filename):
"""Return plugin name and meta-data dict from plugin config file."""
parser = ConfigParser()
parser.read(filename)
name = parser.sections()[0]
meta_data = {}
for opt in parser.options(name):
meta_data[opt] = parser.get(name, opt)
return name, meta_data
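# For reference, each plugin config file parsed above is a small ini file with
# a single section; a hypothetical example (field names other than 'provides'
# are illustrative) could look like:
#
#   [pil]
#   description = Image reading via the Python Imaging Library
#   provides = imread, imsave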
def _scan_plugins():
"""Scan the plugins directory for .ini files and parse them
to gather plugin meta-data.
"""
pd = os.path.dirname(__file__)
config_files = glob(os.path.join(pd, '_plugins', '*.ini'))
for filename in config_files:
name, meta_data = _parse_config_file(filename)
plugin_meta_data[name] = meta_data
provides = [s.strip() for s in meta_data['provides'].split(',')]
valid_provides = [p for p in provides if p in plugin_store]
for p in provides:
if not p in plugin_store:
print("Plugin `%s` wants to provide non-existent `%s`."
" Ignoring." % (name, p))
# Add plugins that provide 'imread' as provider of 'imread_collection'.
need_to_add_collection = ('imread_collection' not in valid_provides and
'imread' in valid_provides)
if need_to_add_collection:
valid_provides.append('imread_collection')
plugin_provides[name] = valid_provides
plugin_module_name[name] = os.path.basename(filename)[:-4]
_scan_plugins()
def find_available_plugins(loaded=False):
"""List available plugins.
Parameters
----------
loaded : bool
If True, show only those plugins currently loaded. By default,
all plugins are shown.
Returns
-------
p : dict
Dictionary with plugin names as keys and exposed functions as
values.
"""
active_plugins = set()
for plugin_func in plugin_store.values():
for plugin, func in plugin_func:
active_plugins.add(plugin)
d = {}
for plugin in plugin_provides:
if not loaded or plugin in active_plugins:
d[plugin] = [f for f in plugin_provides[plugin]
if not f.startswith('_')]
return d
available_plugins = find_available_plugins()
def call_plugin(kind, *args, **kwargs):
"""Find the appropriate plugin of 'kind' and execute it.
Parameters
----------
kind : {'imshow', 'imsave', 'imread', 'imread_collection'}
Function to look up.
plugin : str, optional
Plugin to load. Defaults to None, in which case the first
matching plugin is used.
*args, **kwargs : arguments and keyword arguments
Passed to the plugin function.
"""
if not kind in plugin_store:
raise ValueError('Invalid function (%s) requested.' % kind)
plugin_funcs = plugin_store[kind]
if len(plugin_funcs) == 0:
msg = ("No suitable plugin registered for %s.\n\n"
"You may load I/O plugins with the `skimage.io.use_plugin` "
"command. A list of all available plugins are shown in the "
"`skimage.io` docstring.")
raise RuntimeError(msg % kind)
plugin = kwargs.pop('plugin', None)
if plugin is None:
_, func = plugin_funcs[0]
else:
_load(plugin)
try:
func = [f for (p, f) in plugin_funcs if p == plugin][0]
except IndexError:
raise RuntimeError('Could not find the plugin "%s" for %s.' %
(plugin, kind))
return func(*args, **kwargs)
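# Example usage (illustrative; 'my_image.png' is a made-up filename):
#
#   call_plugin('imread', 'my_image.png')                      # first loaded plugin
#   call_plugin('imread', 'my_image.png', plugin='matplotlib') # force a plugin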
def use_plugin(name, kind=None):
"""Set the default plugin for a specified operation. The plugin
will be loaded if it hasn't been already.
Parameters
----------
name : str
Name of plugin.
kind : {'imsave', 'imread', 'imshow', 'imread_collection', 'imshow_collection'}, optional
Set the plugin for this function. By default,
the plugin is set for all functions.
See Also
--------
available_plugins : List of available plugins
Examples
--------
To use Matplotlib as the default image reader, you would write:
>>> from skimage import io
>>> io.use_plugin('matplotlib', 'imread')
To see a list of available plugins run ``io.available_plugins``. Note that
this lists plugins that are defined, but the full list may not be usable
if your system does not have the required libraries installed.
"""
if kind is None:
kind = plugin_store.keys()
else:
if not kind in plugin_provides[name]:
raise RuntimeError("Plugin %s does not support `%s`." %
(name, kind))
if kind == 'imshow':
kind = [kind, '_app_show']
else:
kind = [kind]
_load(name)
for k in kind:
if not k in plugin_store:
raise RuntimeError("'%s' is not a known plugin function." % k)
funcs = plugin_store[k]
# Shuffle the plugins so that the requested plugin stands first
# in line
funcs = [(n, f) for (n, f) in funcs if n == name] + \
[(n, f) for (n, f) in funcs if n != name]
plugin_store[k] = funcs
def _inject_imread_collection_if_needed(module):
"""Add `imread_collection` to module if not already present."""
if not hasattr(module, 'imread_collection') and hasattr(module, 'imread'):
imread = getattr(module, 'imread')
func = imread_collection_wrapper(imread)
setattr(module, 'imread_collection', func)
def _load(plugin):
"""Load the given plugin.
Parameters
----------
plugin : str
Name of plugin to load.
See Also
--------
plugins : List of available plugins
"""
if plugin in find_available_plugins(loaded=True):
return
if not plugin in plugin_module_name:
raise ValueError("Plugin %s not found." % plugin)
else:
modname = plugin_module_name[plugin]
plugin_module = __import__('skimage.io._plugins.' + modname,
fromlist=[modname])
provides = plugin_provides[plugin]
for p in provides:
if p == 'imread_collection':
_inject_imread_collection_if_needed(plugin_module)
elif not hasattr(plugin_module, p):
print("Plugin %s does not provide %s as advertised. Ignoring." %
(plugin, p))
continue
store = plugin_store[p]
func = getattr(plugin_module, p)
if not (plugin, func) in store:
store.append((plugin, func))
def plugin_info(plugin):
"""Return plugin meta-data.
Parameters
----------
plugin : str
Name of plugin.
Returns
-------
m : dict
Meta data as specified in plugin ``.ini``.
"""
try:
return plugin_meta_data[plugin]
except KeyError:
raise ValueError('No information on plugin "%s"' % plugin)
def plugin_order():
"""Return the currently preferred plugin order.
Returns
-------
p : dict
Dictionary of preferred plugin order, with function name as key and
plugins (in order of preference) as value.
"""
p = {}
for func in plugin_store:
p[func] = [plugin_name for (plugin_name, f) in plugin_store[func]]
return p
| bsd-3-clause |
GraphProcessor/CommunityDetectionCodes | Prensentation/algorithms/dynamics/problem_vis.py | 1 | 1390 | import networkx as nx
import matplotlib.pyplot as plt
from networkx.drawing.nx_agraph import graphviz_layout
idx = 0
def vis_post_output(graph, node_dict, name):
global idx
color_dict = {1: 'red', 2: 'green', 3: 'blue', 4: 'yellow', 5: 'pink', 6: 'purple', 7: 'black', 8: 'orange',
9: 'cyan'}
pos = nx.circular_layout(graph)
nx.draw(graph, with_labels=True, pos=pos, font_size=20, node_size=2000, alpha=0.8, width=4,
edge_color='grey', node_color='white')
for node in node_dict:
nx.draw_networkx_nodes(graph, pos=pos, nodelist=[node], node_size=2000, alpha=0.4,
node_color=color_dict[node_dict[node]])
plt.axis('off')
plt.savefig('./output_graph' + name + str(idx) + '.png', bbox_inches='tight', pad_inches=0,
transparent=True)
plt.show()
idx += 1
if __name__ == '__main__':
graph = nx.read_edgelist('example_edge_list.txt', nodetype=int)
print graph.edges(data=True)
# vis of async label propagation
with open('async_lp_res0.txt') as ifs:
dict_list = ifs.readlines()
for my_dict in dict_list:
vis_post_output(graph, eval(my_dict), '_async')
with open('sync_lp_res0.txt') as ifs:
dict_list = ifs.readlines()
for my_dict in dict_list:
vis_post_output(graph, eval(my_dict), '_sync')
| gpl-2.0 |
SuLab/RASLseqAligner | src/RASLseqAlign.py | 1 | 4792 |
import random
import pandas as pd
import os
import sys
def rasl_probe_blast(read_file_path, blastn_path, db_path, print_on=False):
"""
    This function returns blast results from queries optimized for rasl-seq conditions
    and outputs them in a custom format.
Parameters
----------
read_file_path: str, path to temp blast input file
Specifies where temporary blast input file has been written
blastn_path: str, path to blastn executable
Specifies full path to blastn executable
db_path: str, path to target BLAST database
Specifies path to on_off_target BLAST database
Returns
-------
Pandas Blast Results dataframe
index = qseqid, rasl_probe sequence
cols = ['qseqid','mismatch','evalue','sseqid','qlen', 'length','qseq','qstart','sseq','sstart','send']
"""
#SETTING WRITE PATH FOR BLAST OUTPUT
write_path = read_file_path.rstrip(".txt") + "_blast_output.txt"
db = " -db " + db_path
#SETTING BLAST WORD SIZE
wordsize = "-word_size 8 "
blast_run = os.system(blastn_path+"/blastn" + " -task blastn-short -query " \
+ read_file_path +" -evalue 1e-6 "+ wordsize + db +" -max_target_seqs 1" +" -strand 'plus' -xdrop_gap 7 -num_threads 12" \
+ " -outfmt '6 -outfmt qseqid -outfmt mismatch -outfmt evalue -outfmt sseqid -outfmt qlen -outfmt length -outfmt qseq -outfmt qstart -outfmt sseq -outfmt sstart -outfmt send' >"+ write_path)
    # Evalue of 20 was used to account for a cutoff of 13-mer matching
if print_on:
print "BLAST COMMAND\n" + blastn_path + " -task blastn-short -query " \
+ read_file_path +" -evalue 1e-6 "+ wordsize + db +" -max_target_seqs 1" +" -strand 'plus' -xdrop_gap 7 -num_threads 12" \
+ " -outfmt '6 -outfmt qseqid -outfmt mismatch -outfmt evalue -outfmt sseqid -outfmt qlen -outfmt length -outfmt qseq -outfmt qstart -outfmt sseq -outfmt sstart -outfmt send' >"+ write_path
#CONVERTING BLAST OUTPUT INTO DATAFRAME
bl_results = pd.read_table(write_path, header=None)
bl_results.columns=['qseqid','mismatch','evalue','sseqid','qlen', 'length','qseq','qstart','sseq','sstart','send']
bl_results['probe'] = bl_results.sseqid.apply(lambda x: "_".join(x.split("_")))
bl_results.ix[0]
bl_results.set_index('qseqid',drop=False,inplace=True)
#REMOVING INPUT/OUTPUT BLAST FILES
os.system('rm ' + read_file_path)
os.system('rm '+ write_path)
os.system('rm ' + db_path + ".*")
return bl_results
def get_blast_alignments(collapsed_read_df, blastn_path, db_path, print_on=False):
'''
This function returns the rasl_probe sequence blast alignments
Parameters
----------
collapsed_read_df: pandas dataframe, must contain columns ['rasl_probe']
collapsed_read_df['rasl_probe'] - fastq sequence observed between rasl adaptor sequences
blastn_path: str, path to blastn executable
Specifies full path to blastn executable
db_path: str, path to target BLAST database
Specifies path to on_off_target BLAST database
Returns
-------
Pandas Blast Results dataframe
index = qseqid #which is the ~40 nt rasl_probe sequence
cols = ['qseqid','mismatch','evalue','sseqid','qlen', 'length','qseq','qstart','sseq','sstart','send']
'''
#CREATING BLAST INPUT FILE
random_file_handle = str(random.randrange(0,1000000)) #setting random seed for temp file writing
blast_write_path = db_path + 'temp_blast_' + random_file_handle +".txt"
#WRITING TEMP BLAST INPUT FILE
blast_input = open(blast_write_path,"w")
for items in collapsed_read_df.rasl_probe.unique(): #only blasting unique perfect matches
blast_input.write(">"+items + "\n" + items + "\n")
blast_input.flush()
blast_input.close()
#ARGUMENTS FOR BLAST FUNCTION
    bl_results = rasl_probe_blast(blast_write_path, blastn_path, db_path, print_on=print_on)
return bl_results
##PASS AN OBJECT (RASLseqReads) WITH ALL OF THESE ATTRIBUTES OR PASS THE INDIVIDUAL OBJECTS?
def get_rasl_blast_df(collapsed_read_df, blastn_path, db_path, print_on=False):
#PASS OBJECT
#BLAST RASL_PROBE SEQ AGAINST ALL COMBINATIONS OF ACCEPTOR AND DONOR PROBES
    bl_results = get_blast_alignments(collapsed_read_df, blastn_path, db_path, print_on=print_on)
#JOINING BLAST RESULTS WITH COLLAPSED_READ_DF
collapsed_read_df.set_index('rasl_probe',inplace=True,drop=False)
collapsed_read_df = collapsed_read_df.join(bl_results,how='inner')
return collapsed_read_df
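# Hypothetical usage sketch (the paths and data below are made up for
# illustration, not taken from this module):
#
#   reads = pd.DataFrame({'rasl_probe': ['ACGTACGT...', 'TTGCAAGG...']})
#   annotated = get_rasl_blast_df(reads, '/usr/local/bin', '/data/rasl_probes')
#
# `annotated` then holds one row per input read joined to its BLAST alignment.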
| mit |
aba1476/spark-timeseries | python/sparkts/utils.py | 4 | 1401 | import sys
import os
import logging
import pandas as pd
from glob import glob
def add_pyspark_path():
"""Add PySpark to the library path based on the value of SPARK_HOME. """
try:
spark_home = os.environ['SPARK_HOME']
sys.path.append(os.path.join(spark_home, 'python'))
py4j_src_zip = glob(os.path.join(spark_home, 'python',
'lib', 'py4j-*-src.zip'))
if len(py4j_src_zip) == 0:
raise ValueError('py4j source archive not found in %s'
% os.path.join(spark_home, 'python', 'lib'))
else:
py4j_src_zip = sorted(py4j_src_zip)[::-1]
sys.path.append(py4j_src_zip[0])
except KeyError:
logging.error("""SPARK_HOME was not set. please set it. e.g.
SPARK_HOME='/home/...' ./bin/pyspark [program]""")
exit(-1)
except ValueError as e:
logging.error(str(e))
exit(-1)
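# Typical usage (illustrative): call add_pyspark_path() before importing
# pyspark so that both the pyspark package and the bundled py4j archive are
# on sys.path, e.g.
#
#   add_pyspark_path()
#   from pyspark import SparkContext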
def quiet_py4j():
logger = logging.getLogger('py4j')
logger.setLevel(logging.INFO)
def datetime_to_millis(dt):
"""
Accept a string, Pandas Timestamp, or long, and return millis since the epoch.
"""
if isinstance(dt, pd.Timestamp):
return dt.value / 1000000
elif isinstance(dt, str):
return pd.Timestamp(dt).value / 1000000
elif isinstance(dt, long):
return dt
raise ValueError
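# Illustrative conversions (not part of the original module):
#
#   datetime_to_millis(pd.Timestamp('1970-01-01 00:00:01'))  # -> 1000
#   datetime_to_millis('1970-01-02')                         # -> 86400000
#   datetime_to_millis(1234567890L)                          # returned as-is (Python 2 long)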
| apache-2.0 |
cpcloud/blaze | blaze/compute/tests/test_mysql_compute.py | 3 | 1619 | from __future__ import absolute_import, print_function, division
from getpass import getuser
import pytest
sa = pytest.importorskip('sqlalchemy')
pytest.importorskip('pymysql')
from odo import odo, drop, discover
import pandas as pd
from blaze import symbol, compute
from blaze.utils import example, normalize
@pytest.yield_fixture(scope='module')
def data():
try:
t = odo(
example('nyc.csv'),
'mysql+pymysql://%s@localhost/test::nyc' % getuser()
)
except sa.exc.OperationalError as e:
pytest.skip(str(e))
else:
try:
yield t.bind
finally:
drop(t)
@pytest.fixture
def db(data):
return symbol('test', discover(data))
def test_agg_sql(db, data):
subset = db.nyc[['pickup_datetime', 'dropoff_datetime', 'passenger_count']]
expr = subset[subset.passenger_count < 4].passenger_count.min()
result = compute(expr, data)
expected = """
select
min(alias.passenger_count) as passenger_count_min
from
(select
nyc.passenger_count as passenger_count
from
nyc
where nyc.passenger_count < %s) as alias
"""
assert normalize(str(result)) == normalize(expected)
def test_agg_compute(db, data):
subset = db.nyc[['pickup_datetime', 'dropoff_datetime', 'passenger_count']]
expr = subset[subset.passenger_count < 4].passenger_count.min()
result = compute(expr, data)
passenger_count = odo(compute(db.nyc.passenger_count, {db: data}), pd.Series)
assert passenger_count[passenger_count < 4].min() == result.scalar()
| bsd-3-clause |
nikitasingh981/scikit-learn | sklearn/cluster/tests/test_hierarchical.py | 33 | 20167 | """
Several basic tests for hierarchical clustering procedures
"""
# Authors: Vincent Michel, 2010, Gael Varoquaux 2012,
# Matteo Visconti di Oleggio Castello 2014
# License: BSD 3 clause
from tempfile import mkdtemp
import shutil
from functools import partial
import numpy as np
from scipy import sparse
from scipy.cluster import hierarchy
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import ignore_warnings
from sklearn.cluster import ward_tree
from sklearn.cluster import AgglomerativeClustering, FeatureAgglomeration
from sklearn.cluster.hierarchical import (_hc_cut, _TREE_BUILDERS,
linkage_tree)
from sklearn.feature_extraction.image import grid_to_graph
from sklearn.metrics.pairwise import PAIRED_DISTANCES, cosine_distances,\
manhattan_distances, pairwise_distances
from sklearn.metrics.cluster import normalized_mutual_info_score
from sklearn.neighbors.graph import kneighbors_graph
from sklearn.cluster._hierarchical import average_merge, max_merge
from sklearn.utils.fast_dict import IntFloatDict
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_warns
def test_linkage_misc():
# Misc tests on linkage
rng = np.random.RandomState(42)
X = rng.normal(size=(5, 5))
assert_raises(ValueError, AgglomerativeClustering(linkage='foo').fit, X)
assert_raises(ValueError, linkage_tree, X, linkage='foo')
assert_raises(ValueError, linkage_tree, X, connectivity=np.ones((4, 4)))
# Smoke test FeatureAgglomeration
FeatureAgglomeration().fit(X)
# test hierarchical clustering on a precomputed distances matrix
dis = cosine_distances(X)
res = linkage_tree(dis, affinity="precomputed")
assert_array_equal(res[0], linkage_tree(X, affinity="cosine")[0])
# test hierarchical clustering on a precomputed distances matrix
res = linkage_tree(X, affinity=manhattan_distances)
assert_array_equal(res[0], linkage_tree(X, affinity="manhattan")[0])
def test_structured_linkage_tree():
# Check that we obtain the correct solution for structured linkage trees.
rng = np.random.RandomState(0)
mask = np.ones([10, 10], dtype=np.bool)
# Avoiding a mask with only 'True' entries
mask[4:7, 4:7] = 0
X = rng.randn(50, 100)
connectivity = grid_to_graph(*mask.shape)
for tree_builder in _TREE_BUILDERS.values():
children, n_components, n_leaves, parent = \
tree_builder(X.T, connectivity)
n_nodes = 2 * X.shape[1] - 1
assert_true(len(children) + n_leaves == n_nodes)
# Check that ward_tree raises a ValueError with a connectivity matrix
# of the wrong shape
assert_raises(ValueError,
tree_builder, X.T, np.ones((4, 4)))
# Check that fitting with no samples raises an error
assert_raises(ValueError,
tree_builder, X.T[:0], connectivity)
def test_unstructured_linkage_tree():
# Check that we obtain the correct solution for unstructured linkage trees.
rng = np.random.RandomState(0)
X = rng.randn(50, 100)
for this_X in (X, X[0]):
# With specified a number of clusters just for the sake of
# raising a warning and testing the warning code
with ignore_warnings():
children, n_nodes, n_leaves, parent = assert_warns(
UserWarning, ward_tree, this_X.T, n_clusters=10)
n_nodes = 2 * X.shape[1] - 1
assert_equal(len(children) + n_leaves, n_nodes)
for tree_builder in _TREE_BUILDERS.values():
for this_X in (X, X[0]):
with ignore_warnings():
children, n_nodes, n_leaves, parent = assert_warns(
UserWarning, tree_builder, this_X.T, n_clusters=10)
n_nodes = 2 * X.shape[1] - 1
assert_equal(len(children) + n_leaves, n_nodes)
def test_height_linkage_tree():
# Check that the height of the results of linkage tree is sorted.
rng = np.random.RandomState(0)
mask = np.ones([10, 10], dtype=np.bool)
X = rng.randn(50, 100)
connectivity = grid_to_graph(*mask.shape)
for linkage_func in _TREE_BUILDERS.values():
children, n_nodes, n_leaves, parent = linkage_func(X.T, connectivity)
n_nodes = 2 * X.shape[1] - 1
assert_true(len(children) + n_leaves == n_nodes)
def test_agglomerative_clustering_wrong_arg_memory():
    # Test that an error is raised when memory is neither a str
    # nor a joblib.Memory instance
rng = np.random.RandomState(0)
n_samples = 100
X = rng.randn(n_samples, 50)
memory = 5
clustering = AgglomerativeClustering(memory=memory)
assert_raises(ValueError, clustering.fit, X)
def test_agglomerative_clustering():
# Check that we obtain the correct number of clusters with
# agglomerative clustering.
rng = np.random.RandomState(0)
mask = np.ones([10, 10], dtype=np.bool)
n_samples = 100
X = rng.randn(n_samples, 50)
connectivity = grid_to_graph(*mask.shape)
for linkage in ("ward", "complete", "average"):
clustering = AgglomerativeClustering(n_clusters=10,
connectivity=connectivity,
linkage=linkage)
clustering.fit(X)
# test caching
try:
tempdir = mkdtemp()
clustering = AgglomerativeClustering(
n_clusters=10, connectivity=connectivity,
memory=tempdir,
linkage=linkage)
clustering.fit(X)
labels = clustering.labels_
assert_true(np.size(np.unique(labels)) == 10)
finally:
shutil.rmtree(tempdir)
# Turn caching off now
clustering = AgglomerativeClustering(
n_clusters=10, connectivity=connectivity, linkage=linkage)
# Check that we obtain the same solution with early-stopping of the
# tree building
clustering.compute_full_tree = False
clustering.fit(X)
assert_almost_equal(normalized_mutual_info_score(clustering.labels_,
labels), 1)
clustering.connectivity = None
clustering.fit(X)
assert_true(np.size(np.unique(clustering.labels_)) == 10)
# Check that we raise a TypeError on dense matrices
clustering = AgglomerativeClustering(
n_clusters=10,
connectivity=sparse.lil_matrix(
connectivity.toarray()[:10, :10]),
linkage=linkage)
assert_raises(ValueError, clustering.fit, X)
        # Test that using ward with a metric other than euclidean raises an
        # exception
clustering = AgglomerativeClustering(
n_clusters=10,
connectivity=connectivity.toarray(),
affinity="manhattan",
linkage="ward")
assert_raises(ValueError, clustering.fit, X)
    # Test that using a metric other than euclidean works with complete linkage
for affinity in PAIRED_DISTANCES.keys():
# Compare our (structured) implementation to scipy
clustering = AgglomerativeClustering(
n_clusters=10,
connectivity=np.ones((n_samples, n_samples)),
affinity=affinity,
linkage="complete")
clustering.fit(X)
clustering2 = AgglomerativeClustering(
n_clusters=10,
connectivity=None,
affinity=affinity,
linkage="complete")
clustering2.fit(X)
assert_almost_equal(normalized_mutual_info_score(clustering2.labels_,
clustering.labels_),
1)
# Test that using a distance matrix (affinity = 'precomputed') has same
# results (with connectivity constraints)
clustering = AgglomerativeClustering(n_clusters=10,
connectivity=connectivity,
linkage="complete")
clustering.fit(X)
X_dist = pairwise_distances(X)
clustering2 = AgglomerativeClustering(n_clusters=10,
connectivity=connectivity,
affinity='precomputed',
linkage="complete")
clustering2.fit(X_dist)
assert_array_equal(clustering.labels_, clustering2.labels_)
def test_ward_agglomeration():
# Check that we obtain the correct solution in a simplistic case
rng = np.random.RandomState(0)
mask = np.ones([10, 10], dtype=np.bool)
X = rng.randn(50, 100)
connectivity = grid_to_graph(*mask.shape)
agglo = FeatureAgglomeration(n_clusters=5, connectivity=connectivity)
agglo.fit(X)
assert_true(np.size(np.unique(agglo.labels_)) == 5)
X_red = agglo.transform(X)
assert_true(X_red.shape[1] == 5)
X_full = agglo.inverse_transform(X_red)
assert_true(np.unique(X_full[0]).size == 5)
assert_array_almost_equal(agglo.transform(X_full), X_red)
# Check that fitting with no samples raises a ValueError
assert_raises(ValueError, agglo.fit, X[:0])
def assess_same_labelling(cut1, cut2):
"""Util for comparison with scipy"""
co_clust = []
for cut in [cut1, cut2]:
n = len(cut)
k = cut.max() + 1
ecut = np.zeros((n, k))
ecut[np.arange(n), cut] = 1
co_clust.append(np.dot(ecut, ecut.T))
assert_true((co_clust[0] == co_clust[1]).all())
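# Illustrative example (not part of the test suite): cut1 = [0, 0, 1] and
# cut2 = [1, 1, 0] describe the same partition up to relabelling; their
# co-clustering matrices (entry (i, j) is 1 iff samples i and j share a label)
# are identical, so assess_same_labelling treats them as the same labelling.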
def test_scikit_vs_scipy():
# Test scikit linkage with full connectivity (i.e. unstructured) vs scipy
n, p, k = 10, 5, 3
rng = np.random.RandomState(0)
    # Not using a lil_matrix here, just to check that non-sparse
    # matrices are well handled
connectivity = np.ones((n, n))
for linkage in _TREE_BUILDERS.keys():
for i in range(5):
X = .1 * rng.normal(size=(n, p))
X -= 4. * np.arange(n)[:, np.newaxis]
X -= X.mean(axis=1)[:, np.newaxis]
out = hierarchy.linkage(X, method=linkage)
children_ = out[:, :2].astype(np.int)
children, _, n_leaves, _ = _TREE_BUILDERS[linkage](X, connectivity)
cut = _hc_cut(k, children, n_leaves)
cut_ = _hc_cut(k, children_, n_leaves)
assess_same_labelling(cut, cut_)
# Test error management in _hc_cut
assert_raises(ValueError, _hc_cut, n_leaves + 1, children, n_leaves)
def test_connectivity_propagation():
# Check that connectivity in the ward tree is propagated correctly during
# merging.
X = np.array([(.014, .120), (.014, .099), (.014, .097),
(.017, .153), (.017, .153), (.018, .153),
(.018, .153), (.018, .153), (.018, .153),
(.018, .153), (.018, .153), (.018, .153),
(.018, .152), (.018, .149), (.018, .144)])
connectivity = kneighbors_graph(X, 10, include_self=False)
ward = AgglomerativeClustering(
n_clusters=4, connectivity=connectivity, linkage='ward')
# If changes are not propagated correctly, fit crashes with an
# IndexError
ward.fit(X)
def test_ward_tree_children_order():
# Check that children are ordered in the same way for both structured and
# unstructured versions of ward_tree.
# test on five random datasets
n, p = 10, 5
rng = np.random.RandomState(0)
connectivity = np.ones((n, n))
for i in range(5):
X = .1 * rng.normal(size=(n, p))
X -= 4. * np.arange(n)[:, np.newaxis]
X -= X.mean(axis=1)[:, np.newaxis]
out_unstructured = ward_tree(X)
out_structured = ward_tree(X, connectivity=connectivity)
assert_array_equal(out_unstructured[0], out_structured[0])
def test_ward_linkage_tree_return_distance():
# Test return_distance option on linkage and ward trees
# test that return_distance when set true, gives same
# output on both structured and unstructured clustering.
n, p = 10, 5
rng = np.random.RandomState(0)
connectivity = np.ones((n, n))
for i in range(5):
X = .1 * rng.normal(size=(n, p))
X -= 4. * np.arange(n)[:, np.newaxis]
X -= X.mean(axis=1)[:, np.newaxis]
out_unstructured = ward_tree(X, return_distance=True)
out_structured = ward_tree(X, connectivity=connectivity,
return_distance=True)
# get children
children_unstructured = out_unstructured[0]
children_structured = out_structured[0]
# check if we got the same clusters
assert_array_equal(children_unstructured, children_structured)
# check if the distances are the same
dist_unstructured = out_unstructured[-1]
dist_structured = out_structured[-1]
assert_array_almost_equal(dist_unstructured, dist_structured)
for linkage in ['average', 'complete']:
structured_items = linkage_tree(
X, connectivity=connectivity, linkage=linkage,
return_distance=True)[-1]
unstructured_items = linkage_tree(
X, linkage=linkage, return_distance=True)[-1]
structured_dist = structured_items[-1]
unstructured_dist = unstructured_items[-1]
structured_children = structured_items[0]
unstructured_children = unstructured_items[0]
assert_array_almost_equal(structured_dist, unstructured_dist)
assert_array_almost_equal(
structured_children, unstructured_children)
# test on the following dataset where we know the truth
# taken from scipy/cluster/tests/hierarchy_test_data.py
X = np.array([[1.43054825, -7.5693489],
[6.95887839, 6.82293382],
[2.87137846, -9.68248579],
[7.87974764, -6.05485803],
[8.24018364, -6.09495602],
[7.39020262, 8.54004355]])
# truth
linkage_X_ward = np.array([[3., 4., 0.36265956, 2.],
[1., 5., 1.77045373, 2.],
[0., 2., 2.55760419, 2.],
[6., 8., 9.10208346, 4.],
[7., 9., 24.7784379, 6.]])
linkage_X_complete = np.array(
[[3., 4., 0.36265956, 2.],
[1., 5., 1.77045373, 2.],
[0., 2., 2.55760419, 2.],
[6., 8., 6.96742194, 4.],
[7., 9., 18.77445997, 6.]])
linkage_X_average = np.array(
[[3., 4., 0.36265956, 2.],
[1., 5., 1.77045373, 2.],
[0., 2., 2.55760419, 2.],
[6., 8., 6.55832839, 4.],
[7., 9., 15.44089605, 6.]])
n_samples, n_features = np.shape(X)
connectivity_X = np.ones((n_samples, n_samples))
out_X_unstructured = ward_tree(X, return_distance=True)
out_X_structured = ward_tree(X, connectivity=connectivity_X,
return_distance=True)
# check that the labels are the same
assert_array_equal(linkage_X_ward[:, :2], out_X_unstructured[0])
assert_array_equal(linkage_X_ward[:, :2], out_X_structured[0])
# check that the distances are correct
assert_array_almost_equal(linkage_X_ward[:, 2], out_X_unstructured[4])
assert_array_almost_equal(linkage_X_ward[:, 2], out_X_structured[4])
linkage_options = ['complete', 'average']
X_linkage_truth = [linkage_X_complete, linkage_X_average]
for (linkage, X_truth) in zip(linkage_options, X_linkage_truth):
out_X_unstructured = linkage_tree(
X, return_distance=True, linkage=linkage)
out_X_structured = linkage_tree(
X, connectivity=connectivity_X, linkage=linkage,
return_distance=True)
# check that the labels are the same
assert_array_equal(X_truth[:, :2], out_X_unstructured[0])
assert_array_equal(X_truth[:, :2], out_X_structured[0])
# check that the distances are correct
assert_array_almost_equal(X_truth[:, 2], out_X_unstructured[4])
assert_array_almost_equal(X_truth[:, 2], out_X_structured[4])
def test_connectivity_fixing_non_lil():
# Check non regression of a bug if a non item assignable connectivity is
# provided with more than one component.
# create dummy data
x = np.array([[0, 0], [1, 1]])
# create a mask with several components to force connectivity fixing
m = np.array([[True, False], [False, True]])
c = grid_to_graph(n_x=2, n_y=2, mask=m)
w = AgglomerativeClustering(connectivity=c, linkage='ward')
assert_warns(UserWarning, w.fit, x)
def test_int_float_dict():
rng = np.random.RandomState(0)
keys = np.unique(rng.randint(100, size=10).astype(np.intp))
values = rng.rand(len(keys))
d = IntFloatDict(keys, values)
for key, value in zip(keys, values):
assert d[key] == value
other_keys = np.arange(50).astype(np.intp)[::2]
other_values = 0.5 * np.ones(50)[::2]
other = IntFloatDict(other_keys, other_values)
# Complete smoke test
max_merge(d, other, mask=np.ones(100, dtype=np.intp), n_a=1, n_b=1)
average_merge(d, other, mask=np.ones(100, dtype=np.intp), n_a=1, n_b=1)
def test_connectivity_callable():
rng = np.random.RandomState(0)
X = rng.rand(20, 5)
connectivity = kneighbors_graph(X, 3, include_self=False)
aglc1 = AgglomerativeClustering(connectivity=connectivity)
aglc2 = AgglomerativeClustering(
connectivity=partial(kneighbors_graph, n_neighbors=3, include_self=False))
aglc1.fit(X)
aglc2.fit(X)
assert_array_equal(aglc1.labels_, aglc2.labels_)
def test_connectivity_ignores_diagonal():
rng = np.random.RandomState(0)
X = rng.rand(20, 5)
connectivity = kneighbors_graph(X, 3, include_self=False)
connectivity_include_self = kneighbors_graph(X, 3, include_self=True)
aglc1 = AgglomerativeClustering(connectivity=connectivity)
aglc2 = AgglomerativeClustering(connectivity=connectivity_include_self)
aglc1.fit(X)
aglc2.fit(X)
assert_array_equal(aglc1.labels_, aglc2.labels_)
def test_compute_full_tree():
# Test that the full tree is computed if n_clusters is small
rng = np.random.RandomState(0)
X = rng.randn(10, 2)
connectivity = kneighbors_graph(X, 5, include_self=False)
# When n_clusters is less, the full tree should be built
# that is the number of merges should be n_samples - 1
agc = AgglomerativeClustering(n_clusters=2, connectivity=connectivity)
agc.fit(X)
n_samples = X.shape[0]
n_nodes = agc.children_.shape[0]
assert_equal(n_nodes, n_samples - 1)
# When n_clusters is large, greater than max of 100 and 0.02 * n_samples.
# we should stop when there are n_clusters.
n_clusters = 101
X = rng.randn(200, 2)
connectivity = kneighbors_graph(X, 10, include_self=False)
agc = AgglomerativeClustering(n_clusters=n_clusters,
connectivity=connectivity)
agc.fit(X)
n_samples = X.shape[0]
n_nodes = agc.children_.shape[0]
assert_equal(n_nodes, n_samples - n_clusters)
def test_n_components():
# Test n_components returned by linkage, average and ward tree
rng = np.random.RandomState(0)
X = rng.rand(5, 5)
# Connectivity matrix having five components.
connectivity = np.eye(5)
for linkage_func in _TREE_BUILDERS.values():
assert_equal(ignore_warnings(linkage_func)(X, connectivity)[1], 5)
def test_agg_n_clusters():
# Test that an error is raised when n_clusters <= 0
rng = np.random.RandomState(0)
X = rng.rand(20, 10)
for n_clus in [-1, 0]:
agc = AgglomerativeClustering(n_clusters=n_clus)
msg = ("n_clusters should be an integer greater than 0."
" %s was provided." % str(agc.n_clusters))
assert_raise_message(ValueError, msg, agc.fit, X)
| bsd-3-clause |
pv/scikit-learn | sklearn/utils/tests/test_multiclass.py | 72 | 15350 | from __future__ import division
import numpy as np
import scipy.sparse as sp
from itertools import product
from functools import partial
from sklearn.externals.six.moves import xrange
from sklearn.externals.six import iteritems
from scipy.sparse import issparse
from scipy.sparse import csc_matrix
from scipy.sparse import csr_matrix
from scipy.sparse import coo_matrix
from scipy.sparse import dok_matrix
from scipy.sparse import lil_matrix
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.multiclass import unique_labels
from sklearn.utils.multiclass import is_label_indicator_matrix
from sklearn.utils.multiclass import is_multilabel
from sklearn.utils.multiclass import is_sequence_of_sequences
from sklearn.utils.multiclass import type_of_target
from sklearn.utils.multiclass import class_distribution
class NotAnArray(object):
"""An object that is convertable to an array. This is useful to
simulate a Pandas timeseries."""
def __init__(self, data):
self.data = data
def __array__(self):
return self.data
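# Illustrative behaviour (not part of the test suite): NotAnArray(np.arange(3))
# is not an ndarray itself, but np.asarray() on it recovers the wrapped data
# through the __array__ protocol, mimicking array-like containers such as
# pandas objects.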
EXAMPLES = {
'multilabel-indicator': [
# valid when the data is formated as sparse or dense, identified
# by CSR format when the testing takes place
csr_matrix(np.random.RandomState(42).randint(2, size=(10, 10))),
csr_matrix(np.array([[0, 1], [1, 0]])),
csr_matrix(np.array([[0, 1], [1, 0]], dtype=np.bool)),
csr_matrix(np.array([[0, 1], [1, 0]], dtype=np.int8)),
csr_matrix(np.array([[0, 1], [1, 0]], dtype=np.uint8)),
csr_matrix(np.array([[0, 1], [1, 0]], dtype=np.float)),
csr_matrix(np.array([[0, 1], [1, 0]], dtype=np.float32)),
csr_matrix(np.array([[0, 0], [0, 0]])),
csr_matrix(np.array([[0, 1]])),
# Only valid when data is dense
np.array([[-1, 1], [1, -1]]),
np.array([[-3, 3], [3, -3]]),
NotAnArray(np.array([[-3, 3], [3, -3]])),
],
'multilabel-sequences': [
[[0, 1]],
[[0], [1]],
[[1, 2, 3]],
[[1, 2, 1]], # duplicate values, why not?
[[1], [2], [0, 1]],
[[1], [2]],
[[]],
[()],
np.array([[], [1, 2]], dtype='object'),
NotAnArray(np.array([[], [1, 2]], dtype='object')),
],
'multiclass': [
[1, 0, 2, 2, 1, 4, 2, 4, 4, 4],
np.array([1, 0, 2]),
np.array([1, 0, 2], dtype=np.int8),
np.array([1, 0, 2], dtype=np.uint8),
np.array([1, 0, 2], dtype=np.float),
np.array([1, 0, 2], dtype=np.float32),
np.array([[1], [0], [2]]),
NotAnArray(np.array([1, 0, 2])),
[0, 1, 2],
['a', 'b', 'c'],
np.array([u'a', u'b', u'c']),
np.array([u'a', u'b', u'c'], dtype=object),
np.array(['a', 'b', 'c'], dtype=object),
],
'multiclass-multioutput': [
np.array([[1, 0, 2, 2], [1, 4, 2, 4]]),
np.array([[1, 0, 2, 2], [1, 4, 2, 4]], dtype=np.int8),
np.array([[1, 0, 2, 2], [1, 4, 2, 4]], dtype=np.uint8),
np.array([[1, 0, 2, 2], [1, 4, 2, 4]], dtype=np.float),
np.array([[1, 0, 2, 2], [1, 4, 2, 4]], dtype=np.float32),
np.array([['a', 'b'], ['c', 'd']]),
np.array([[u'a', u'b'], [u'c', u'd']]),
np.array([[u'a', u'b'], [u'c', u'd']], dtype=object),
np.array([[1, 0, 2]]),
NotAnArray(np.array([[1, 0, 2]])),
],
'binary': [
[0, 1],
[1, 1],
[],
[0],
np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1]),
np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1], dtype=np.bool),
np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1], dtype=np.int8),
np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1], dtype=np.uint8),
np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1], dtype=np.float),
np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1], dtype=np.float32),
np.array([[0], [1]]),
NotAnArray(np.array([[0], [1]])),
[1, -1],
[3, 5],
['a'],
['a', 'b'],
['abc', 'def'],
np.array(['abc', 'def']),
[u'a', u'b'],
np.array(['abc', 'def'], dtype=object),
],
'continuous': [
[1e-5],
[0, .5],
np.array([[0], [.5]]),
np.array([[0], [.5]], dtype=np.float32),
],
'continuous-multioutput': [
np.array([[0, .5], [.5, 0]]),
np.array([[0, .5], [.5, 0]], dtype=np.float32),
np.array([[0, .5]]),
],
'unknown': [
# empty second dimension
np.array([[], []]),
# 3d
np.array([[[0, 1], [2, 3]], [[4, 5], [6, 7]]]),
# not currently supported sequence of sequences
np.array([np.array([]), np.array([1, 2, 3])], dtype=object),
[np.array([]), np.array([1, 2, 3])],
[set([1, 2, 3]), set([1, 2])],
[frozenset([1, 2, 3]), frozenset([1, 2])],
# and also confusable as sequences of sequences
[{0: 'a', 1: 'b'}, {0: 'a'}],
]
}
NON_ARRAY_LIKE_EXAMPLES = [
set([1, 2, 3]),
{0: 'a', 1: 'b'},
{0: [5], 1: [5]},
'abc',
frozenset([1, 2, 3]),
None,
]
def test_unique_labels():
# Empty iterable
assert_raises(ValueError, unique_labels)
# Multiclass problem
assert_array_equal(unique_labels(xrange(10)), np.arange(10))
assert_array_equal(unique_labels(np.arange(10)), np.arange(10))
assert_array_equal(unique_labels([4, 0, 2]), np.array([0, 2, 4]))
# Multilabels
assert_array_equal(assert_warns(DeprecationWarning,
unique_labels,
[(0, 1, 2), (0,), tuple(), (2, 1)]),
np.arange(3))
assert_array_equal(assert_warns(DeprecationWarning,
unique_labels,
[[0, 1, 2], [0], list(), [2, 1]]),
np.arange(3))
assert_array_equal(unique_labels(np.array([[0, 0, 1],
[1, 0, 1],
[0, 0, 0]])),
np.arange(3))
assert_array_equal(unique_labels(np.array([[0, 0, 1],
[0, 0, 0]])),
np.arange(3))
# Several arrays passed
assert_array_equal(unique_labels([4, 0, 2], xrange(5)),
np.arange(5))
assert_array_equal(unique_labels((0, 1, 2), (0,), (2, 1)),
np.arange(3))
# Border line case with binary indicator matrix
assert_raises(ValueError, unique_labels, [4, 0, 2], np.ones((5, 5)))
assert_raises(ValueError, unique_labels, np.ones((5, 4)), np.ones((5, 5)))
assert_array_equal(unique_labels(np.ones((4, 5)), np.ones((5, 5))),
np.arange(5))
# Some tests with strings input
assert_array_equal(unique_labels(["a", "b", "c"], ["d"]),
["a", "b", "c", "d"])
assert_array_equal(assert_warns(DeprecationWarning, unique_labels,
[["a", "b"], ["c"]], [["d"]]),
["a", "b", "c", "d"])
@ignore_warnings
def test_unique_labels_non_specific():
# Test unique_labels with a variety of collected examples
# Smoke test for all supported format
for format in ["binary", "multiclass", "multilabel-sequences",
"multilabel-indicator"]:
for y in EXAMPLES[format]:
unique_labels(y)
# We don't support those format at the moment
for example in NON_ARRAY_LIKE_EXAMPLES:
assert_raises(ValueError, unique_labels, example)
for y_type in ["unknown", "continuous", 'continuous-multioutput',
'multiclass-multioutput']:
for example in EXAMPLES[y_type]:
assert_raises(ValueError, unique_labels, example)
@ignore_warnings
def test_unique_labels_mixed_types():
# Mix of multilabel-indicator and multilabel-sequences
mix_multilabel_format = product(EXAMPLES["multilabel-indicator"],
EXAMPLES["multilabel-sequences"])
for y_multilabel, y_multiclass in mix_multilabel_format:
assert_raises(ValueError, unique_labels, y_multiclass, y_multilabel)
assert_raises(ValueError, unique_labels, y_multilabel, y_multiclass)
# Mix with binary or multiclass and multilabel
mix_clf_format = product(EXAMPLES["multilabel-indicator"] +
EXAMPLES["multilabel-sequences"],
EXAMPLES["multiclass"] +
EXAMPLES["binary"])
for y_multilabel, y_multiclass in mix_clf_format:
assert_raises(ValueError, unique_labels, y_multiclass, y_multilabel)
assert_raises(ValueError, unique_labels, y_multilabel, y_multiclass)
# Mix string and number input type
assert_raises(ValueError, unique_labels, [[1, 2], [3]],
[["a", "d"]])
assert_raises(ValueError, unique_labels, ["1", 2])
assert_raises(ValueError, unique_labels, [["1", 2], [3]])
assert_raises(ValueError, unique_labels, [["1", "2"], [3]])
assert_array_equal(unique_labels([(2,), (0, 2,)], [(), ()]), [0, 2])
assert_array_equal(unique_labels([("2",), ("0", "2",)], [(), ()]),
["0", "2"])
@ignore_warnings
def test_is_multilabel():
for group, group_examples in iteritems(EXAMPLES):
if group.startswith('multilabel'):
assert_, exp = assert_true, 'True'
else:
assert_, exp = assert_false, 'False'
for example in group_examples:
assert_(is_multilabel(example),
msg='is_multilabel(%r) should be %s' % (example, exp))
def test_is_label_indicator_matrix():
for group, group_examples in iteritems(EXAMPLES):
if group in ['multilabel-indicator']:
dense_assert_, dense_exp = assert_true, 'True'
else:
dense_assert_, dense_exp = assert_false, 'False'
for example in group_examples:
# Only mark explicitly defined sparse examples as valid sparse
# multilabel-indicators
if group == 'multilabel-indicator' and issparse(example):
sparse_assert_, sparse_exp = assert_true, 'True'
else:
sparse_assert_, sparse_exp = assert_false, 'False'
if (issparse(example) or
(hasattr(example, '__array__') and
np.asarray(example).ndim == 2 and
np.asarray(example).dtype.kind in 'biuf' and
np.asarray(example).shape[1] > 0)):
examples_sparse = [sparse_matrix(example)
for sparse_matrix in [coo_matrix,
csc_matrix,
csr_matrix,
dok_matrix,
lil_matrix]]
for exmpl_sparse in examples_sparse:
sparse_assert_(is_label_indicator_matrix(exmpl_sparse),
msg=('is_label_indicator_matrix(%r)'
' should be %s')
% (exmpl_sparse, sparse_exp))
# Densify sparse examples before testing
if issparse(example):
example = example.toarray()
dense_assert_(is_label_indicator_matrix(example),
msg='is_label_indicator_matrix(%r) should be %s'
% (example, dense_exp))
def test_is_sequence_of_sequences():
for group, group_examples in iteritems(EXAMPLES):
if group == 'multilabel-sequences':
assert_, exp = assert_true, 'True'
check = partial(assert_warns, DeprecationWarning,
is_sequence_of_sequences)
else:
assert_, exp = assert_false, 'False'
check = is_sequence_of_sequences
for example in group_examples:
assert_(check(example),
msg='is_sequence_of_sequences(%r) should be %s'
% (example, exp))
@ignore_warnings
def test_type_of_target():
for group, group_examples in iteritems(EXAMPLES):
for example in group_examples:
assert_equal(type_of_target(example), group,
msg='type_of_target(%r) should be %r, got %r'
% (example, group, type_of_target(example)))
for example in NON_ARRAY_LIKE_EXAMPLES:
assert_raises(ValueError, type_of_target, example)
def test_class_distribution():
y = np.array([[1, 0, 0, 1],
[2, 2, 0, 1],
[1, 3, 0, 1],
[4, 2, 0, 1],
[2, 0, 0, 1],
[1, 3, 0, 1]])
# Define the sparse matrix with a mix of implicit and explicit zeros
data = np.array([1, 2, 1, 4, 2, 1, 0, 2, 3, 2, 3, 1, 1, 1, 1, 1, 1])
indices = np.array([0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 5, 0, 1, 2, 3, 4, 5])
indptr = np.array([0, 6, 11, 11, 17])
y_sp = sp.csc_matrix((data, indices, indptr), shape=(6, 4))
classes, n_classes, class_prior = class_distribution(y)
classes_sp, n_classes_sp, class_prior_sp = class_distribution(y_sp)
classes_expected = [[1, 2, 4],
[0, 2, 3],
[0],
[1]]
n_classes_expected = [3, 3, 1, 1]
class_prior_expected = [[3/6, 2/6, 1/6],
[1/3, 1/3, 1/3],
[1.0],
[1.0]]
for k in range(y.shape[1]):
assert_array_almost_equal(classes[k], classes_expected[k])
assert_array_almost_equal(n_classes[k], n_classes_expected[k])
assert_array_almost_equal(class_prior[k], class_prior_expected[k])
assert_array_almost_equal(classes_sp[k], classes_expected[k])
assert_array_almost_equal(n_classes_sp[k], n_classes_expected[k])
assert_array_almost_equal(class_prior_sp[k], class_prior_expected[k])
# Test again with explicit sample weights
(classes,
n_classes,
class_prior) = class_distribution(y, [1.0, 2.0, 1.0, 2.0, 1.0, 2.0])
(classes_sp,
n_classes_sp,
class_prior_sp) = class_distribution(y, [1.0, 2.0, 1.0, 2.0, 1.0, 2.0])
class_prior_expected = [[4/9, 3/9, 2/9],
[2/9, 4/9, 3/9],
[1.0],
[1.0]]
for k in range(y.shape[1]):
assert_array_almost_equal(classes[k], classes_expected[k])
assert_array_almost_equal(n_classes[k], n_classes_expected[k])
assert_array_almost_equal(class_prior[k], class_prior_expected[k])
assert_array_almost_equal(classes_sp[k], classes_expected[k])
assert_array_almost_equal(n_classes_sp[k], n_classes_expected[k])
assert_array_almost_equal(class_prior_sp[k], class_prior_expected[k])
| bsd-3-clause |
xubenben/scikit-learn | examples/ensemble/plot_gradient_boosting_regression.py | 227 | 2520 | """
============================
Gradient Boosting regression
============================
Demonstrate Gradient Boosting on the Boston housing dataset.
This example fits a Gradient Boosting model with least squares loss and
500 regression trees of depth 4.
"""
print(__doc__)
# Author: Peter Prettenhofer <[email protected]>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import ensemble
from sklearn import datasets
from sklearn.utils import shuffle
from sklearn.metrics import mean_squared_error
###############################################################################
# Load data
boston = datasets.load_boston()
X, y = shuffle(boston.data, boston.target, random_state=13)
X = X.astype(np.float32)
offset = int(X.shape[0] * 0.9)
X_train, y_train = X[:offset], y[:offset]
X_test, y_test = X[offset:], y[offset:]
###############################################################################
# Fit regression model
params = {'n_estimators': 500, 'max_depth': 4, 'min_samples_split': 1,
'learning_rate': 0.01, 'loss': 'ls'}
clf = ensemble.GradientBoostingRegressor(**params)
clf.fit(X_train, y_train)
mse = mean_squared_error(y_test, clf.predict(X_test))
print("MSE: %.4f" % mse)
###############################################################################
# Plot training deviance
# compute test set deviance
test_score = np.zeros((params['n_estimators'],), dtype=np.float64)
for i, y_pred in enumerate(clf.staged_decision_function(X_test)):
test_score[i] = clf.loss_(y_test, y_pred)
plt.figure(figsize=(12, 6))
plt.subplot(1, 2, 1)
plt.title('Deviance')
plt.plot(np.arange(params['n_estimators']) + 1, clf.train_score_, 'b-',
label='Training Set Deviance')
plt.plot(np.arange(params['n_estimators']) + 1, test_score, 'r-',
label='Test Set Deviance')
plt.legend(loc='upper right')
plt.xlabel('Boosting Iterations')
plt.ylabel('Deviance')
###############################################################################
# Plot feature importance
feature_importance = clf.feature_importances_
# make importances relative to max importance
feature_importance = 100.0 * (feature_importance / feature_importance.max())
sorted_idx = np.argsort(feature_importance)
pos = np.arange(sorted_idx.shape[0]) + .5
plt.subplot(1, 2, 2)
plt.barh(pos, feature_importance[sorted_idx], align='center')
plt.yticks(pos, boston.feature_names[sorted_idx])
plt.xlabel('Relative Importance')
plt.title('Variable Importance')
plt.show()
| bsd-3-clause |
jniediek/combinato | tools/plot_thr_and_artifacts.py | 1 | 4805 | # -*- encoding: utf-8 -*-
# JN 2015-05-11
# what do I want in this script?
# * extraction thresholds in each region
# (and regression line, or other measure of variability)
# * rationale: it's interesting to see whether highly variable channels are
#   in one macro
# * firing rate stability for non-artifacts (correlated with threshold?)
# * percentage of artifacts of each category (correlated within region?)
# structure of the script
# region -> find channels -> plot extraction thr and firing rates
# -> write down artifact criteria
"""
plot extraction thresholds and spike counts over time
"""
from __future__ import print_function, division
import os
import numpy as np # pylint: disable=E1101
import tables
import matplotlib.pyplot as mpl
import matplotlib.cm as cm
from combinato import SortingManagerGrouped, get_regions, artifact_id_to_name
print(artifact_id_to_name)
FIGSIZE = (7, 6)
REGIONS = ('A', 'AH', 'MH', 'PH', 'EC', 'PHC', 'I')
LEFT_COLORS = cm.spectral(np.linspace(0, 1, len(REGIONS)))
RIGHT_COLORS = cm.summer(np.linspace(0, 1, len(REGIONS)))
NUM_COLORS = cm.winter(np.linspace(0, 1, 8))
COLORDICT = {}
for i, region in enumerate(REGIONS):
COLORDICT['L' + region] = LEFT_COLORS[i]
COLORDICT['R' + region] = RIGHT_COLORS[i]
for i in range(1, 9):
COLORDICT[str(i)] = NUM_COLORS[i-1]
JOBS = ('thr', 'arti')
def plotthr(thrplot, fireplot, thr, times, color='r'):
"""
plot the threshold data
"""
xdata = thr[:, :2].ravel()
xdata -= xdata[0]
xdata /= 6e4
xlim = xdata[[0, -1]]
thrs = np.vstack((thr[:, 2], thr[:, 2])).T.ravel()
thrplot.plot(xdata, thrs, color=color)
thrplot.set_xlim(xlim)
xtimes = (times - times[0])/6e4
# countdata = np.linspace(0, 1, len(xtimes))
# fireplot.plot(xtimes, countdata, color=color)
# fireplot.set_xlim(xlim)
bins = np.append(thr[:, 0], thr[-1, 1])
# bins /= 1e3
spcount, _ = np.histogram(times, bins=bins)
spcount = spcount.astype(float)
spcount /= times.shape[0]
print(spcount)
spplotdata = np.vstack((spcount, spcount)).T.ravel()
fireplot.plot(xdata, spplotdata, color=color)
def create_plots():
fig = mpl.figure(figsize=FIGSIZE)
plot = fig.add_subplot(1, 2, 1)
plot.set_ylabel(u'µV')
plot.set_xlabel(u'min')
plot.set_title(u'Extraction threshold over time')
plot2 = fig.add_subplot(1, 2, 2)
# plot2 = plot.twinx()
plot2.set_xlabel(u'min')
plot2.set_ylabel('% fired')
plot2.set_title('Spike count over time')
return plot, plot2
def plotarti(artifacts):
"""
plots artifact statistics
"""
tot = len(artifacts)
for artid, name in artifact_id_to_name:
perc = (artifacts == artid).sum()/tot
print('{}: {:.1%}'.format(name, perc))
def main(fnames, sign='pos', title=''):
"""
opens the file, calls plot
"""
thrplot, fireplot = create_plots()
thrplot.set_title(title)
thr_legend_handles = {}
for fname in fnames:
if os.path.isdir(fname):
fname = os.path.join(fname, 'data_' + fname + '.h5')
man = SortingManagerGrouped(fname)
thr = man.h5datafile.root.thr[:]
times = man.h5datafile.get_node('/' + sign, 'times')[:]
if not len(times):
continue
try:
artifacts = man.h5datafile.get_node('/' + sign, 'artifacts')[:]
except tables.NoSuchNodeError:
print('No artifacts defined')
artifacts = None
if man.header is not None:
entname = man.header['AcqEntName']
print(entname[-1])
color = COLORDICT[entname[-1]]
entname = entname[-1]
else:
color = 'k'
entname = 'unknown region'
del man
if 'thr' in JOBS:
plotthr(thrplot, fireplot, thr, times, color)
if entname not in thr_legend_handles:
thr_legend_handles[entname] = mpl.Line2D([0], [0], color=color,
label=entname)
if 'arti' in JOBS:
if artifacts is not None:
plotarti(artifacts)
# thrplot.legend(handles=thr_legend_handles.values())
def loop_over_regions(path):
"""
do the plots by region
"""
from collections import defaultdict
regions = get_regions(path)
regions_to_fnames = defaultdict(list)
for reg in regions:
ncsfiles = regions[reg]
for fname in ncsfiles:
if os.path.isdir(fname[:-4]):
regions_to_fnames[reg].append(os.path.basename(fname[:-4]))
for reg in regions_to_fnames:
main(regions_to_fnames[reg], 'pos', reg)
if __name__ == "__main__":
loop_over_regions(os.getcwd())
mpl.show()
| mit |
yonglehou/scikit-learn | sklearn/cluster/tests/test_birch.py | 342 | 5603 | """
Tests for the birch clustering algorithm.
"""
from scipy import sparse
import numpy as np
from sklearn.cluster.tests.common import generate_clustered_data
from sklearn.cluster.birch import Birch
from sklearn.cluster.hierarchical import AgglomerativeClustering
from sklearn.datasets import make_blobs
from sklearn.linear_model import ElasticNet
from sklearn.metrics import pairwise_distances_argmin, v_measure_score
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
def test_n_samples_leaves_roots():
# Sanity check for the number of samples in leaves and roots
X, y = make_blobs(n_samples=10)
brc = Birch()
brc.fit(X)
n_samples_root = sum([sc.n_samples_ for sc in brc.root_.subclusters_])
n_samples_leaves = sum([sc.n_samples_ for leaf in brc._get_leaves()
for sc in leaf.subclusters_])
assert_equal(n_samples_leaves, X.shape[0])
assert_equal(n_samples_root, X.shape[0])
def test_partial_fit():
# Test that fit is equivalent to calling partial_fit multiple times
X, y = make_blobs(n_samples=100)
brc = Birch(n_clusters=3)
brc.fit(X)
brc_partial = Birch(n_clusters=None)
brc_partial.partial_fit(X[:50])
brc_partial.partial_fit(X[50:])
assert_array_equal(brc_partial.subcluster_centers_,
brc.subcluster_centers_)
# Test that same global labels are obtained after calling partial_fit
# with None
brc_partial.set_params(n_clusters=3)
brc_partial.partial_fit(None)
assert_array_equal(brc_partial.subcluster_labels_, brc.subcluster_labels_)
def test_birch_predict():
# Test the predict method predicts the nearest centroid.
rng = np.random.RandomState(0)
X = generate_clustered_data(n_clusters=3, n_features=3,
n_samples_per_cluster=10)
# n_samples * n_samples_per_cluster
shuffle_indices = np.arange(30)
rng.shuffle(shuffle_indices)
X_shuffle = X[shuffle_indices, :]
brc = Birch(n_clusters=4, threshold=1.)
brc.fit(X_shuffle)
centroids = brc.subcluster_centers_
assert_array_equal(brc.labels_, brc.predict(X_shuffle))
nearest_centroid = pairwise_distances_argmin(X_shuffle, centroids)
assert_almost_equal(v_measure_score(nearest_centroid, brc.labels_), 1.0)
def test_n_clusters():
# Test that n_clusters param works properly
X, y = make_blobs(n_samples=100, centers=10)
brc1 = Birch(n_clusters=10)
brc1.fit(X)
assert_greater(len(brc1.subcluster_centers_), 10)
assert_equal(len(np.unique(brc1.labels_)), 10)
# Test that n_clusters = Agglomerative Clustering gives
# the same results.
gc = AgglomerativeClustering(n_clusters=10)
brc2 = Birch(n_clusters=gc)
brc2.fit(X)
assert_array_equal(brc1.subcluster_labels_, brc2.subcluster_labels_)
assert_array_equal(brc1.labels_, brc2.labels_)
# Test that the wrong global clustering step raises an Error.
clf = ElasticNet()
brc3 = Birch(n_clusters=clf)
assert_raises(ValueError, brc3.fit, X)
# Test that a small number of clusters raises a warning.
brc4 = Birch(threshold=10000.)
assert_warns(UserWarning, brc4.fit, X)
def test_sparse_X():
# Test that sparse and dense data give same results
X, y = make_blobs(n_samples=100, centers=10)
brc = Birch(n_clusters=10)
brc.fit(X)
csr = sparse.csr_matrix(X)
brc_sparse = Birch(n_clusters=10)
brc_sparse.fit(csr)
assert_array_equal(brc.labels_, brc_sparse.labels_)
assert_array_equal(brc.subcluster_centers_,
brc_sparse.subcluster_centers_)
def check_branching_factor(node, branching_factor):
subclusters = node.subclusters_
assert_greater_equal(branching_factor, len(subclusters))
for cluster in subclusters:
if cluster.child_:
check_branching_factor(cluster.child_, branching_factor)
def test_branching_factor():
# Test that nodes have at max branching_factor number of subclusters
X, y = make_blobs()
branching_factor = 9
# Purposefully set a low threshold to maximize the subclusters.
brc = Birch(n_clusters=None, branching_factor=branching_factor,
threshold=0.01)
brc.fit(X)
check_branching_factor(brc.root_, branching_factor)
brc = Birch(n_clusters=3, branching_factor=branching_factor,
threshold=0.01)
brc.fit(X)
check_branching_factor(brc.root_, branching_factor)
# Raises error when branching_factor is set to one.
brc = Birch(n_clusters=None, branching_factor=1, threshold=0.01)
assert_raises(ValueError, brc.fit, X)
def check_threshold(birch_instance, threshold):
"""Use the leaf linked list for traversal"""
current_leaf = birch_instance.dummy_leaf_.next_leaf_
while current_leaf:
subclusters = current_leaf.subclusters_
for sc in subclusters:
assert_greater_equal(threshold, sc.radius)
current_leaf = current_leaf.next_leaf_
def test_threshold():
# Test that the leaf subclusters have a threshold lesser than radius
X, y = make_blobs(n_samples=80, centers=4)
brc = Birch(threshold=0.5, n_clusters=None)
brc.fit(X)
check_threshold(brc, 0.5)
brc = Birch(threshold=5.0, n_clusters=None)
brc.fit(X)
check_threshold(brc, 5.)
| bsd-3-clause |
Myasuka/scikit-learn | benchmarks/bench_covertype.py | 154 | 7296 | """
===========================
Covertype dataset benchmark
===========================
Benchmark stochastic gradient descent (SGD), Liblinear, and Naive Bayes, CART
(decision tree), RandomForest and Extra-Trees on the forest covertype dataset
of Blackard, Jock, and Dean [1]. The dataset comprises 581,012 samples. It is
low dimensional with 54 features and a sparsity of approx. 23%. Here, we
consider the task of predicting class 1 (spruce/fir). The classification
performance of SGD is competitive with Liblinear while being two orders of
magnitude faster to train::
[..]
Classification performance:
===========================
Classifier train-time test-time error-rate
--------------------------------------------
liblinear 15.9744s 0.0705s 0.2305
GaussianNB 3.0666s 0.3884s 0.4841
SGD 1.0558s 0.1152s 0.2300
CART 79.4296s 0.0523s 0.0469
RandomForest 1190.1620s 0.5881s 0.0243
ExtraTrees 640.3194s 0.6495s 0.0198
The same task has been used in a number of papers including:
* `"SVM Optimization: Inverse Dependence on Training Set Size"
<http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.139.2112>`_
S. Shalev-Shwartz, N. Srebro - In Proceedings of ICML '08.
* `"Pegasos: Primal estimated sub-gradient solver for svm"
<http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.74.8513>`_
S. Shalev-Shwartz, Y. Singer, N. Srebro - In Proceedings of ICML '07.
* `"Training Linear SVMs in Linear Time"
<www.cs.cornell.edu/People/tj/publications/joachims_06a.pdf>`_
T. Joachims - In SIGKDD '06
[1] http://archive.ics.uci.edu/ml/datasets/Covertype
"""
from __future__ import division, print_function
# Author: Peter Prettenhofer <[email protected]>
# Arnaud Joly <[email protected]>
# License: BSD 3 clause
import os
from time import time
import argparse
import numpy as np
from sklearn.datasets import fetch_covtype, get_data_home
from sklearn.svm import LinearSVC
from sklearn.linear_model import SGDClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, ExtraTreesClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.metrics import zero_one_loss
from sklearn.externals.joblib import Memory
from sklearn.utils import check_array
# Memoize the data extraction and memory map the resulting
# train / test splits in readonly mode
memory = Memory(os.path.join(get_data_home(), 'covertype_benchmark_data'),
mmap_mode='r')
@memory.cache
def load_data(dtype=np.float32, order='C', random_state=13):
"""Load the data, then cache and memmap the train/test split"""
######################################################################
## Load dataset
print("Loading dataset...")
data = fetch_covtype(download_if_missing=True, shuffle=True,
random_state=random_state)
X = check_array(data['data'], dtype=dtype, order=order)
y = (data['target'] != 1).astype(np.int)
## Create train-test split (as [Joachims, 2006])
print("Creating train-test split...")
n_train = 522911
X_train = X[:n_train]
y_train = y[:n_train]
X_test = X[n_train:]
y_test = y[n_train:]
## Standardize first 10 features (the numerical ones)
mean = X_train.mean(axis=0)
std = X_train.std(axis=0)
mean[10:] = 0.0
std[10:] = 1.0
X_train = (X_train - mean) / std
X_test = (X_test - mean) / std
return X_train, X_test, y_train, y_test
ESTIMATORS = {
'GBRT': GradientBoostingClassifier(n_estimators=250),
'ExtraTrees': ExtraTreesClassifier(n_estimators=20),
'RandomForest': RandomForestClassifier(n_estimators=20),
'CART': DecisionTreeClassifier(min_samples_split=5),
'SGD': SGDClassifier(alpha=0.001, n_iter=2),
'GaussianNB': GaussianNB(),
'liblinear': LinearSVC(loss="l2", penalty="l2", C=1000, dual=False,
tol=1e-3)
}
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--classifiers', nargs="+",
choices=ESTIMATORS, type=str,
default=['liblinear', 'GaussianNB', 'SGD', 'CART'],
help="list of classifiers to benchmark.")
parser.add_argument('--n-jobs', nargs="?", default=1, type=int,
help="Number of concurrently running workers for "
"models that support parallelism.")
parser.add_argument('--order', nargs="?", default="C", type=str,
choices=["F", "C"],
help="Allow to choose between fortran and C ordered "
"data")
parser.add_argument('--random-seed', nargs="?", default=13, type=int,
help="Common seed used by random number generator.")
args = vars(parser.parse_args())
print(__doc__)
X_train, X_test, y_train, y_test = load_data(
order=args["order"], random_state=args["random_seed"])
print("")
print("Dataset statistics:")
print("===================")
print("%s %d" % ("number of features:".ljust(25), X_train.shape[1]))
print("%s %d" % ("number of classes:".ljust(25), np.unique(y_train).size))
print("%s %s" % ("data type:".ljust(25), X_train.dtype))
print("%s %d (pos=%d, neg=%d, size=%dMB)"
% ("number of train samples:".ljust(25),
X_train.shape[0], np.sum(y_train == 1),
np.sum(y_train == 0), int(X_train.nbytes / 1e6)))
print("%s %d (pos=%d, neg=%d, size=%dMB)"
% ("number of test samples:".ljust(25),
X_test.shape[0], np.sum(y_test == 1),
np.sum(y_test == 0), int(X_test.nbytes / 1e6)))
print()
print("Training Classifiers")
print("====================")
error, train_time, test_time = {}, {}, {}
for name in sorted(args["classifiers"]):
print("Training %s ... " % name, end="")
estimator = ESTIMATORS[name]
estimator_params = estimator.get_params()
estimator.set_params(**{p: args["random_seed"]
for p in estimator_params
if p.endswith("random_state")})
if "n_jobs" in estimator_params:
estimator.set_params(n_jobs=args["n_jobs"])
time_start = time()
estimator.fit(X_train, y_train)
train_time[name] = time() - time_start
time_start = time()
y_pred = estimator.predict(X_test)
test_time[name] = time() - time_start
error[name] = zero_one_loss(y_test, y_pred)
print("done")
print()
print("Classification performance:")
print("===========================")
print("%s %s %s %s"
% ("Classifier ", "train-time", "test-time", "error-rate"))
print("-" * 44)
for name in sorted(args["classifiers"], key=error.get):
print("%s %s %s %s" % (name.ljust(12),
("%.4fs" % train_time[name]).center(10),
("%.4fs" % test_time[name]).center(10),
("%.4f" % error[name]).center(10)))
print()
| bsd-3-clause |
Barmaley-exe/scikit-learn | examples/tree/plot_tree_regression.py | 40 | 1470 | """
===================================================================
Decision Tree Regression
===================================================================
A 1D regression with decision tree.
The :ref:`decision trees <tree>` is
used to fit a sine curve with addition noisy observation. As a result, it
learns local linear regressions approximating the sine curve.
We can see that if the maximum depth of the tree (controlled by the
`max_depth` parameter) is set too high, the decision trees learn too fine
details of the training data and learn from the noise, i.e. they overfit.
"""
print(__doc__)
# Import the necessary modules and libraries
import numpy as np
from sklearn.tree import DecisionTreeRegressor
import matplotlib.pyplot as plt
# Create a random dataset
rng = np.random.RandomState(1)
X = np.sort(5 * rng.rand(80, 1), axis=0)
y = np.sin(X).ravel()
y[::5] += 3 * (0.5 - rng.rand(16))
# Fit regression model
clf_1 = DecisionTreeRegressor(max_depth=2)
clf_2 = DecisionTreeRegressor(max_depth=5)
clf_1.fit(X, y)
clf_2.fit(X, y)
# Predict
X_test = np.arange(0.0, 5.0, 0.01)[:, np.newaxis]
y_1 = clf_1.predict(X_test)
y_2 = clf_2.predict(X_test)
# Plot the results
plt.figure()
plt.scatter(X, y, c="k", label="data")
plt.plot(X_test, y_1, c="g", label="max_depth=2", linewidth=2)
plt.plot(X_test, y_2, c="r", label="max_depth=5", linewidth=2)
plt.xlabel("data")
plt.ylabel("target")
plt.title("Decision Tree Regression")
plt.legend()
plt.show()
| bsd-3-clause |
awsteiner/o2scl | examples/plot/old/ex_nucmass_plot.py | 1 | 1756 | """
Plot data from ex_nucmass
"""
import numpy as np
import matplotlib.pyplot as plot
import o2sclpy
import math
gc=o2sclpy.plotter()
gc.read('../ex_nucmass_table.o2')
gc.canvas_flag=1
Zgrid=range(1,121)
Ngrid=range(1,181)
sl=np.zeros(shape=(120,180))
labels=['Semi-empirical',
'Moller et al. (1995)',
'HFB 14',
'HFB 21',
'HFB 27',
'AME (2003)',
'Duflo and Zuker (1996)',
'Koura et al. (2005)',
'Dieperink et al. (2009)',
'Wang et al. (2010)',
'Liu et al. (2011)']
names=['se','mnmsk','hfb14','hfb21','hfb27','ame03','dz96',
'ktuy05','dvi','ws32','ws36']
for i in range(0,11):
(fig,ax)=o2sclpy.default_plot()
# First initialize slice to zero
for N in range(0,180):
for Z in range(0,120):
sl[Z,N]=0
# Now fill with data
print('name:',names[i],'nlines:',gc.dset['nlines'][0])
for row in range(0,gc.dset['nlines'][0]):
if gc.dset['data/N'][row]>7:
if gc.dset['data/Z'][row]>7:
val=gc.dset['data/'+names[i]][row]
sl[int(gc.dset['data/Z'][row]-0.99),
int(gc.dset['data/N'][row]-0.99)]=val
# Now plot
cax=plot.imshow(sl,interpolation='nearest',origin='lower',
extent=[1,180,1,120],aspect='auto',cmap='PuOr')
cbar=plot.colorbar(cax,orientation='vertical')
ax.text(0.55,-0.08,'N',fontsize=16,va='center',ha='center',
transform=ax.transAxes)
ax.text(-0.1,0.55,'Z',fontsize=16,va='center',ha='center',
transform=ax.transAxes)
ax.text(0.55,0.92,labels[i],fontsize=16,va='center',ha='center',
transform=ax.transAxes)
plot.savefig('ex_nucmass_'+names[i]+'.png')
plot.clf()
| gpl-3.0 |
abdulbaqi/ThinkStats2 | code/thinkplot.py | 75 | 18140 | """This file contains code for use with "Think Stats",
by Allen B. Downey, available from greenteapress.com
Copyright 2014 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
from __future__ import print_function
import math
import matplotlib
import matplotlib.pyplot as pyplot
import numpy as np
import pandas
import warnings
# customize some matplotlib attributes
#matplotlib.rc('figure', figsize=(4, 3))
#matplotlib.rc('font', size=14.0)
#matplotlib.rc('axes', labelsize=22.0, titlesize=22.0)
#matplotlib.rc('legend', fontsize=20.0)
#matplotlib.rc('xtick.major', size=6.0)
#matplotlib.rc('xtick.minor', size=3.0)
#matplotlib.rc('ytick.major', size=6.0)
#matplotlib.rc('ytick.minor', size=3.0)
class _Brewer(object):
"""Encapsulates a nice sequence of colors.
Shades of blue that look good in color and can be distinguished
in grayscale (up to a point).
Borrowed from http://colorbrewer2.org/
"""
color_iter = None
colors = ['#081D58',
'#253494',
'#225EA8',
'#1D91C0',
'#41B6C4',
'#7FCDBB',
'#C7E9B4',
'#EDF8B1',
'#FFFFD9']
# lists that indicate which colors to use depending on how many are used
which_colors = [[],
[1],
[1, 3],
[0, 2, 4],
[0, 2, 4, 6],
[0, 2, 3, 5, 6],
[0, 2, 3, 4, 5, 6],
[0, 1, 2, 3, 4, 5, 6],
]
@classmethod
def Colors(cls):
"""Returns the list of colors.
"""
return cls.colors
@classmethod
def ColorGenerator(cls, n):
"""Returns an iterator of color strings.
n: how many colors will be used
"""
for i in cls.which_colors[n]:
yield cls.colors[i]
raise StopIteration('Ran out of colors in _Brewer.ColorGenerator')
@classmethod
def InitializeIter(cls, num):
"""Initializes the color iterator with the given number of colors."""
cls.color_iter = cls.ColorGenerator(num)
@classmethod
def ClearIter(cls):
"""Sets the color iterator to None."""
cls.color_iter = None
@classmethod
def GetIter(cls):
"""Gets the color iterator."""
if cls.color_iter is None:
cls.InitializeIter(7)
return cls.color_iter
def PrePlot(num=None, rows=None, cols=None):
"""Takes hints about what's coming.
num: number of lines that will be plotted
rows: number of rows of subplots
cols: number of columns of subplots
"""
if num:
_Brewer.InitializeIter(num)
if rows is None and cols is None:
return
if rows is not None and cols is None:
cols = 1
if cols is not None and rows is None:
rows = 1
# resize the image, depending on the number of rows and cols
size_map = {(1, 1): (8, 6),
(1, 2): (14, 6),
(1, 3): (14, 6),
(2, 2): (10, 10),
(2, 3): (16, 10),
(3, 1): (8, 10),
}
if (rows, cols) in size_map:
fig = pyplot.gcf()
fig.set_size_inches(*size_map[rows, cols])
# create the first subplot
if rows > 1 or cols > 1:
pyplot.subplot(rows, cols, 1)
global SUBPLOT_ROWS, SUBPLOT_COLS
SUBPLOT_ROWS = rows
SUBPLOT_COLS = cols
def SubPlot(plot_number, rows=None, cols=None):
"""Configures the number of subplots and changes the current plot.
rows: int
cols: int
plot_number: int
"""
rows = rows or SUBPLOT_ROWS
cols = cols or SUBPLOT_COLS
pyplot.subplot(rows, cols, plot_number)
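# Illustrative sketch, not part of the original module: typical use of
# PrePlot/SubPlot to build a 1x2 panel figure. The data and labels are
# made up; Plot and Show are defined further below in this module.
def _example_subplots():
    PrePlot(rows=1, cols=2)
    Plot([1, 2, 3], [1, 4, 9], label='squares')
    SubPlot(2)
    Plot([1, 2, 3], [1, 8, 27], label='cubes')
    Show(xlabel='x')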
def _Underride(d, **options):
"""Add key-value pairs to d only if key is not in d.
If d is None, create a new dictionary.
d: dictionary
options: keyword args to add to d
"""
if d is None:
d = {}
for key, val in options.items():
d.setdefault(key, val)
return d
def Clf():
"""Clears the figure and any hints that have been set."""
global LOC
LOC = None
_Brewer.ClearIter()
pyplot.clf()
fig = pyplot.gcf()
fig.set_size_inches(8, 6)
def Figure(**options):
"""Sets options for the current figure."""
_Underride(options, figsize=(6, 8))
pyplot.figure(**options)
def _UnderrideColor(options):
if 'color' in options:
return options
color_iter = _Brewer.GetIter()
if color_iter:
try:
options['color'] = next(color_iter)
except StopIteration:
# TODO: reconsider whether this should warn
# warnings.warn('Warning: Brewer ran out of colors.')
_Brewer.ClearIter()
return options
def Plot(obj, ys=None, style='', **options):
"""Plots a line.
Args:
obj: sequence of x values, or Series, or anything with Render()
ys: sequence of y values
style: style string passed along to pyplot.plot
options: keyword args passed to pyplot.plot
"""
options = _UnderrideColor(options)
label = getattr(obj, 'label', '_nolegend_')
options = _Underride(options, linewidth=3, alpha=0.8, label=label)
xs = obj
if ys is None:
if hasattr(obj, 'Render'):
xs, ys = obj.Render()
if isinstance(obj, pandas.Series):
ys = obj.values
xs = obj.index
if ys is None:
pyplot.plot(xs, style, **options)
else:
pyplot.plot(xs, ys, style, **options)
def FillBetween(xs, y1, y2=None, where=None, **options):
"""Plots a line.
Args:
xs: sequence of x values
y1: sequence of y values
y2: sequence of y values
where: sequence of boolean
options: keyword args passed to pyplot.fill_between
"""
options = _UnderrideColor(options)
options = _Underride(options, linewidth=0, alpha=0.5)
pyplot.fill_between(xs, y1, y2, where, **options)
def Bar(xs, ys, **options):
"""Plots a line.
Args:
xs: sequence of x values
ys: sequence of y values
options: keyword args passed to pyplot.bar
"""
options = _UnderrideColor(options)
options = _Underride(options, linewidth=0, alpha=0.6)
pyplot.bar(xs, ys, **options)
def Scatter(xs, ys=None, **options):
"""Makes a scatter plot.
xs: x values
ys: y values
options: options passed to pyplot.scatter
"""
options = _Underride(options, color='blue', alpha=0.2,
s=30, edgecolors='none')
if ys is None and isinstance(xs, pandas.Series):
ys = xs.values
xs = xs.index
pyplot.scatter(xs, ys, **options)
def HexBin(xs, ys, **options):
"""Makes a scatter plot.
xs: x values
ys: y values
options: options passed to pyplot.scatter
"""
options = _Underride(options, cmap=matplotlib.cm.Blues)
pyplot.hexbin(xs, ys, **options)
def Pdf(pdf, **options):
"""Plots a Pdf, Pmf, or Hist as a line.
Args:
pdf: Pdf, Pmf, or Hist object
options: keyword args passed to pyplot.plot
"""
low, high = options.pop('low', None), options.pop('high', None)
n = options.pop('n', 101)
xs, ps = pdf.Render(low=low, high=high, n=n)
options = _Underride(options, label=pdf.label)
Plot(xs, ps, **options)
def Pdfs(pdfs, **options):
"""Plots a sequence of PDFs.
Options are passed along for all PDFs. If you want different
options for each pdf, make multiple calls to Pdf.
Args:
pdfs: sequence of PDF objects
options: keyword args passed to pyplot.plot
"""
for pdf in pdfs:
Pdf(pdf, **options)
def Hist(hist, **options):
"""Plots a Pmf or Hist with a bar plot.
The default width of the bars is based on the minimum difference
between values in the Hist. If that's too small, you can override
it by providing a width keyword argument, in the same units
as the values.
Args:
hist: Hist or Pmf object
options: keyword args passed to pyplot.bar
"""
# find the minimum distance between adjacent values
xs, ys = hist.Render()
if 'width' not in options:
try:
options['width'] = 0.9 * np.diff(xs).min()
except TypeError:
warnings.warn("Hist: Can't compute bar width automatically."
"Check for non-numeric types in Hist."
"Or try providing width option."
)
options = _Underride(options, label=hist.label)
options = _Underride(options, align='center')
if options['align'] == 'left':
options['align'] = 'edge'
elif options['align'] == 'right':
options['align'] = 'edge'
options['width'] *= -1
Bar(xs, ys, **options)
def Hists(hists, **options):
"""Plots two histograms as interleaved bar plots.
Options are passed along for all PMFs. If you want different
options for each pmf, make multiple calls to Pmf.
Args:
hists: list of two Hist or Pmf objects
options: keyword args passed to pyplot.plot
"""
for hist in hists:
Hist(hist, **options)
def Pmf(pmf, **options):
"""Plots a Pmf or Hist as a line.
Args:
pmf: Hist or Pmf object
options: keyword args passed to pyplot.plot
"""
xs, ys = pmf.Render()
low, high = min(xs), max(xs)
width = options.pop('width', None)
if width is None:
try:
width = np.diff(xs).min()
except TypeError:
warnings.warn("Pmf: Can't compute bar width automatically."
"Check for non-numeric types in Pmf."
"Or try providing width option.")
points = []
lastx = np.nan
lasty = 0
for x, y in zip(xs, ys):
if (x - lastx) > 1e-5:
points.append((lastx, 0))
points.append((x, 0))
points.append((x, lasty))
points.append((x, y))
points.append((x+width, y))
lastx = x + width
lasty = y
points.append((lastx, 0))
pxs, pys = zip(*points)
align = options.pop('align', 'center')
if align == 'center':
pxs = np.array(pxs) - width/2.0
if align == 'right':
pxs = np.array(pxs) - width
options = _Underride(options, label=pmf.label)
Plot(pxs, pys, **options)
def Pmfs(pmfs, **options):
"""Plots a sequence of PMFs.
Options are passed along for all PMFs. If you want different
options for each pmf, make multiple calls to Pmf.
Args:
pmfs: sequence of PMF objects
options: keyword args passed to pyplot.plot
"""
for pmf in pmfs:
Pmf(pmf, **options)
def Diff(t):
"""Compute the differences between adjacent elements in a sequence.
Args:
t: sequence of number
Returns:
sequence of differences (length one less than t)
"""
diffs = [t[i+1] - t[i] for i in range(len(t)-1)]
return diffs
def Cdf(cdf, complement=False, transform=None, **options):
"""Plots a CDF as a line.
Args:
cdf: Cdf object
complement: boolean, whether to plot the complementary CDF
transform: string, one of 'exponential', 'pareto', 'weibull', 'gumbel'
options: keyword args passed to pyplot.plot
Returns:
dictionary with the scale options that should be passed to
Config, Show or Save.
"""
xs, ps = cdf.Render()
xs = np.asarray(xs)
ps = np.asarray(ps)
scale = dict(xscale='linear', yscale='linear')
for s in ['xscale', 'yscale']:
if s in options:
scale[s] = options.pop(s)
if transform == 'exponential':
complement = True
scale['yscale'] = 'log'
if transform == 'pareto':
complement = True
scale['yscale'] = 'log'
scale['xscale'] = 'log'
if complement:
ps = [1.0-p for p in ps]
if transform == 'weibull':
xs = np.delete(xs, -1)
ps = np.delete(ps, -1)
ps = [-math.log(1.0-p) for p in ps]
scale['xscale'] = 'log'
scale['yscale'] = 'log'
if transform == 'gumbel':
        xs = np.delete(xs, 0)
ps = np.delete(ps, 0)
ps = [-math.log(p) for p in ps]
scale['yscale'] = 'log'
options = _Underride(options, label=cdf.label)
Plot(xs, ps, **options)
return scale
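# Illustrative sketch, not part of the original module: shows how the
# `transform` argument of Cdf() maps to axis scales. _FakeCdf is a minimal
# stand-in for a thinkstats2 Cdf; it only needs Render() and a label.
def _example_cdf_transform():
    class _FakeCdf(object):
        label = 'exponential sample'

        def Render(self):
            xs = np.sort(np.random.exponential(size=200))
            ps = np.arange(200) / 200.0
            return xs, ps

    # 'exponential' plots the complementary CDF on a log y scale; pass the
    # returned scale dict on to Config, Show or Save.
    scale = Cdf(_FakeCdf(), transform='exponential')
    Show(xlabel='x', ylabel='CCDF', **scale)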
def Cdfs(cdfs, complement=False, transform=None, **options):
"""Plots a sequence of CDFs.
cdfs: sequence of CDF objects
complement: boolean, whether to plot the complementary CDF
transform: string, one of 'exponential', 'pareto', 'weibull', 'gumbel'
options: keyword args passed to pyplot.plot
"""
for cdf in cdfs:
Cdf(cdf, complement, transform, **options)
def Contour(obj, pcolor=False, contour=True, imshow=False, **options):
"""Makes a contour plot.
d: map from (x, y) to z, or object that provides GetDict
pcolor: boolean, whether to make a pseudocolor plot
contour: boolean, whether to make a contour plot
imshow: boolean, whether to use pyplot.imshow
options: keyword args passed to pyplot.pcolor and/or pyplot.contour
"""
try:
d = obj.GetDict()
except AttributeError:
d = obj
_Underride(options, linewidth=3, cmap=matplotlib.cm.Blues)
xs, ys = zip(*d.keys())
xs = sorted(set(xs))
ys = sorted(set(ys))
X, Y = np.meshgrid(xs, ys)
func = lambda x, y: d.get((x, y), 0)
func = np.vectorize(func)
Z = func(X, Y)
x_formatter = matplotlib.ticker.ScalarFormatter(useOffset=False)
axes = pyplot.gca()
axes.xaxis.set_major_formatter(x_formatter)
if pcolor:
pyplot.pcolormesh(X, Y, Z, **options)
if contour:
cs = pyplot.contour(X, Y, Z, **options)
pyplot.clabel(cs, inline=1, fontsize=10)
if imshow:
extent = xs[0], xs[-1], ys[0], ys[-1]
pyplot.imshow(Z, extent=extent, **options)
def Pcolor(xs, ys, zs, pcolor=True, contour=False, **options):
"""Makes a pseudocolor plot.
xs:
ys:
zs:
pcolor: boolean, whether to make a pseudocolor plot
contour: boolean, whether to make a contour plot
options: keyword args passed to pyplot.pcolor and/or pyplot.contour
"""
_Underride(options, linewidth=3, cmap=matplotlib.cm.Blues)
X, Y = np.meshgrid(xs, ys)
Z = zs
x_formatter = matplotlib.ticker.ScalarFormatter(useOffset=False)
axes = pyplot.gca()
axes.xaxis.set_major_formatter(x_formatter)
if pcolor:
pyplot.pcolormesh(X, Y, Z, **options)
if contour:
cs = pyplot.contour(X, Y, Z, **options)
pyplot.clabel(cs, inline=1, fontsize=10)
def Text(x, y, s, **options):
"""Puts text in a figure.
x: number
y: number
s: string
options: keyword args passed to pyplot.text
"""
options = _Underride(options,
fontsize=16,
verticalalignment='top',
horizontalalignment='left')
pyplot.text(x, y, s, **options)
LEGEND = True
LOC = None
def Config(**options):
"""Configures the plot.
Pulls options out of the option dictionary and passes them to
the corresponding pyplot functions.
"""
names = ['title', 'xlabel', 'ylabel', 'xscale', 'yscale',
'xticks', 'yticks', 'axis', 'xlim', 'ylim']
for name in names:
if name in options:
getattr(pyplot, name)(options[name])
# looks like this is not necessary: matplotlib understands text loc specs
loc_dict = {'upper right': 1,
'upper left': 2,
'lower left': 3,
'lower right': 4,
'right': 5,
'center left': 6,
'center right': 7,
'lower center': 8,
'upper center': 9,
'center': 10,
}
global LEGEND
LEGEND = options.get('legend', LEGEND)
if LEGEND:
global LOC
LOC = options.get('loc', LOC)
pyplot.legend(loc=LOC)
def Show(**options):
"""Shows the plot.
For options, see Config.
options: keyword args used to invoke various pyplot functions
"""
clf = options.pop('clf', True)
Config(**options)
pyplot.show()
if clf:
Clf()
def Plotly(**options):
"""Shows the plot.
For options, see Config.
options: keyword args used to invoke various pyplot functions
"""
clf = options.pop('clf', True)
Config(**options)
import plotly.plotly as plotly
url = plotly.plot_mpl(pyplot.gcf())
if clf:
Clf()
return url
def Save(root=None, formats=None, **options):
"""Saves the plot in the given formats and clears the figure.
For options, see Config.
Args:
root: string filename root
formats: list of string formats
options: keyword args used to invoke various pyplot functions
"""
clf = options.pop('clf', True)
Config(**options)
if formats is None:
formats = ['pdf', 'eps']
try:
formats.remove('plotly')
Plotly(clf=False)
except ValueError:
pass
if root:
for fmt in formats:
SaveFormat(root, fmt)
if clf:
Clf()
def SaveFormat(root, fmt='eps'):
"""Writes the current figure to a file in the given format.
Args:
root: string filename root
fmt: string format
"""
filename = '%s.%s' % (root, fmt)
print('Writing', filename)
pyplot.savefig(filename, format=fmt, dpi=300)
# provide aliases for calling functons with lower-case names
preplot = PrePlot
subplot = SubPlot
clf = Clf
figure = Figure
plot = Plot
text = Text
scatter = Scatter
pmf = Pmf
pmfs = Pmfs
hist = Hist
hists = Hists
diff = Diff
cdf = Cdf
cdfs = Cdfs
contour = Contour
pcolor = Pcolor
config = Config
show = Show
save = Save
def main():
color_iter = _Brewer.ColorGenerator(7)
for color in color_iter:
print(color)
if __name__ == '__main__':
main()
| gpl-3.0 |
chrsrds/scikit-learn | examples/semi_supervised/plot_label_propagation_structure.py | 21 | 2437 | """
==============================================
Label Propagation learning a complex structure
==============================================
Example of LabelPropagation learning a complex internal structure
to demonstrate "manifold learning". The outer circle should be
labeled "red" and the inner circle "blue". Because both label groups
lie inside their own distinct shape, we can see that the labels
propagate correctly around the circle.
"""
print(__doc__)
# Authors: Clay Woolam <[email protected]>
# Andreas Mueller <[email protected]>
# License: BSD
import numpy as np
import matplotlib.pyplot as plt
from sklearn.semi_supervised import label_propagation
from sklearn.datasets import make_circles
# generate ring with inner box
n_samples = 200
X, y = make_circles(n_samples=n_samples, shuffle=False)
outer, inner = 0, 1
labels = np.full(n_samples, -1.)
labels[0] = outer
labels[-1] = inner
# #############################################################################
# Learn with LabelSpreading
label_spread = label_propagation.LabelSpreading(kernel='knn', alpha=0.8)
label_spread.fit(X, labels)
# #############################################################################
# Plot output labels
output_labels = label_spread.transduction_
plt.figure(figsize=(8.5, 4))
plt.subplot(1, 2, 1)
plt.scatter(X[labels == outer, 0], X[labels == outer, 1], color='navy',
marker='s', lw=0, label="outer labeled", s=10)
plt.scatter(X[labels == inner, 0], X[labels == inner, 1], color='c',
marker='s', lw=0, label='inner labeled', s=10)
plt.scatter(X[labels == -1, 0], X[labels == -1, 1], color='darkorange',
marker='.', label='unlabeled')
plt.legend(scatterpoints=1, shadow=False, loc='upper right')
plt.title("Raw data (2 classes=outer and inner)")
plt.subplot(1, 2, 2)
output_label_array = np.asarray(output_labels)
outer_numbers = np.where(output_label_array == outer)[0]
inner_numbers = np.where(output_label_array == inner)[0]
plt.scatter(X[outer_numbers, 0], X[outer_numbers, 1], color='navy',
marker='s', lw=0, s=10, label="outer learned")
plt.scatter(X[inner_numbers, 0], X[inner_numbers, 1], color='c',
marker='s', lw=0, s=10, label="inner learned")
plt.legend(scatterpoints=1, shadow=False, loc='upper right')
plt.title("Labels learned with Label Spreading (KNN)")
plt.subplots_adjust(left=0.07, bottom=0.07, right=0.93, top=0.92)
plt.show()
| bsd-3-clause |
Ginkgo-Biloba/Misc-Python | numpy/Pandas2Excel.py | 1 | 2036 | # coding = utf-8
"""
(2) Reading and writing Excel files
From: https://github.com/wizardforcel/pandas-official-tutorials-zh/blob/master/3.3.md
"""
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
plt.style.use("ggplot")
# Set the seed
np.random.seed(111)
# Function to generate test data
def CreateDataSet(Number=1):
Output = list()
for i in range(Number):
		# Create a weekly (Monday) date range
rng = pd.date_range(start='2009-01-01', end='2012-12-31', freq='W-MON')
		# Create random data
data = np.random.randint(low=25,high=1000,size=len(rng))
		# Pool of statuses
status = np.array([1,2,3], dtype=np.int32)
		# Create a random list of statuses
random_status = status[np.random.randint(low=0, high=len(status), size=len(rng), dtype=np.int32)]
		# Pool of states
states = np.array(['GA','FL','fl','NY','NJ','TX'], dtype=np.str)
		# Create a random list of states
random_states = states[np.random.randint(low=0, high=len(states), size=len(rng))]
Output.extend(zip(random_states, random_status, data, rng))
return Output
# Create some data
dataset = CreateDataSet(4)
df = pd.DataFrame(data=dataset, columns=['State','Status','CustomerCount','StatusDate'])
df = df.set_index("StatusDate")
# Read/write Excel. Skipped here: xlrd and xlwt are not installed
# df.to_excel(__file__.replace("py", "xlsx"), index=False)
# df = pd.read_excel(__file__.replace("py", "xlsx"), 0, index_col="StatusDate")
"""
This part tries to clean up the data for analysis
1. Make sure the State column is all uppercase
2. Select only records whose account Status equals 1
3. Merge NJ and NY into NY in the State column
4. Remove any outliers (any strange results in the data set)
"""
# Clean the State column: convert to uppercase
df['State'] = df.State.apply(lambda x: x.upper())
# Grab only the rows where Status == 1
dfMask = (df.Status == 1)
df = df[dfMask]
# Change NJ to NY
dfMask = (df.State == 'NJ')
df['State'][dfMask] = 'NY'
# Look at CustomerCount
# df.CustomerCount.plot(figsize=(10, 7), kind="line")
df.CustomerCount.plot.line(figsize=(10, 7))
sortdf = df[df.State == "NY"].sort_index(axis=0)
| gpl-3.0 |
rom1mouret/assortment | gen_dataset/features.py | 1 | 2971 | from sklearn.preprocessing import robust_scale
from sklearn.linear_model import Ridge
from scipy.stats import shapiro
from scipy.stats import rankdata
import numpy as np
def session_features(X, scores):
""" assuming X has been scaled properly """
subsample = np.random.choice(np.arange(scores.shape[0]), size=3000)
X_small = X[subsample]
# average Gaussianity of each feature
g = 0
n = 0
for j in range(X.shape[1]):
col = X_small[:, j]
if np.min(col) != np.max(col):
g += shapiro(col)[0]
n += 1
if n == 0:
m0 = np.nan
else:
m0 = g/n
# from statsmodels.stats.outliers_influence import variance_inflation_factor
#print("computing VIF")
#vif = [variance_inflation_factor(X_small, col) for col in range(X.shape[1])]
#m1 = np.mean(vif)
#print("VIF avg", m1)
# percentiles of scores
m2 = np.percentile(scores, 70)
m3 = np.percentile(scores, 90)
m4 = np.percentile(scores, 95)
# histogram of scores
hist, edges = np.histogram(scores, bins=5)
hist += 1
hist = np.log(hist)
hist /= np.sum(hist)
# do scores explain well the data?
m5 = score_explanation(X, scores)
# TODO: accuracy of KDE or GMM
return [m0, m2, m3, m4, m5]+hist.tolist()
def score_explanation(X, scores):
def regression_error(features, y):
#model = RandomForestRegressor()# DecisionTreeRegressor() #Ridge()
model = Ridge()
model.fit(features, y)
return np.mean(np.square(y - model.predict(features)))
# resample to speed up the regression, while keeping the alleged anomalies
ranks = rankdata(scores)
probas = ranks / np.sum(ranks)
score_selection = np.random.choice(np.arange(len(scores)), size=7000, p=probas)
X = X[score_selection]
scores = scores[score_selection]
scores = robust_scale(scores.reshape(scores.shape[0], 1)).squeeze()
# make room for storing score cubes and score log in the feature matrix
scores_cube = np.power(scores, 3)
scores_cube_idx = X.shape[1]
scores_log = np.log(1 + scores - np.min(scores))
scores_log_idx = scores_cube_idx + 1
scores_tan = np.tanh(scores) + 1
scores_tan_idx = scores_log_idx + 1
extra = ((scores_cube_idx, scores_cube), (scores_log_idx, scores_log), (scores_tan_idx, scores_tan))
X = np.concatenate([X, np.empty((X.shape[0], len(extra)))], axis=1)
predictability = 0
for j in range(X.shape[1]):
col = X[:, j].copy()
X[:, j] = 0 # or np.random.randn(X.shape[0])?
for idx, _ in extra:
X[:, idx] = 0
no_score = regression_error(X, col)
X[:, j] = scores
for idx, data in extra:
X[:, idx] = data
with_score = regression_error(X, col)
predictability += np.log(1 + max(0, no_score - with_score))
X[:, j] = col # cancel changes
predictability /= X.shape[1]
return predictability
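if __name__ == '__main__':
    # Illustrative sketch, not part of the original module: run the feature
    # extraction on made-up data, assuming X is a robust-scaled feature
    # matrix and `scores` are anomaly scores from some upstream detector.
    demo_X = robust_scale(np.random.randn(5000, 8))
    demo_scores = np.abs(np.random.randn(5000))
    print(session_features(demo_X, demo_scores))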
| apache-2.0 |
patrickayoup/ebox_checkup | ebox_checkup/reporting/email_reporter.py | 1 | 4239 | """
Module for generating and sending email reports.
"""
import calendar
import datetime
import logging
import io
import os
from matplotlib import pyplot
from pa_pylibs.config.configuration import Configuration
from pa_pylibs.email.email_sender import EmailSender, EmailAttachment
from ebox_checkup import resources
class EmailReporter(object):
def __init__(self, notification_emails):
"""
:param notification_emails: The email addresses to send the report to.
:type notification_emails: list
"""
self._config = Configuration(
os.path.join(resources.__path__[0], 'ebox_checkup.conf'),
'~/.ebox_checkup/ebox_checkup.conf'
)
self._notification_emails = notification_emails
self._daily_usage_graph = None
self._monthly_totals_graph = None
self._daily_target = None
def generate_report(self, daily_usage, monthly_sum, total_this_month):
"""
Generates a report describing your internet usage.
:param daily_usage: The usage data for the daily usage in the current month.
:type daily_usage: pandas.DataFrame
:param monthly_sum: The usage sums on a monthly basis in the current year.
:type monthly_sum: pandas.DataFrame
:param total_this_month: The usage averages overall in the current month.
:type total_this_month: pandas.DataFrame
:return: An HTML report describing your internet usage.
:rtype: str
"""
logger = logging.getLogger(__name__)
logger.debug('Generating daily usage graph.')
self._daily_usage_graph = self._generate_usage_graph(daily_usage, 'Daily Usage')
logger.debug('Generating monthly usage graph.')
self._monthly_totals_graph = self._generate_usage_graph(monthly_sum, 'Monthly Totals')
now = datetime.date.today()
remainder = self._config.get_int('plan', 'total_bandwidth') - total_this_month['total']
days_left_in_month = calendar.monthrange(now.year, now.month)[1] - now.day
self._daily_target = remainder / days_left_in_month if days_left_in_month != 0 else remainder
logger.debug('Daily target is now: {}'.format(self._daily_target))
def send_report(self):
"""
Sends the HTML report to the required notification email addresses.
"""
logger = logging.getLogger(__name__)
logger.debug('Sending Report')
email_sender = EmailSender(
self._config.get_str('email', 'smtp_hostport'),
self._config.get_str('email', 'username'),
self._config.get_str('email', 'password')
)
email_sender.send_multipart(
self._config.get_str('email', 'from_address'),
'Your Internet Usage',
'You can afford to use: {} GB per day for the rest of the month.'.format(self._daily_target),
[
EmailAttachment(self._daily_usage_graph, 'daily_usage.png'),
EmailAttachment(self._monthly_totals_graph, 'monthly_usage.png')
],
self._notification_emails
)
@staticmethod
def _generate_usage_graph(usage, title):
"""
Generates a usage graph.
:param usage: The usage data.
:type usage: pandas.DataFrame
:param title: The graph title.
:type title: str
:return: The graph representing this data.
:rtype: io.BytesIO
"""
logger = logging.getLogger(__name__)
image_buffer = io.BytesIO()
with pyplot.style.context('fivethirtyeight'):
# Drop the total column.
usage = usage.drop('total', axis=1)
# Format the dates.
usage.index = usage.index.map(lambda t: t.strftime('%Y-%m-%d'))
ax = usage.plot.bar(stacked=True, width=1)
ax.set_ylabel('Usage (GB)')
ax.set_title(title)
ax.grid(False)
pyplot.tight_layout()
logger.debug('Saving graph to memory file.')
pyplot.savefig(image_buffer, format='png')
image_buffer.seek(0)
logger.debug('Clearing plotter.')
pyplot.clf()
return image_buffer
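def _example_report_inputs():
    """
    Illustrative sketch, not part of the original module: builds inputs in
    the shapes generate_report() expects. The column names other than
    'total' are made up; 'total' must be present because the plots drop it
    and the daily target is derived from it.
    """
    import pandas as pd
    days = pd.date_range('2021-06-01', periods=7, freq='D')
    daily_usage = pd.DataFrame(
        {'download': [1.2, 0.8, 2.0, 1.5, 0.9, 1.1, 1.7],
         'upload': [0.1, 0.2, 0.1, 0.3, 0.2, 0.1, 0.2]},
        index=days)
    daily_usage['total'] = daily_usage.sum(axis=1)
    monthly_sum = daily_usage.resample('M').sum()
    total_this_month = {'total': daily_usage['total'].sum()}
    return daily_usage, monthly_sum, total_this_month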
| mit |
jakereps/q2-diversity | q2_diversity/tests/test_beta_correlation.py | 2 | 1744 | # ----------------------------------------------------------------------------
# Copyright (c) 2016-2021, QIIME 2 development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
import unittest
import skbio
import pandas as pd
import qiime2
from qiime2 import Artifact
from qiime2.plugin.testing import TestPluginBase
class BetaCorrelationTests(TestPluginBase):
package = 'q2_diversity'
def setUp(self):
super().setUp()
self.beta_correlation = self.plugin.pipelines['beta_correlation']
dm = skbio.DistanceMatrix([[0, 1, 2],
[1, 0, 1],
[2, 1, 0]],
ids=['sample1', 'sample2', 'sample3'])
self.dm = Artifact.import_data('DistanceMatrix', dm)
self.md = qiime2.NumericMetadataColumn(
pd.Series([1, 2, 3], name='number',
index=pd.Index(['sample1', 'sample2', 'sample3'],
name='id')))
def test_execution(self):
# does it run?
self.beta_correlation(self.md, self.dm)
def test_outputs(self):
result = self.beta_correlation(self.md, self.dm)
# correct number of outputs?
self.assertEqual(2, len(result))
# correct types?
self.assertEqual('DistanceMatrix',
str(result.metadata_distance_matrix.type))
self.assertEqual('Visualization',
str(result.mantel_scatter_visualization.type))
if __name__ == '__main__':
unittest.main()
| bsd-3-clause |
smartscheduling/scikit-learn-categorical-tree | sklearn/cluster/tests/test_k_means.py | 7 | 25794 | """Testing for K-means"""
import sys
import numpy as np
from scipy import sparse as sp
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import SkipTest
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regexp
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import if_not_mac_os
from sklearn.utils.extmath import row_norms
from sklearn.metrics.cluster import v_measure_score
from sklearn.cluster import KMeans, k_means
from sklearn.cluster import MiniBatchKMeans
from sklearn.cluster.k_means_ import _labels_inertia
from sklearn.cluster.k_means_ import _mini_batch_step
from sklearn.datasets.samples_generator import make_blobs
from sklearn.externals.six.moves import cStringIO as StringIO
# non centered, sparse centers to check the
centers = np.array([
[0.0, 5.0, 0.0, 0.0, 0.0],
[1.0, 1.0, 4.0, 0.0, 0.0],
[1.0, 0.0, 0.0, 5.0, 1.0],
])
n_samples = 100
n_clusters, n_features = centers.shape
X, true_labels = make_blobs(n_samples=n_samples, centers=centers,
cluster_std=1., random_state=42)
X_csr = sp.csr_matrix(X)
def test_kmeans_dtype():
rnd = np.random.RandomState(0)
X = rnd.normal(size=(40, 2))
X = (X * 10).astype(np.uint8)
km = KMeans(n_init=1).fit(X)
pred_x = assert_warns(RuntimeWarning, km.predict, X)
assert_array_equal(km.labels_, pred_x)
def test_labels_assignment_and_inertia():
# pure numpy implementation as easily auditable reference gold
# implementation
rng = np.random.RandomState(42)
noisy_centers = centers + rng.normal(size=centers.shape)
labels_gold = - np.ones(n_samples, dtype=np.int)
mindist = np.empty(n_samples)
mindist.fill(np.infty)
for center_id in range(n_clusters):
dist = np.sum((X - noisy_centers[center_id]) ** 2, axis=1)
labels_gold[dist < mindist] = center_id
mindist = np.minimum(dist, mindist)
inertia_gold = mindist.sum()
assert_true((mindist >= 0.0).all())
assert_true((labels_gold != -1).all())
# perform label assignment using the dense array input
x_squared_norms = (X ** 2).sum(axis=1)
labels_array, inertia_array = _labels_inertia(
X, x_squared_norms, noisy_centers)
assert_array_almost_equal(inertia_array, inertia_gold)
assert_array_equal(labels_array, labels_gold)
# perform label assignment using the sparse CSR input
x_squared_norms_from_csr = row_norms(X_csr, squared=True)
labels_csr, inertia_csr = _labels_inertia(
X_csr, x_squared_norms_from_csr, noisy_centers)
assert_array_almost_equal(inertia_csr, inertia_gold)
assert_array_equal(labels_csr, labels_gold)
def test_minibatch_update_consistency():
# Check that dense and sparse minibatch update give the same results
rng = np.random.RandomState(42)
old_centers = centers + rng.normal(size=centers.shape)
new_centers = old_centers.copy()
new_centers_csr = old_centers.copy()
counts = np.zeros(new_centers.shape[0], dtype=np.int32)
counts_csr = np.zeros(new_centers.shape[0], dtype=np.int32)
x_squared_norms = (X ** 2).sum(axis=1)
x_squared_norms_csr = row_norms(X_csr, squared=True)
buffer = np.zeros(centers.shape[1], dtype=np.double)
buffer_csr = np.zeros(centers.shape[1], dtype=np.double)
# extract a small minibatch
X_mb = X[:10]
X_mb_csr = X_csr[:10]
x_mb_squared_norms = x_squared_norms[:10]
x_mb_squared_norms_csr = x_squared_norms_csr[:10]
# step 1: compute the dense minibatch update
old_inertia, incremental_diff = _mini_batch_step(
X_mb, x_mb_squared_norms, new_centers, counts,
buffer, 1, None, random_reassign=False)
assert_greater(old_inertia, 0.0)
# compute the new inertia on the same batch to check that it decreased
labels, new_inertia = _labels_inertia(
X_mb, x_mb_squared_norms, new_centers)
assert_greater(new_inertia, 0.0)
assert_less(new_inertia, old_inertia)
# check that the incremental difference computation is matching the
# final observed value
effective_diff = np.sum((new_centers - old_centers) ** 2)
assert_almost_equal(incremental_diff, effective_diff)
# step 2: compute the sparse minibatch update
old_inertia_csr, incremental_diff_csr = _mini_batch_step(
X_mb_csr, x_mb_squared_norms_csr, new_centers_csr, counts_csr,
buffer_csr, 1, None, random_reassign=False)
assert_greater(old_inertia_csr, 0.0)
# compute the new inertia on the same batch to check that it decreased
labels_csr, new_inertia_csr = _labels_inertia(
X_mb_csr, x_mb_squared_norms_csr, new_centers_csr)
assert_greater(new_inertia_csr, 0.0)
assert_less(new_inertia_csr, old_inertia_csr)
# check that the incremental difference computation is matching the
# final observed value
effective_diff = np.sum((new_centers_csr - old_centers) ** 2)
assert_almost_equal(incremental_diff_csr, effective_diff)
# step 3: check that sparse and dense updates lead to the same results
assert_array_equal(labels, labels_csr)
assert_array_almost_equal(new_centers, new_centers_csr)
assert_almost_equal(incremental_diff, incremental_diff_csr)
assert_almost_equal(old_inertia, old_inertia_csr)
assert_almost_equal(new_inertia, new_inertia_csr)
def _check_fitted_model(km):
# check that the number of clusters centers and distinct labels match
# the expectation
centers = km.cluster_centers_
assert_equal(centers.shape, (n_clusters, n_features))
labels = km.labels_
assert_equal(np.unique(labels).shape[0], n_clusters)
# check that the labels assignment are perfect (up to a permutation)
assert_equal(v_measure_score(true_labels, labels), 1.0)
assert_greater(km.inertia_, 0.0)
# check error on dataset being too small
assert_raises(ValueError, km.fit, [[0., 1.]])
def test_k_means_plus_plus_init():
km = KMeans(init="k-means++", n_clusters=n_clusters,
random_state=42).fit(X)
_check_fitted_model(km)
def test_k_means_new_centers():
# Explore the part of the code where a new center is reassigned
X = np.array([[0, 0, 1, 1],
[0, 0, 0, 0],
[0, 1, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 1, 0, 0]])
labels = [0, 1, 2, 1, 1, 2]
bad_centers = np.array([[+0, 1, 0, 0],
[.2, 0, .2, .2],
[+0, 0, 0, 0]])
km = KMeans(n_clusters=3, init=bad_centers, n_init=1, max_iter=10,
random_state=1)
for this_X in (X, sp.coo_matrix(X)):
km.fit(this_X)
this_labels = km.labels_
# Reorder the labels so that the first instance is in cluster 0,
# the second in cluster 1, ...
this_labels = np.unique(this_labels, return_index=True)[1][this_labels]
np.testing.assert_array_equal(this_labels, labels)
def _has_blas_lib(libname):
from numpy.distutils.system_info import get_info
return libname in get_info('blas_opt').get('libraries', [])
@if_not_mac_os()
def test_k_means_plus_plus_init_2_jobs():
if _has_blas_lib('openblas'):
raise SkipTest('Multi-process bug with OpenBLAS (see issue #636)')
km = KMeans(init="k-means++", n_clusters=n_clusters, n_jobs=2,
random_state=42).fit(X)
_check_fitted_model(km)
def test_k_means_precompute_distances_flag():
# check that a warning is raised if the precompute_distances flag is not
# supported
km = KMeans(precompute_distances="wrong")
assert_raises(ValueError, km.fit, X)
def test_k_means_plus_plus_init_sparse():
km = KMeans(init="k-means++", n_clusters=n_clusters, random_state=42)
km.fit(X_csr)
_check_fitted_model(km)
def test_k_means_random_init():
km = KMeans(init="random", n_clusters=n_clusters, random_state=42)
km.fit(X)
_check_fitted_model(km)
def test_k_means_random_init_sparse():
km = KMeans(init="random", n_clusters=n_clusters, random_state=42)
km.fit(X_csr)
_check_fitted_model(km)
def test_k_means_plus_plus_init_not_precomputed():
km = KMeans(init="k-means++", n_clusters=n_clusters, random_state=42,
precompute_distances=False).fit(X)
_check_fitted_model(km)
def test_k_means_random_init_not_precomputed():
km = KMeans(init="random", n_clusters=n_clusters, random_state=42,
precompute_distances=False).fit(X)
_check_fitted_model(km)
def test_k_means_perfect_init():
km = KMeans(init=centers.copy(), n_clusters=n_clusters, random_state=42,
n_init=1)
km.fit(X)
_check_fitted_model(km)
def test_k_means_n_init():
rnd = np.random.RandomState(0)
X = rnd.normal(size=(40, 2))
# two regression tests on bad n_init argument
# previous bug: n_init <= 0 threw non-informative TypeError (#3858)
assert_raises_regexp(ValueError, "n_init", KMeans(n_init=0).fit, X)
assert_raises_regexp(ValueError, "n_init", KMeans(n_init=-1).fit, X)
def test_k_means_fortran_aligned_data():
# Check the KMeans will work well, even if X is a fortran-aligned data.
X = np.asfortranarray([[0, 0], [0, 1], [0, 1]])
centers = np.array([[0, 0], [0, 1]])
labels = np.array([0, 1, 1])
km = KMeans(n_init=1, init=centers, precompute_distances=False,
random_state=42)
km.fit(X)
assert_array_equal(km.cluster_centers_, centers)
assert_array_equal(km.labels_, labels)
def test_mb_k_means_plus_plus_init_dense_array():
mb_k_means = MiniBatchKMeans(init="k-means++", n_clusters=n_clusters,
random_state=42)
mb_k_means.fit(X)
_check_fitted_model(mb_k_means)
def test_mb_kmeans_verbose():
mb_k_means = MiniBatchKMeans(init="k-means++", n_clusters=n_clusters,
random_state=42, verbose=1)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
mb_k_means.fit(X)
finally:
sys.stdout = old_stdout
def test_mb_k_means_plus_plus_init_sparse_matrix():
mb_k_means = MiniBatchKMeans(init="k-means++", n_clusters=n_clusters,
random_state=42)
mb_k_means.fit(X_csr)
_check_fitted_model(mb_k_means)
def test_minibatch_init_with_large_k():
mb_k_means = MiniBatchKMeans(init='k-means++', init_size=10, n_clusters=20)
# Check that a warning is raised, as the number clusters is larger
# than the init_size
assert_warns(RuntimeWarning, mb_k_means.fit, X)
def test_minibatch_k_means_random_init_dense_array():
# increase n_init to make random init stable enough
mb_k_means = MiniBatchKMeans(init="random", n_clusters=n_clusters,
random_state=42, n_init=10).fit(X)
_check_fitted_model(mb_k_means)
def test_minibatch_k_means_random_init_sparse_csr():
# increase n_init to make random init stable enough
mb_k_means = MiniBatchKMeans(init="random", n_clusters=n_clusters,
random_state=42, n_init=10).fit(X_csr)
_check_fitted_model(mb_k_means)
def test_minibatch_k_means_perfect_init_dense_array():
mb_k_means = MiniBatchKMeans(init=centers.copy(), n_clusters=n_clusters,
random_state=42, n_init=1).fit(X)
_check_fitted_model(mb_k_means)
def test_minibatch_k_means_init_multiple_runs_with_explicit_centers():
mb_k_means = MiniBatchKMeans(init=centers.copy(), n_clusters=n_clusters,
random_state=42, n_init=10)
assert_warns(RuntimeWarning, mb_k_means.fit, X)
def test_minibatch_k_means_perfect_init_sparse_csr():
mb_k_means = MiniBatchKMeans(init=centers.copy(), n_clusters=n_clusters,
random_state=42, n_init=1).fit(X_csr)
_check_fitted_model(mb_k_means)
def test_minibatch_sensible_reassign_fit():
# check if identical initial clusters are reassigned
# also a regression test for when there are more desired reassignments than
# samples.
zeroed_X, true_labels = make_blobs(n_samples=100, centers=5,
cluster_std=1., random_state=42)
zeroed_X[::2, :] = 0
mb_k_means = MiniBatchKMeans(n_clusters=20, batch_size=10, random_state=42,
init="random")
mb_k_means.fit(zeroed_X)
# there should not be too many exact zero cluster centers
assert_greater(mb_k_means.cluster_centers_.any(axis=1).sum(), 10)
# do the same with batch-size > X.shape[0] (regression test)
mb_k_means = MiniBatchKMeans(n_clusters=20, batch_size=201,
random_state=42, init="random")
mb_k_means.fit(zeroed_X)
# there should not be too many exact zero cluster centers
assert_greater(mb_k_means.cluster_centers_.any(axis=1).sum(), 10)
def test_minibatch_sensible_reassign_partial_fit():
zeroed_X, true_labels = make_blobs(n_samples=n_samples, centers=5,
cluster_std=1., random_state=42)
zeroed_X[::2, :] = 0
mb_k_means = MiniBatchKMeans(n_clusters=20, random_state=42, init="random")
for i in range(100):
mb_k_means.partial_fit(zeroed_X)
# there should not be too many exact zero cluster centers
assert_greater(mb_k_means.cluster_centers_.any(axis=1).sum(), 10)
def test_minibatch_reassign():
# Give a perfect initialization, but a large reassignment_ratio,
# as a result all the centers should be reassigned and the model
# should not longer be good
for this_X in (X, X_csr):
mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, batch_size=100,
random_state=42)
mb_k_means.fit(this_X)
score_before = mb_k_means.score(this_X)
try:
old_stdout = sys.stdout
sys.stdout = StringIO()
# Turn on verbosity to smoke test the display code
_mini_batch_step(this_X, (X ** 2).sum(axis=1),
mb_k_means.cluster_centers_,
mb_k_means.counts_,
np.zeros(X.shape[1], np.double),
False, distances=np.zeros(X.shape[0]),
random_reassign=True, random_state=42,
reassignment_ratio=1, verbose=True)
finally:
sys.stdout = old_stdout
assert_greater(score_before, mb_k_means.score(this_X))
# Give a perfect initialization, with a small reassignment_ratio,
# no center should be reassigned
for this_X in (X, X_csr):
mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, batch_size=100,
init=centers.copy(),
random_state=42, n_init=1)
mb_k_means.fit(this_X)
clusters_before = mb_k_means.cluster_centers_
# Turn on verbosity to smoke test the display code
_mini_batch_step(this_X, (X ** 2).sum(axis=1),
mb_k_means.cluster_centers_,
mb_k_means.counts_,
np.zeros(X.shape[1], np.double),
False, distances=np.zeros(X.shape[0]),
random_reassign=True, random_state=42,
reassignment_ratio=1e-15)
assert_array_almost_equal(clusters_before, mb_k_means.cluster_centers_)
def test_minibatch_with_many_reassignments():
# Test for the case that the number of clusters to reassign is bigger
# than the batch_size
n_samples = 550
rnd = np.random.RandomState(42)
X = rnd.uniform(size=(n_samples, 10))
# Check that the fit works if n_clusters is bigger than the batch_size.
# Run the test with 550 clusters and 550 samples, because it turned out
# that this values ensure that the number of clusters to reassign
# is always bigger than the batch_size
n_clusters = 550
MiniBatchKMeans(n_clusters=n_clusters,
batch_size=100,
init_size=n_samples,
random_state=42).fit(X)
def test_sparse_mb_k_means_callable_init():
def test_init(X, k, random_state):
return centers
# Small test to check that giving the wrong number of centers
# raises a meaningful error
assert_raises(ValueError,
MiniBatchKMeans(init=test_init, random_state=42).fit, X_csr)
# Now check that the fit actually works
mb_k_means = MiniBatchKMeans(n_clusters=3, init=test_init,
random_state=42).fit(X_csr)
_check_fitted_model(mb_k_means)
def test_mini_batch_k_means_random_init_partial_fit():
km = MiniBatchKMeans(n_clusters=n_clusters, init="random", random_state=42)
# use the partial_fit API for online learning
for X_minibatch in np.array_split(X, 10):
km.partial_fit(X_minibatch)
# compute the labeling on the complete dataset
labels = km.predict(X)
assert_equal(v_measure_score(true_labels, labels), 1.0)
def test_minibatch_default_init_size():
mb_k_means = MiniBatchKMeans(init=centers.copy(), n_clusters=n_clusters,
batch_size=10, random_state=42,
n_init=1).fit(X)
assert_equal(mb_k_means.init_size_, 3 * mb_k_means.batch_size)
_check_fitted_model(mb_k_means)
def test_minibatch_tol():
mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, batch_size=10,
random_state=42, tol=.01).fit(X)
_check_fitted_model(mb_k_means)
def test_minibatch_set_init_size():
mb_k_means = MiniBatchKMeans(init=centers.copy(), n_clusters=n_clusters,
init_size=666, random_state=42,
n_init=1).fit(X)
assert_equal(mb_k_means.init_size, 666)
assert_equal(mb_k_means.init_size_, n_samples)
_check_fitted_model(mb_k_means)
def test_k_means_invalid_init():
km = KMeans(init="invalid", n_init=1, n_clusters=n_clusters)
assert_raises(ValueError, km.fit, X)
def test_mini_match_k_means_invalid_init():
km = MiniBatchKMeans(init="invalid", n_init=1, n_clusters=n_clusters)
assert_raises(ValueError, km.fit, X)
def test_k_means_copyx():
# Check if copy_x=False returns nearly equal X after de-centering.
my_X = X.copy()
km = KMeans(copy_x=False, n_clusters=n_clusters, random_state=42)
km.fit(my_X)
_check_fitted_model(km)
# check if my_X is centered
assert_array_almost_equal(my_X, X)
def test_k_means_non_collapsed():
    # Check that k_means with a bad initialization does not yield a singleton.
    # Starting with bad centers that are quickly ignored should not
    # result in a repositioning of the centers to the center of mass that
    # would lead to collapsed centers, which in turn makes the clustering
    # dependent on numerical instabilities.
my_X = np.array([[1.1, 1.1], [0.9, 1.1], [1.1, 0.9], [0.9, 1.1]])
array_init = np.array([[1.0, 1.0], [5.0, 5.0], [-5.0, -5.0]])
km = KMeans(init=array_init, n_clusters=3, random_state=42, n_init=1)
km.fit(my_X)
    # centers must not have collapsed
assert_equal(len(np.unique(km.labels_)), 3)
centers = km.cluster_centers_
assert_true(np.linalg.norm(centers[0] - centers[1]) >= 0.1)
assert_true(np.linalg.norm(centers[0] - centers[2]) >= 0.1)
assert_true(np.linalg.norm(centers[1] - centers[2]) >= 0.1)
def test_predict():
km = KMeans(n_clusters=n_clusters, random_state=42)
km.fit(X)
# sanity check: predict centroid labels
pred = km.predict(km.cluster_centers_)
assert_array_equal(pred, np.arange(n_clusters))
# sanity check: re-predict labeling for training set samples
pred = km.predict(X)
assert_array_equal(pred, km.labels_)
# re-predict labels for training set using fit_predict
pred = km.fit_predict(X)
assert_array_equal(pred, km.labels_)
def test_score():
km1 = KMeans(n_clusters=n_clusters, max_iter=1, random_state=42)
s1 = km1.fit(X).score(X)
km2 = KMeans(n_clusters=n_clusters, max_iter=10, random_state=42)
s2 = km2.fit(X).score(X)
assert_greater(s2, s1)
def test_predict_minibatch_dense_input():
mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, random_state=40).fit(X)
# sanity check: predict centroid labels
pred = mb_k_means.predict(mb_k_means.cluster_centers_)
assert_array_equal(pred, np.arange(n_clusters))
# sanity check: re-predict labeling for training set samples
pred = mb_k_means.predict(X)
assert_array_equal(mb_k_means.predict(X), mb_k_means.labels_)
def test_predict_minibatch_kmeanspp_init_sparse_input():
mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, init='k-means++',
n_init=10).fit(X_csr)
# sanity check: re-predict labeling for training set samples
assert_array_equal(mb_k_means.predict(X_csr), mb_k_means.labels_)
# sanity check: predict centroid labels
pred = mb_k_means.predict(mb_k_means.cluster_centers_)
assert_array_equal(pred, np.arange(n_clusters))
    # check that models trained on sparse input also work for dense input at
# predict time
assert_array_equal(mb_k_means.predict(X), mb_k_means.labels_)
def test_predict_minibatch_random_init_sparse_input():
mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, init='random',
n_init=10).fit(X_csr)
# sanity check: re-predict labeling for training set samples
assert_array_equal(mb_k_means.predict(X_csr), mb_k_means.labels_)
# sanity check: predict centroid labels
pred = mb_k_means.predict(mb_k_means.cluster_centers_)
assert_array_equal(pred, np.arange(n_clusters))
    # check that models trained on sparse input also work for dense input at
# predict time
assert_array_equal(mb_k_means.predict(X), mb_k_means.labels_)
def test_input_dtypes():
X_list = [[0, 0], [10, 10], [12, 9], [-1, 1], [2, 0], [8, 10]]
X_int = np.array(X_list, dtype=np.int32)
X_int_csr = sp.csr_matrix(X_int)
init_int = X_int[:2]
fitted_models = [
KMeans(n_clusters=2).fit(X_list),
KMeans(n_clusters=2).fit(X_int),
KMeans(n_clusters=2, init=init_int, n_init=1).fit(X_list),
KMeans(n_clusters=2, init=init_int, n_init=1).fit(X_int),
# mini batch kmeans is very unstable on such a small dataset hence
# we use many inits
MiniBatchKMeans(n_clusters=2, n_init=10, batch_size=2).fit(X_list),
MiniBatchKMeans(n_clusters=2, n_init=10, batch_size=2).fit(X_int),
MiniBatchKMeans(n_clusters=2, n_init=10, batch_size=2).fit(X_int_csr),
MiniBatchKMeans(n_clusters=2, batch_size=2,
init=init_int, n_init=1).fit(X_list),
MiniBatchKMeans(n_clusters=2, batch_size=2,
init=init_int, n_init=1).fit(X_int),
MiniBatchKMeans(n_clusters=2, batch_size=2,
init=init_int, n_init=1).fit(X_int_csr),
]
expected_labels = [0, 1, 1, 0, 0, 1]
scores = np.array([v_measure_score(expected_labels, km.labels_)
for km in fitted_models])
assert_array_equal(scores, np.ones(scores.shape[0]))
def test_transform():
km = KMeans(n_clusters=n_clusters)
km.fit(X)
X_new = km.transform(km.cluster_centers_)
for c in range(n_clusters):
assert_equal(X_new[c, c], 0)
for c2 in range(n_clusters):
if c != c2:
assert_greater(X_new[c, c2], 0)
def test_fit_transform():
X1 = KMeans(n_clusters=3, random_state=51).fit(X).transform(X)
X2 = KMeans(n_clusters=3, random_state=51).fit_transform(X)
assert_array_equal(X1, X2)
def test_n_init():
    # Check that increasing the number of inits increases the quality
n_runs = 5
n_init_range = [1, 5, 10]
inertia = np.zeros((len(n_init_range), n_runs))
for i, n_init in enumerate(n_init_range):
for j in range(n_runs):
km = KMeans(n_clusters=n_clusters, init="random", n_init=n_init,
random_state=j).fit(X)
inertia[i, j] = km.inertia_
inertia = inertia.mean(axis=1)
failure_msg = ("Inertia %r should be decreasing"
" when n_init is increasing.") % list(inertia)
for i in range(len(n_init_range) - 1):
assert_true(inertia[i] >= inertia[i + 1], failure_msg)
def test_k_means_function():
# test calling the k_means function directly
# catch output
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
cluster_centers, labels, inertia = k_means(X, n_clusters=n_clusters,
verbose=True)
finally:
sys.stdout = old_stdout
centers = cluster_centers
assert_equal(centers.shape, (n_clusters, n_features))
labels = labels
assert_equal(np.unique(labels).shape[0], n_clusters)
# check that the labels assignment are perfect (up to a permutation)
assert_equal(v_measure_score(true_labels, labels), 1.0)
assert_greater(inertia, 0.0)
# check warning when centers are passed
assert_warns(RuntimeWarning, k_means, X, n_clusters=n_clusters,
init=centers)
    # too many clusters desired
assert_raises(ValueError, k_means, X, n_clusters=X.shape[0] + 1)
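# Illustrative sketch (not part of the original test suite): the same direct
# k_means call pattern exercised in test_k_means_function above, run on a small
# random dataset.  The helper name and the synthetic data are assumptions made
# for illustration only; the leading underscore keeps it from being collected
# as a test.
def _example_k_means_direct_call():
    rnd = np.random.RandomState(0)
    X_demo = rnd.rand(50, 2)
    centers, labels, inertia = k_means(X_demo, n_clusters=3)
    assert centers.shape == (3, 2)
    assert np.unique(labels).shape[0] == 3
    assert inertia > 0.0
    return centers, labels, inertia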
| bsd-3-clause |
finfou/tushare | tushare/stock/trading.py | 1 | 23685 | # -*- coding:utf-8 -*-
"""
Trading data API
Created on 2014/07/31
@author: Jimmy Liu
@group : waditu
@contact: [email protected]
"""
from __future__ import division
import time
import json
import lxml.html
from lxml import etree
import pandas as pd
import numpy as np
from tushare.stock import cons as ct
import re
from pandas.compat import StringIO
from tushare.util import dateu as du
try:
from urllib.request import urlopen, Request
except ImportError:
from urllib2 import urlopen, Request
def get_hist_data(code=None, start=None, end=None,
ktype='D', retry_count=3,
pause=0.001):
"""
    Fetch historical trading records for a single stock.
    Parameters
    ------
      code:string
                  stock code, e.g. 600848
      start:string
                  start date, format YYYY-MM-DD; if empty, data starts at the earliest date the API provides
      end:string
                  end date, format YYYY-MM-DD; if empty, data runs up to the most recent trading day
      ktype:string
                  data type: D=daily K-line, W=weekly, M=monthly, 5/15/30/60=minute bars; default is D
      retry_count : int, default 3
                 number of retries on network or similar failures
      pause : int, default 0
                seconds to pause between repeated requests, to avoid issues caused by requests that are too close together
    return
    -------
      DataFrame
          columns: date, open, high, close, low, volume, price change, pct change, 5/10/20-day mean price, 5/10/20-day mean volume, turnover rate
"""
symbol = _code_to_symbol(code)
url = ''
if ktype.upper() in ct.K_LABELS:
url = ct.DAY_PRICE_URL%(ct.P_TYPE['http'], ct.DOMAINS['ifeng'],
ct.K_TYPE[ktype.upper()], symbol)
elif ktype in ct.K_MIN_LABELS:
url = ct.DAY_PRICE_MIN_URL%(ct.P_TYPE['http'], ct.DOMAINS['ifeng'],
symbol, ktype)
else:
raise TypeError('ktype input error.')
for _ in range(retry_count):
time.sleep(pause)
try:
request = Request(url)
lines = urlopen(request, timeout = 10).read()
if len(lines) < 15: #no data
return None
except Exception as e:
print(e)
else:
js = json.loads(lines.decode('utf-8') if ct.PY3 else lines)
cols = []
if (code in ct.INDEX_LABELS) & (ktype.upper() in ct.K_LABELS):
cols = ct.INX_DAY_PRICE_COLUMNS
else:
cols = ct.DAY_PRICE_COLUMNS
if len(js['record'][0]) == 14:
cols = ct.INX_DAY_PRICE_COLUMNS
df = pd.DataFrame(js['record'], columns=cols)
if ktype.upper() in ['D', 'W', 'M']:
df = df.applymap(lambda x: x.replace(u',', u''))
df[df==''] = 0
for col in cols[1:]:
df[col] = df[col].astype(float)
if start is not None:
df = df[df.date >= start]
if end is not None:
df = df[df.date <= end]
if (code in ct.INDEX_LABELS) & (ktype in ct.K_MIN_LABELS):
df = df.drop('turnover', axis=1)
df = df.set_index('date')
df = df.sort_index(ascending = False)
return df
raise IOError(ct.NETWORK_URL_ERROR_MSG)
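# Illustrative usage sketch (not part of the original module): calls
# get_hist_data as documented above.  It needs network access, so it is only
# defined here and never invoked; the stock code and dates are examples.
def _example_get_hist_data():
    df = get_hist_data('600848', start='2014-01-05', end='2014-06-30', ktype='D')
    if df is not None:
        print(df.head())
    return df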
def _parsing_dayprice_json(pageNum=1):
"""
    Parse one page of the current day's quotes (JSON format).
    Parameters
    ------
       pageNum: page number
    return
    -------
       DataFrame with trading data for all stocks on the current day
"""
ct._write_console()
request = Request(ct.SINA_DAY_PRICE_URL%(ct.P_TYPE['http'], ct.DOMAINS['vsf'],
ct.PAGES['jv'], pageNum))
text = urlopen(request, timeout=10).read()
if text == 'null':
return None
reg = re.compile(r'\,(.*?)\:')
text = reg.sub(r',"\1":', text.decode('gbk') if ct.PY3 else text)
text = text.replace('"{symbol', '{"symbol')
text = text.replace('{symbol', '{"symbol"')
if ct.PY3:
jstr = json.dumps(text)
else:
jstr = json.dumps(text, encoding='GBK')
js = json.loads(jstr)
df = pd.DataFrame(pd.read_json(js, dtype={'code':object}),
columns=ct.DAY_TRADING_COLUMNS)
df = df.drop('symbol', axis=1)
df = df.ix[df.volume > 0]
return df
def get_tick_data(code=None, date=None, retry_count=3, pause=0.001):
"""
    Fetch tick-by-tick trade data.
    Parameters
    ------
      code:string
                  stock code, e.g. 600848
      date:string
                  date, format YYYY-MM-DD
      retry_count : int, default 3
                 number of retries on network or similar failures
      pause : int, default 0
                seconds to pause between repeated requests, to avoid issues caused by requests that are too close together
    return
    -------
      DataFrame of ticks for the given stock and date
          columns: trade time, trade price, price change, volume (lots), amount (CNY), trade type (buy/sell/neutral)
"""
if code is None or len(code)!=6 or date is None:
return None
symbol = _code_to_symbol(code)
for _ in range(retry_count):
time.sleep(pause)
try:
re = Request(ct.TICK_PRICE_URL % (ct.P_TYPE['http'], ct.DOMAINS['sf'], ct.PAGES['dl'],
date, symbol))
lines = urlopen(re, timeout=10).read()
lines = lines.decode('GBK')
if len(lines) < 100:
return None
df = pd.read_table(StringIO(lines), names=ct.TICK_COLUMNS,
skiprows=[0])
except Exception as e:
print(e)
else:
return df
raise IOError(ct.NETWORK_URL_ERROR_MSG)
def get_sina_dd(code=None, date=None, retry_count=3, pause=0.001):
"""
    Fetch Sina large-order (block trade) data.
    Parameters
    ------
      code:string
                  stock code, e.g. 600848
      date:string
                  date, format YYYY-MM-DD
      retry_count : int, default 3
                 number of retries on network or similar failures
      pause : int, default 0
                seconds to pause between repeated requests, to avoid issues caused by requests that are too close together
    return
    -------
      DataFrame of large orders for the given stock and date
          columns: stock code, stock name, trade time, price, volume, previous price, type (buy, sell, neutral)
"""
if code is None or len(code)!=6 or date is None:
return None
symbol = _code_to_symbol(code)
for _ in range(retry_count):
time.sleep(pause)
try:
re = Request(ct.SINA_DD % (ct.P_TYPE['http'], ct.DOMAINS['vsf'], ct.PAGES['sinadd'],
symbol, date))
lines = urlopen(re, timeout=10).read()
lines = lines.decode('GBK')
if len(lines) < 100:
return None
df = pd.read_csv(StringIO(lines), names=ct.SINA_DD_COLS,
skiprows=[0])
if df is not None:
df['code'] = df['code'].map(lambda x: x[2:])
except Exception as e:
print(e)
else:
return df
raise IOError(ct.NETWORK_URL_ERROR_MSG)
def get_today_ticks(code=None, retry_count=3, pause=0.001):
"""
    Fetch today's tick-by-tick trade details.
    Parameters
    ------
      code:string
                  stock code, e.g. 600848
      retry_count : int, default 3
                 number of retries on network or similar failures
      pause : int, default 0
                seconds to pause between repeated requests, to avoid issues caused by requests that are too close together
    return
    -------
      DataFrame of today's ticks for the given stock
          columns: trade time, trade price, price change, volume (lots), amount (CNY), trade type (buy/sell/neutral)
"""
if code is None or len(code)!=6 :
return None
symbol = _code_to_symbol(code)
date = du.today()
for _ in range(retry_count):
time.sleep(pause)
try:
request = Request(ct.TODAY_TICKS_PAGE_URL % (ct.P_TYPE['http'], ct.DOMAINS['vsf'],
ct.PAGES['jv'], date,
symbol))
data_str = urlopen(request, timeout=10).read()
data_str = data_str.decode('GBK')
data_str = data_str[1:-1]
data_str = eval(data_str, type('Dummy', (dict,),
dict(__getitem__ = lambda s, n:n))())
data_str = json.dumps(data_str)
data_str = json.loads(data_str)
pages = len(data_str['detailPages'])
data = pd.DataFrame()
ct._write_head()
for pNo in range(1, pages+1):
data = data.append(_today_ticks(symbol, date, pNo,
retry_count, pause), ignore_index=True)
except Exception as er:
print(str(er))
else:
return data
raise IOError(ct.NETWORK_URL_ERROR_MSG)
def _today_ticks(symbol, tdate, pageNo, retry_count, pause):
ct._write_console()
for _ in range(retry_count):
time.sleep(pause)
try:
html = lxml.html.parse(ct.TODAY_TICKS_URL % (ct.P_TYPE['http'],
ct.DOMAINS['vsf'], ct.PAGES['t_ticks'],
symbol, tdate, pageNo
))
res = html.xpath('//table[@id=\"datatbl\"]/tbody/tr')
if ct.PY3:
sarr = [etree.tostring(node).decode('utf-8') for node in res]
else:
sarr = [etree.tostring(node) for node in res]
sarr = ''.join(sarr)
sarr = '<table>%s</table>'%sarr
sarr = sarr.replace('--', '0')
df = pd.read_html(StringIO(sarr), parse_dates=False)[0]
df.columns = ct.TODAY_TICK_COLUMNS
df['pchange'] = df['pchange'].map(lambda x : x.replace('%', ''))
except Exception as e:
print(e)
else:
return df
raise IOError(ct.NETWORK_URL_ERROR_MSG)
def get_today_all():
"""
    Fetch trading data for all stocks on the most recent trading day in a single call.
    return
    -------
      DataFrame
          columns: code, name, pct change, current price, open, high, low, previous close, volume, turnover rate
"""
ct._write_head()
df = _parsing_dayprice_json(1)
if df is not None:
for i in range(2, ct.PAGE_NUM[0]):
newdf = _parsing_dayprice_json(i)
df = df.append(newdf, ignore_index=True)
return df
def get_realtime_quotes(symbols=None):
"""
    Get real-time quotes data,
    used to track trading activity (difference between this call's result and the previous call's data).
    Parameters
    ------
        symbols : string, array-like object (list, tuple, Series).
    return
    -------
        DataFrame of real-time quotes
              columns: 0: name, stock name
            1: open, today's opening price
            2: pre_close, previous close
            3: price, current price
            4: high, today's high
            5: low, today's low
            6: bid, best bid ("buy one") price
            7: ask, best ask ("sell one") price
            8: volume, traded volume (you may need volume/100)
            9: amount, traded amount (CNY)
            10: b1_v, bid-1 volume
            11: b1_p, bid-1 price
            12: b2_v, bid-2 volume
            13: b2_p, bid-2 price
            14: b3_v, bid-3 volume
            15: b3_p, bid-3 price
            16: b4_v, bid-4 volume
            17: b4_p, bid-4 price
            18: b5_v, bid-5 volume
            19: b5_p, bid-5 price
            20: a1_v, ask-1 volume
            21: a1_p, ask-1 price
            ...
            30: date, date
            31: time, time
"""
symbols_list = ''
if isinstance(symbols, list) or isinstance(symbols, set) or isinstance(symbols, tuple) or isinstance(symbols, pd.Series):
for code in symbols:
symbols_list += _code_to_symbol(code) + ','
else:
symbols_list = _code_to_symbol(symbols)
symbols_list = symbols_list[:-1] if len(symbols_list) > 8 else symbols_list
request = Request(ct.LIVE_DATA_URL%(ct.P_TYPE['http'], ct.DOMAINS['sinahq'],
_random(), symbols_list))
text = urlopen(request,timeout=10).read()
text = text.decode('GBK')
reg = re.compile(r'\="(.*?)\";')
data = reg.findall(text)
regSym = re.compile(r'(?:sh|sz)(.*?)\=')
syms = regSym.findall(text)
data_list = []
syms_list = []
for index, row in enumerate(data):
if len(row)>1:
data_list.append([astr for astr in row.split(',')])
syms_list.append(syms[index])
if len(syms_list) == 0:
return None
df = pd.DataFrame(data_list, columns=ct.LIVE_DATA_COLS)
df = df.drop('s', axis=1)
df['code'] = syms_list
ls = [cls for cls in df.columns if '_v' in cls]
for txt in ls:
df[txt] = df[txt].map(lambda x : x[:-2])
return df
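# Illustrative usage sketch (not part of the original module): fetches live
# quotes for a couple of example codes, matching the docstring above.  Network
# access is required, so the helper is defined but never called here.
def _example_get_realtime_quotes():
    df = get_realtime_quotes(['600848', '000980'])
    if df is not None:
        print(df[['code', 'name', 'price', 'bid', 'ask']])
    return df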
def get_h_data(code, start=None, end=None, autype='qfq',
index=False, retry_count=3, pause=0.001, drop_factor=True):
'''
    Fetch historical price data adjusted for splits and dividends.
    Parameters
    ------
      code:string
                  stock code, e.g. 600848
      start:string
                  start date, format YYYY-MM-DD; if empty, defaults to one year ago today
      end:string
                  end date, format YYYY-MM-DD; if empty, defaults to today
      autype:string
                  adjustment type: qfq = forward-adjusted, hfq = backward-adjusted, None = unadjusted; default is qfq
      retry_count : int, default 3
                 number of retries on network or similar failures
      pause : int, default 0
                seconds to pause between repeated requests, to avoid issues caused by requests that are too close together
      drop_factor : bool, default True
                whether to drop the adjustment factor column; it is rarely needed during analysis, but keeping it is more flexible if the data is stored in a database first and analyzed later
    return
    -------
      DataFrame
          date   trade date (index)
          open   opening price
          high   highest price
          close  closing price
          low    lowest price
          volume traded volume
          amount traded amount
'''
start = du.today_last_year() if start is None else start
end = du.today() if end is None else end
qs = du.get_quarts(start, end)
qt = qs[0]
ct._write_head()
data = _parse_fq_data(_get_index_url(index, code, qt), index,
retry_count, pause)
if len(qs)>1:
for d in range(1, len(qs)):
qt = qs[d]
ct._write_console()
df = _parse_fq_data(_get_index_url(index, code, qt), index,
retry_count, pause)
data = data.append(df, ignore_index=True)
if len(data) == 0 or len(data[(data.date>=start)&(data.date<=end)]) == 0:
return None
data = data.drop_duplicates('date')
if index:
data = data[(data.date>=start) & (data.date<=end)]
data = data.set_index('date')
data = data.sort_index(ascending=False)
return data
if autype == 'hfq':
if drop_factor:
data = data.drop('factor', axis=1)
data = data[(data.date>=start) & (data.date<=end)]
for label in ['open', 'high', 'close', 'low']:
data[label] = data[label].map(ct.FORMAT)
data[label] = data[label].astype(float)
data = data.set_index('date')
data = data.sort_index(ascending = False)
return data
else:
if autype == 'qfq':
if drop_factor:
data = data.drop('factor', axis=1)
df = _parase_fq_factor(code, start, end)
df = df.drop_duplicates('date')
df = df.sort('date', ascending=False)
frow = df.head(1)
rt = get_realtime_quotes(code)
if rt is None:
return None
if ((float(rt['high']) == 0) & (float(rt['low']) == 0)):
preClose = float(rt['pre_close'])
else:
if du.is_holiday(du.today()):
preClose = float(rt['price'])
else:
if (du.get_hour() > 9) & (du.get_hour() < 18):
preClose = float(rt['pre_close'])
else:
preClose = float(rt['price'])
rate = float(frow['factor']) / preClose
data = data[(data.date >= start) & (data.date <= end)]
for label in ['open', 'high', 'low', 'close']:
data[label] = data[label] / rate
data[label] = data[label].map(ct.FORMAT)
data[label] = data[label].astype(float)
data = data.set_index('date')
data = data.sort_index(ascending = False)
return data
else:
for label in ['open', 'high', 'close', 'low']:
data[label] = data[label] / data['factor']
if drop_factor:
data = data.drop('factor', axis=1)
data = data[(data.date>=start) & (data.date<=end)]
for label in ['open', 'high', 'close', 'low']:
data[label] = data[label].map(ct.FORMAT)
data = data.set_index('date')
data = data.sort_index(ascending = False)
data = data.astype(float)
return data
def _parase_fq_factor(code, start, end):
symbol = _code_to_symbol(code)
request = Request(ct.HIST_FQ_FACTOR_URL%(ct.P_TYPE['http'],
ct.DOMAINS['vsf'], symbol))
text = urlopen(request, timeout=10).read()
text = text[1:len(text)-1]
text = text.decode('utf-8') if ct.PY3 else text
text = text.replace('{_', '{"')
text = text.replace('total', '"total"')
text = text.replace('data', '"data"')
text = text.replace(':"', '":"')
text = text.replace('",_', '","')
text = text.replace('_', '-')
text = json.loads(text)
df = pd.DataFrame({'date':list(text['data'].keys()), 'factor':list(text['data'].values())})
df['date'] = df['date'].map(_fun_except) # for null case
if df['date'].dtypes == np.object:
df['date'] = df['date'].astype(np.datetime64)
df = df.drop_duplicates('date')
df['factor'] = df['factor'].astype(float)
return df
def _fun_except(x):
if len(x) > 10:
return x[-10:]
else:
return x
def _parse_fq_data(url, index, retry_count, pause):
for _ in range(retry_count):
time.sleep(pause)
try:
request = Request(url)
text = urlopen(request, timeout=10).read()
text = text.decode('GBK')
html = lxml.html.parse(StringIO(text))
res = html.xpath('//table[@id=\"FundHoldSharesTable\"]')
if ct.PY3:
sarr = [etree.tostring(node).decode('utf-8') for node in res]
else:
sarr = [etree.tostring(node) for node in res]
sarr = ''.join(sarr)
df = pd.read_html(sarr, skiprows = [0, 1])[0]
if len(df) == 0:
return pd.DataFrame()
if index:
df.columns = ct.HIST_FQ_COLS[0:7]
else:
df.columns = ct.HIST_FQ_COLS
if df['date'].dtypes == np.object:
df['date'] = df['date'].astype(np.datetime64)
df = df.drop_duplicates('date')
except Exception as e:
print(e)
else:
return df
raise IOError(ct.NETWORK_URL_ERROR_MSG)
def get_index():
"""
    Fetch quotes for the major market indices.
    return
    -------
      DataFrame
          code: index code
          name: index name
          change: pct change
          open: opening price
          preclose: previous close
          close: closing price
          high: highest price
          low: lowest price
          volume: traded volume (lots)
          amount: traded amount (100 million CNY)
"""
request = Request(ct.INDEX_HQ_URL%(ct.P_TYPE['http'],
ct.DOMAINS['sinahq']))
text = urlopen(request, timeout=10).read()
text = text.decode('GBK')
text = text.replace('var hq_str_sh', '').replace('var hq_str_sz', '')
text = text.replace('";', '').replace('"', '').replace('=', ',')
text = '%s%s'%(ct.INDEX_HEADER, text)
df = pd.read_csv(StringIO(text), sep=',', thousands=',')
df['change'] = (df['close'] / df['preclose'] - 1 ) * 100
df['amount'] = df['amount'] / 100000000
df['change'] = df['change'].map(ct.FORMAT)
df['amount'] = df['amount'].map(ct.FORMAT)
df = df[ct.INDEX_COLS]
df['code'] = df['code'].map(lambda x:str(x).zfill(6))
df['change'] = df['change'].astype(float)
df['amount'] = df['amount'].astype(float)
return df
def _get_index_url(index, code, qt):
if index:
url = ct.HIST_INDEX_URL%(ct.P_TYPE['http'], ct.DOMAINS['vsf'],
code, qt[0], qt[1])
else:
url = ct.HIST_FQ_URL%(ct.P_TYPE['http'], ct.DOMAINS['vsf'],
code, qt[0], qt[1])
return url
def get_hists(symbols, start=None, end=None,
ktype='D', retry_count=3,
pause=0.001):
"""
    Fetch historical quotes for a batch of symbols; see get_hist_data for the parameters and the returned data type.
"""
df = pd.DataFrame()
if isinstance(symbols, list) or isinstance(symbols, set) or isinstance(symbols, tuple) or isinstance(symbols, pd.Series):
for symbol in symbols:
data = get_hist_data(symbol, start=start, end=end,
ktype=ktype, retry_count=retry_count,
pause=pause)
data['code'] = symbol
df = df.append(data, ignore_index=True)
return df
else:
return None
def _random(n=13):
from random import randint
start = 10**(n-1)
end = (10**n)-1
return str(randint(start, end))
def _code_to_symbol(code):
"""
    Build the exchange-prefixed symbol (sh/sz) for a stock code.
"""
if code in ct.INDEX_LABELS:
return ct.INDEX_LIST[code]
else:
if len(code) != 6 :
return ''
else:
return 'sh%s'%code if code[:1] in ['5', '6', '9'] else 'sz%s'%code
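# Illustrative usage sketch (not part of the original module): pulls the market
# index quotes via get_index() as documented above.  It requires network
# access, so it is defined for reference only and not executed on import.
def _example_get_index():
    df = get_index()
    if df is not None:
        print(df[['code', 'name', 'change', 'close']])
    return df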
| bsd-3-clause |
frankinit/ThinkStats2 | code/scatter.py | 69 | 4281 | """This file contains code for use with "Think Stats",
by Allen B. Downey, available from greenteapress.com
Copyright 2010 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
from __future__ import print_function
import sys
import numpy as np
import math
import brfss
import thinkplot
import thinkstats2
def GetHeightWeight(df, hjitter=0.0, wjitter=0.0):
"""Get sequences of height and weight.
df: DataFrame with htm3 and wtkg2
hjitter: float magnitude of random noise added to heights
wjitter: float magnitude of random noise added to weights
returns: tuple of sequences (heights, weights)
"""
heights = df.htm3
if hjitter:
heights = thinkstats2.Jitter(heights, hjitter)
weights = df.wtkg2
if wjitter:
weights = thinkstats2.Jitter(weights, wjitter)
return heights, weights
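def JitterSketch(values, jitter=0.5):
    """Illustrative sketch (not in the original script): a numpy-only version
    of the jitter idea used above -- add small uniform noise so that repeated
    reported values do not stack on top of each other in a scatter plot.
    The function name and the uniform-noise choice are assumptions here.
    """
    values = np.asarray(values, dtype=float)
    return values + np.random.uniform(-jitter, jitter, values.shape)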
def ScatterPlot(heights, weights, alpha=1.0):
"""Make a scatter plot and save it.
heights: sequence of float
weights: sequence of float
alpha: float
"""
thinkplot.Scatter(heights, weights, alpha=alpha)
thinkplot.Config(xlabel='height (cm)',
ylabel='weight (kg)',
axis=[140, 210, 20, 200],
legend=False)
def HexBin(heights, weights, bins=None):
"""Make a hexbin plot and save it.
heights: sequence of float
weights: sequence of float
bins: 'log' or None for linear
"""
thinkplot.HexBin(heights, weights, bins=bins)
thinkplot.Config(xlabel='height (cm)',
ylabel='weight (kg)',
axis=[140, 210, 20, 200],
legend=False)
def MakeFigures(df):
"""Make scatterplots.
"""
sample = thinkstats2.SampleRows(df, 5000)
# simple scatter plot
thinkplot.PrePlot(cols=2)
heights, weights = GetHeightWeight(sample)
ScatterPlot(heights, weights)
# scatter plot with jitter
thinkplot.SubPlot(2)
heights, weights = GetHeightWeight(sample, hjitter=1.3, wjitter=0.5)
ScatterPlot(heights, weights)
thinkplot.Save(root='scatter1')
# with jitter and transparency
thinkplot.PrePlot(cols=2)
ScatterPlot(heights, weights, alpha=0.1)
# hexbin plot
thinkplot.SubPlot(2)
heights, weights = GetHeightWeight(df, hjitter=1.3, wjitter=0.5)
HexBin(heights, weights)
thinkplot.Save(root='scatter2')
def BinnedPercentiles(df):
"""Bin the data by height and plot percentiles of weight for eachbin.
df: DataFrame
"""
cdf = thinkstats2.Cdf(df.htm3)
print('Fraction between 140 and 200 cm', cdf[200] - cdf[140])
bins = np.arange(135, 210, 5)
indices = np.digitize(df.htm3, bins)
groups = df.groupby(indices)
heights = [group.htm3.mean() for i, group in groups][1:-1]
cdfs = [thinkstats2.Cdf(group.wtkg2) for i, group in groups][1:-1]
thinkplot.PrePlot(3)
for percent in [75, 50, 25]:
weights = [cdf.Percentile(percent) for cdf in cdfs]
label = '%dth' % percent
thinkplot.Plot(heights, weights, label=label)
thinkplot.Save(root='scatter3',
xlabel='height (cm)',
ylabel='weight (kg)')
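def BinnedPercentilesSketch(heights, weights, bin_edges, percent=50):
    """Illustrative sketch (not in the original script): the same bin-then-
    percentile computation as BinnedPercentiles above, written with plain
    numpy so each step is visible without the thinkstats2/thinkplot helpers.
    """
    heights = np.asarray(heights, dtype=float)
    weights = np.asarray(weights, dtype=float)
    indices = np.digitize(heights, bin_edges)
    centers, percentiles = [], []
    for i in np.unique(indices):
        in_bin = indices == i
        centers.append(heights[in_bin].mean())
        percentiles.append(np.percentile(weights[in_bin], percent))
    return centers, percentiles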
def Correlations(df):
print('pandas cov', df.htm3.cov(df.wtkg2))
#print('NumPy cov', np.cov(df.htm3, df.wtkg2, ddof=0))
print('thinkstats2 Cov', thinkstats2.Cov(df.htm3, df.wtkg2))
print()
print('pandas corr', df.htm3.corr(df.wtkg2))
#print('NumPy corrcoef', np.corrcoef(df.htm3, df.wtkg2, ddof=0))
print('thinkstats2 Corr', thinkstats2.Corr(df.htm3, df.wtkg2))
print()
print('pandas corr spearman', df.htm3.corr(df.wtkg2, method='spearman'))
print('thinkstats2 SpearmanCorr',
thinkstats2.SpearmanCorr(df.htm3, df.wtkg2))
print('thinkstats2 SpearmanCorr log wtkg3',
thinkstats2.SpearmanCorr(df.htm3, np.log(df.wtkg2)))
print()
print('thinkstats2 Corr log wtkg3',
thinkstats2.Corr(df.htm3, np.log(df.wtkg2)))
print()
def main(script):
thinkstats2.RandomSeed(17)
df = brfss.ReadBrfss(nrows=None)
df = df.dropna(subset=['htm3', 'wtkg2'])
Correlations(df)
return
MakeFigures(df)
BinnedPercentiles(df)
if __name__ == '__main__':
main(*sys.argv)
| gpl-3.0 |
RayMick/scikit-learn | examples/cluster/plot_ward_structured_vs_unstructured.py | 320 | 3369 | """
===========================================================
Hierarchical clustering: structured vs unstructured ward
===========================================================
Example builds a swiss roll dataset and runs
hierarchical clustering on the position of the data points.
For more information, see :ref:`hierarchical_clustering`.
In a first step, the hierarchical clustering is performed without connectivity
constraints on the structure and is solely based on distance, whereas in
a second step the clustering is restricted to the k-Nearest Neighbors
graph: it's a hierarchical clustering with structure prior.
Some of the clusters learned without connectivity constraints do not
respect the structure of the swiss roll and extend across different folds of
the manifolds. By contrast, when connectivity constraints are imposed,
the clusters form a nice parcellation of the swiss roll.
"""
# Authors : Vincent Michel, 2010
# Alexandre Gramfort, 2010
# Gael Varoquaux, 2010
# License: BSD 3 clause
print(__doc__)
import time as time
import numpy as np
import matplotlib.pyplot as plt
import mpl_toolkits.mplot3d.axes3d as p3
from sklearn.cluster import AgglomerativeClustering
from sklearn.datasets.samples_generator import make_swiss_roll
###############################################################################
# Generate data (swiss roll dataset)
n_samples = 1500
noise = 0.05
X, _ = make_swiss_roll(n_samples, noise)
# Make it thinner
X[:, 1] *= .5
###############################################################################
# Compute clustering
print("Compute unstructured hierarchical clustering...")
st = time.time()
ward = AgglomerativeClustering(n_clusters=6, linkage='ward').fit(X)
elapsed_time = time.time() - st
label = ward.labels_
print("Elapsed time: %.2fs" % elapsed_time)
print("Number of points: %i" % label.size)
###############################################################################
# Plot result
fig = plt.figure()
ax = p3.Axes3D(fig)
ax.view_init(7, -80)
for l in np.unique(label):
ax.plot3D(X[label == l, 0], X[label == l, 1], X[label == l, 2],
'o', color=plt.cm.jet(np.float(l) / np.max(label + 1)))
plt.title('Without connectivity constraints (time %.2fs)' % elapsed_time)
###############################################################################
# Define the structure A of the data. Here a 10 nearest neighbors
from sklearn.neighbors import kneighbors_graph
connectivity = kneighbors_graph(X, n_neighbors=10, include_self=False)
###############################################################################
# Compute clustering
print("Compute structured hierarchical clustering...")
st = time.time()
ward = AgglomerativeClustering(n_clusters=6, connectivity=connectivity,
linkage='ward').fit(X)
elapsed_time = time.time() - st
label = ward.labels_
print("Elapsed time: %.2fs" % elapsed_time)
print("Number of points: %i" % label.size)
###############################################################################
# Plot result
fig = plt.figure()
ax = p3.Axes3D(fig)
ax.view_init(7, -80)
for l in np.unique(label):
ax.plot3D(X[label == l, 0], X[label == l, 1], X[label == l, 2],
'o', color=plt.cm.jet(float(l) / np.max(label + 1)))
plt.title('With connectivity constraints (time %.2fs)' % elapsed_time)
plt.show()
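###############################################################################
# Illustrative addition (not part of the original example): a quick look at how
# many samples end up in each of the 6 structured clusters; np.bincount simply
# counts the occurrences of every label value.
print("Cluster sizes with connectivity constraints: %s" % np.bincount(label))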
| bsd-3-clause |
ExaScience/smurff | python/test/test_smurff.py | 1 | 11009 | import unittest
import numpy as np
import pandas as pd
import scipy.sparse
import smurff
import itertools
import collections
verbose = 0
class TestSmurff(unittest.TestCase):
# Python 2.7 @unittest.skip fix
__name__ = "TestSmurff"
def test_bpmf(self):
Y = scipy.sparse.rand(10, 20, 0.2)
Y, Ytest = smurff.make_train_test(Y, 0.5)
predictions = smurff.smurff(Y,
Ytest=Ytest,
priors=['normal', 'normal'],
num_latent=4,
verbose=verbose,
burnin=50,
nsamples=50)
self.assertEqual(Ytest.nnz, len(predictions))
def test_bpmf_numerictest(self):
X = scipy.sparse.rand(15, 10, 0.2)
Xt = 0.3
X, Xt = smurff.make_train_test(X, Xt)
smurff.smurff(X,
Ytest=Xt,
priors=['normal', 'normal'],
num_latent=10,
burnin=10,
nsamples=15,
verbose=verbose)
def test_macau(self):
Ydense = np.random.rand(10, 20)
r = np.random.permutation(10*20)[:40] # 40 random samples from 10*20 matrix
side1 = Ydense[:,1:2]
side2 = Ydense[1:2,:].transpose()
Y = scipy.sparse.coo_matrix(Ydense) # convert to sparse
Y = scipy.sparse.coo_matrix( (Y.data[r], (Y.row[r], Y.col[r])), shape=Y.shape )
Y, Ytest = smurff.make_train_test(Y, 0.5)
predictions = smurff.smurff(Y,
Ytest=Ytest,
priors=['macau', 'macau'],
side_info=[side1, side2],
direct=True,
# side_info_noises=[[('fixed', 1.0, None, None, None)], [('adaptive', None, 0.5, 1.0, None)]],
num_latent=4,
verbose=verbose,
burnin=50,
nsamples=50)
#self.assertEqual(Ytest.nnz, len(predictions))
def test_macau_side_bin(self):
X = scipy.sparse.rand(15, 10, 0.2)
Xt = scipy.sparse.rand(15, 10, 0.1)
F = scipy.sparse.rand(15, 2, 0.5)
F.data[:] = 1
smurff.smurff(X,
Ytest=Xt,
priors=['macau', 'normal'],
side_info=[F, None],
direct=True,
num_latent=5,
burnin=10,
nsamples=5,
verbose=verbose)
def test_macau_dense(self):
Y = scipy.sparse.rand(15, 10, 0.2)
Yt = scipy.sparse.rand(15, 10, 0.1)
F = np.random.randn(15, 2)
smurff.smurff(Y,
Ytest=Yt,
priors=['macau', 'normal'],
side_info=[F, None],
direct=True,
num_latent=5,
burnin=10,
nsamples=5,
verbose=verbose)
def test_macau_dense_probit(self):
A = np.random.randn(25, 2)
B = np.random.randn(3, 2)
idx = list( itertools.product(np.arange(A.shape[0]), np.arange(B.shape[0])) )
df = pd.DataFrame( np.asarray(idx), columns=["A", "B"])
df["value"] = (np.array([ np.sum(A[i[0], :] * B[i[1], :]) for i in idx ]) > 0.0).astype(np.float64)
Ytrain, Ytest = smurff.make_train_test(df, 0.2)
threshold = 0.5 # since we sample from mu(0,1)
trainSession = smurff.TrainSession(priors=['macau', 'normal'],
num_latent=4,
threshold=threshold,
burnin=200,
nsamples=200,
verbose=False)
trainSession.addTrainAndTest(Ytrain, Ytest, smurff.ProbitNoise(threshold))
trainSession.addSideInfo(0, A, direct=True)
predictions = trainSession.run()
auc = smurff.calc_auc(predictions, 0.5)
self.assertTrue(auc > 0.55,
msg="Probit factorization (with dense side) gave AUC below 0.55 (%f)." % auc)
def test_macau_univariate(self):
Y = scipy.sparse.rand(10, 20, 0.2)
Y, Ytest = smurff.make_train_test(Y, 0.5)
side1 = scipy.sparse.coo_matrix( np.random.rand(10, 2) )
side2 = scipy.sparse.coo_matrix( np.random.rand(20, 3) )
predictions = smurff.smurff(Y,
Ytest=Ytest,
priors=['macauone', 'macauone'],
side_info=[side1, side2],
direct=True,
num_latent=4,
verbose=verbose,
burnin=50,
nsamples=50)
self.assertEqual(Ytest.nnz, len(predictions))
def test_too_many_sides(self):
Y = scipy.sparse.rand(10, 20, 0.2)
with self.assertRaises(AssertionError):
smurff.smurff(Y,
priors=['normal', 'normal', 'normal'],
side_info=[None, None, None],
verbose = False)
def test_bpmf_emptytest(self):
X = scipy.sparse.rand(15, 10, 0.2)
smurff.smurff(X,
priors=['normal', 'normal'],
num_latent=10,
burnin=10,
nsamples=15,
verbose=verbose)
def test_bpmf_emptytest_probit(self):
X = scipy.sparse.rand(15, 10, 0.2)
X.data = X.data > 0.5
smurff.smurff(X,
priors=['normal', 'normal'],
num_latent=10,
burnin=10,
nsamples=15,
verbose=verbose)
def test_make_train_test(self):
X = scipy.sparse.rand(15, 10, 0.2)
Xtr, Xte = smurff.make_train_test(X, 0.5)
self.assertEqual(X.nnz, Xtr.nnz + Xte.nnz)
diff = np.linalg.norm( (X - Xtr - Xte).todense() )
self.assertEqual(diff, 0.0)
    def test_make_train_test_df(self):
nnz = 10 * 8 * 3
idx = list( itertools.product(np.arange(10), np.arange(8), np.arange(3) ))
df = pd.DataFrame( np.asarray(idx), columns=["A", "B", "C"])
df["value"] = np.arange(float(nnz))
Ytr, Yte = smurff.make_train_test(df, 0.4)
self.assertEqual(Ytr.nnz, nnz * 0.6)
self.assertEqual(Yte.nnz, nnz * 0.4)
A1 = np.zeros( (10, 8, 3) )
A2 = np.zeros( (10, 8, 3) )
A1[df.A, df.B, df.C] = df.value
A2[Ytr.columns[0], Ytr.columns[1], Ytr.columns[2]] = Ytr.values
A2[Yte.columns[0], Yte.columns[1], Yte.columns[2]] = Yte.values
self.assertTrue(np.allclose(A1, A2))
def test_bpmf_tensor(self):
np.random.seed(1234)
shape = [5,4,3]
Y = smurff.SparseTensor(pd.DataFrame({
"A": np.random.randint(0, 5, 7),
"B": np.random.randint(0, 4, 7),
"C": np.random.randint(0, 3, 7),
"value": np.random.randn(7)
}),shape)
Ytest = smurff.SparseTensor(pd.DataFrame({
"A": np.random.randint(0, 5, 5),
"B": np.random.randint(0, 4, 5),
"C": np.random.randint(0, 3, 5),
"value": np.random.randn(5)
}),shape)
predictions = smurff.smurff(Y,
Ytest=Ytest,
priors=['normal', 'normal', 'normal'],
num_latent=4,
verbose=verbose,
burnin=50,
nsamples=50)
def test_bpmf_tensor2(self):
A = np.random.randn(15, 2)
B = np.random.randn(20, 2)
C = np.random.randn(3, 2)
idx = list( itertools.product(np.arange(A.shape[0]), np.arange(B.shape[0]), np.arange(C.shape[0])) )
df = pd.DataFrame( np.asarray(idx), columns=["A", "B", "C"])
df["value"] = np.array([ np.sum(A[i[0], :] * B[i[1], :] * C[i[2], :]) for i in idx ])
Ytrain, Ytest = smurff.make_train_test(df, 0.2)
predictions = smurff.smurff(Ytrain,
Ytest=Ytest,
priors=['normal', 'normal', 'normal'],
num_latent=4,
verbose=verbose,
burnin=20,
nsamples=20)
rmse = smurff.calc_rmse(predictions)
self.assertTrue(rmse < 0.5,
msg="Tensor factorization gave RMSE above 0.5 (%f)." % rmse)
def test_bpmf_tensor3(self):
A = np.random.randn(15, 2)
B = np.random.randn(20, 2)
C = np.random.randn(1, 2)
idx = list( itertools.product(np.arange(A.shape[0]), np.arange(B.shape[0]), np.arange(C.shape[0])) )
df = pd.DataFrame( np.asarray(idx), columns=["A", "B", "C"])
df["value"] = np.array([ np.sum(A[i[0], :] * B[i[1], :] * C[i[2], :]) for i in idx ])
Ytrain, Ytest = smurff.make_train_test(df, 0.2)
predictions = smurff.smurff(Ytrain,
Ytest=Ytest,
priors=['normal', 'normal', 'normal'],
num_latent=4,
verbose=verbose,
burnin=20,
nsamples=20)
rmse = smurff.calc_rmse(predictions)
self.assertTrue(rmse < 0.5,
msg="Tensor factorization gave RMSE above 0.5 (%f)." % rmse)
def test_macau_tensor_empty(self):
A = np.random.randn(30, 2)
B = np.random.randn(4, 2)
C = np.random.randn(2, 2)
idx = list( itertools.product(np.arange(A.shape[0]), np.arange(B.shape[0]), np.arange(C.shape[0])) )
df = pd.DataFrame( np.asarray(idx), columns=["A", "B", "C"])
df["value"] = np.array([ np.sum(A[i[0], :] * B[i[1], :] * C[i[2], :]) for i in idx ])
Acoo = scipy.sparse.coo_matrix(A)
predictions = smurff.smurff(smurff.SparseTensor(df),
priors=['normal', 'normal', 'normal'],
num_latent=2,
burnin=5,
nsamples=5,
verbose=verbose)
self.assertFalse(predictions)
def test_threads(self):
Y = scipy.sparse.rand(10, 20, 0.2)
for t in range(7): # 1, 2, 4, 8, 16, 32, 64
smurff.smurff(Y, priors=['normal', 'normal'], num_latent=4, num_threads=2**t, verbose=verbose, burnin=5, nsamples=5)
if __name__ == '__main__':
unittest.main()
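# Illustrative sketch (not part of the original test file): the train/test
# split helper used throughout the suite distributes every non-zero entry of
# the input between the two returned matrices.  Defined for reference only and
# never called here.
def _example_make_train_test_split():
    X = scipy.sparse.rand(15, 10, 0.2)
    Xtrain, Xtest = smurff.make_train_test(X, 0.5)
    assert X.nnz == Xtrain.nnz + Xtest.nnz
    return Xtrain, Xtest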
| mit |
karstenw/nodebox-pyobjc | examples/Extended Application/matplotlib/examples/specialty_plots/anscombe.py | 1 | 2753 | """
==================
Anscombe's Quartet
==================
"""
from __future__ import print_function
"""
Edward Tufte uses this example from Anscombe to show 4 datasets of x
and y that have the same mean, standard deviation, and regression
line, but which are qualitatively different.
matplotlib fun for a rainy day
"""
import matplotlib.pyplot as plt
import numpy as np
# nodebox section
if __name__ == '__builtin__':
# were in nodebox
import os
import tempfile
W = 800
inset = 20
size(W, 600)
plt.cla()
plt.clf()
plt.close('all')
def tempimage():
fob = tempfile.NamedTemporaryFile(mode='w+b', suffix='.png', delete=False)
fname = fob.name
fob.close()
return fname
imgx = 20
imgy = 0
def pltshow(plt, dpi=150):
global imgx, imgy
temppath = tempimage()
plt.savefig(temppath, dpi=dpi)
dx,dy = imagesize(temppath)
w = min(W,dx)
image(temppath,imgx,imgy,width=w)
imgy = imgy + dy + 20
os.remove(temppath)
size(W, HEIGHT+dy+40)
else:
def pltshow(mplpyplot):
mplpyplot.show()
# nodebox section end
x = np.array([10, 8, 13, 9, 11, 14, 6, 4, 12, 7, 5])
y1 = np.array([8.04, 6.95, 7.58, 8.81, 8.33, 9.96, 7.24, 4.26, 10.84, 4.82, 5.68])
y2 = np.array([9.14, 8.14, 8.74, 8.77, 9.26, 8.10, 6.13, 3.10, 9.13, 7.26, 4.74])
y3 = np.array([7.46, 6.77, 12.74, 7.11, 7.81, 8.84, 6.08, 5.39, 8.15, 6.42, 5.73])
x4 = np.array([8, 8, 8, 8, 8, 8, 8, 19, 8, 8, 8])
y4 = np.array([6.58, 5.76, 7.71, 8.84, 8.47, 7.04, 5.25, 12.50, 5.56, 7.91, 6.89])
def fit(x):
return 3 + 0.5 * x
xfit = np.array([np.min(x), np.max(x)])
plt.subplot(221)
plt.plot(x, y1, 'ks', xfit, fit(xfit), 'r-', lw=2)
plt.axis([2, 20, 2, 14])
plt.setp(plt.gca(), xticklabels=[], yticks=(4, 8, 12), xticks=(0, 10, 20))
plt.text(3, 12, 'I', fontsize=20)
plt.subplot(222)
plt.plot(x, y2, 'ks', xfit, fit(xfit), 'r-', lw=2)
plt.axis([2, 20, 2, 14])
plt.setp(plt.gca(), xticks=(0, 10, 20), xticklabels=[],
yticks=(4, 8, 12), yticklabels=[], )
plt.text(3, 12, 'II', fontsize=20)
plt.subplot(223)
plt.plot(x, y3, 'ks', xfit, fit(xfit), 'r-', lw=2)
plt.axis([2, 20, 2, 14])
plt.text(3, 12, 'III', fontsize=20)
plt.setp(plt.gca(), yticks=(4, 8, 12), xticks=(0, 10, 20))
plt.subplot(224)
xfit = np.array([np.min(x4), np.max(x4)])
plt.plot(x4, y4, 'ks', xfit, fit(xfit), 'r-', lw=2)
plt.axis([2, 20, 2, 14])
plt.setp(plt.gca(), yticklabels=[], yticks=(4, 8, 12), xticks=(0, 10, 20))
plt.text(3, 12, 'IV', fontsize=20)
# verify the stats
pairs = (x, y1), (x, y2), (x, y3), (x4, y4)
for x, y in pairs:
print('mean=%1.2f, std=%1.2f, r=%1.2f' % (np.mean(y), np.std(y),
np.corrcoef(x, y)[0][1]))
pltshow(plt)
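# Illustrative addition (not part of the original example): np.polyfit recovers
# roughly the same regression line (slope ~0.5, intercept ~3, matching fit()
# above) for each of the four datasets, which is the point of the quartet.
for xs, ys in pairs:
    slope, intercept = np.polyfit(xs, ys, 1)
    print('slope=%1.2f, intercept=%1.2f' % (slope, intercept))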
| mit |
kaurav/image-process | get.py | 1 | 3532 | '''libraries to be imported '''
import cv2
import numpy as np
import scipy.ndimage as sy
import matplotlib.pylab as py
from skimage.measure import regionprops
import os
#function to find blob of particular shape in binary image
''' getshape1 takes three parameters: a binary image, the original image and
the shape which is to be found'''
def getshape1(binary,orignal,shape):
r,w=sy.measurements.label(binary)
print(w)
pro=regionprops(r)
cmax={'circle':1,'square':0.84999,'triangle':0.71}
cmin={'circle':0.85,'square':0.720,'triangle':0.30}
emax={'circle':0.90,'square':1,'triangle':0.70}
emin={'circle':0.70,'square':0.90,'triangle':0.30}
    print('circularity', 'extent', 'orientation', 'aspect ratio',
          'eccentricity')
for a in range(len(pro)):
q=(4*22*pro[a].area)/(7*(pro[a].perimeter**2))
print(q,pro[a].extent, pro[a].orientation,pro[a].eccentricity)
if(q>=cmin[shape] and q<=cmax[shape] ):
minr, minc, maxr, maxc=pro[a].bbox
cv2.rectangle(orignal,(minc,minr),(maxc,maxr),(1,1,1),5)
cv2.circle(orignal,
(int(pro[a].centroid[1]),int(pro[a].centroid[0])),
1,(1,1,1),5)
return orignal
#function to find blob of particular shape in binary image using contours
def getshape(binary,orignal,shape):
contours, hierarchy = cv2.findContours(
binary,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
cmax={'circle':16,'square':8,'triangle':6}
for a in contours :
epsilon = 0.02*cv2.arcLength(a,True)
approx = cv2.approxPolyDP(a,epsilon,True)
print(approx.size)
if (approx.size == cmax[shape]):
cv2.drawContours(orignal,[a],0, (int(40),int(255),int(25)), 3)
return orignal,contours
'''function to find basic RGB color in image using threshold'''
def getcolor1 (img,color,n=0.44):
n=float(n)
img=chroma(img)
B,G,R=cv2.split(img)
if(color=='red'):
c=R>n
c=c*255
if(color=='green'):
c=G>n
c=c*255
if(color=='blue'):
c=B>n
c=c*255
c=c.astype('uint8')
kernal=np.ones((7,7),np.uint8)
c=cv2.morphologyEx( c, cv2.MORPH_OPEN,kernal)
c=cv2.morphologyEx( c, cv2.MORPH_CLOSE,kernal)
return c
'''function to find chromatacity of image'''
def chroma(img):
img=img.astype('float32')
img=img**(22/10)
b,g,r=cv2.split(img)
Y=r+b+g
R=r/Y
G=g/Y
B=b/Y
return cv2.merge([B,G,R])
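'''illustrative sketch (not part of the original script): chroma() above
rescales each pixel so the three channel values sum to one; this helper checks
that property on a small random BGR image.  It is defined for reference only.'''
def _example_chroma_sums_to_one():
    img = np.random.randint(1, 255, (4, 4, 3)).astype('uint8')
    out = chroma(img)
    total = out[:, :, 0] + out[:, :, 1] + out[:, :, 2]
    return np.allclose(total, 1.0)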
''' function to find colors in image using HSV'''
def getcolor(img,color):
a=chroma(img)
a=a*255
a=a.astype('uint8')
hsv = cv2.cvtColor(a, cv2.COLOR_BGR2HSV)
input=readtxt(color)
lower_color = np.array([input[0],input[1],input[2]])
upper_color = np.array([input[3],input[4],input[5]])
mask = cv2.inRange(hsv, lower_color, upper_color)
kernal=np.ones((33,33),np.uint8)
c=cv2.morphologyEx( mask, cv2.MORPH_OPEN,kernal)
kernal=np.ones((5,5),np.uint8)
c=cv2.morphologyEx(mask, cv2.MORPH_CLOSE,kernal)
if(color == 'Red'):
d=getcolor(img,'Red1')
c=d+c
return c
'''function to find homography points for making homography matrix'''
def gethomograph(img1):
    c=getcolor1(img1,'blue',0.45)
r,w=sy.measurements.label(c)
pro=regionprops(r)
H=np.ones((9, 2), dtype=np.float32)
b=0
c=0
for a in range(len(pro)):
        q=(4*np.pi*pro[a].area)/(pro[a].perimeter**2)
H[a][0]=pro[a].centroid[0]
H[a][1]=pro[a].centroid[1]
cv2.circle(img1,(int(pro[a].centroid[1]),int(pro[a].centroid[0]))
,5,(a,b,c))
return H
'''function to read data of color from image'''
def readtxt( name ):
file = open('getcolor.txt')
a = file.readlines()
d=[]
for c in a:
c = c.split(' ' or '/n')
c=list(c)
if c[0] == name :
for e in range(len(c)):
if(c[e].isdigit()):
d.append(int(c[e]))
return d
| gpl-2.0 |
strint/tensorflow | tensorflow/contrib/learn/python/learn/estimators/estimator_test.py | 2 | 36442 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Estimator."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import itertools
import json
import os
import tempfile
import numpy as np
import six
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.contrib import learn
from tensorflow.contrib import lookup
from tensorflow.contrib.framework.python.ops import variables
from tensorflow.contrib.layers.python.layers import feature_column as feature_column_lib
from tensorflow.contrib.layers.python.layers import optimizers
from tensorflow.contrib.learn.python.learn import experiment
from tensorflow.contrib.learn.python.learn import models
from tensorflow.contrib.learn.python.learn import monitors as monitors_lib
from tensorflow.contrib.learn.python.learn.datasets import base
from tensorflow.contrib.learn.python.learn.estimators import _sklearn
from tensorflow.contrib.learn.python.learn.estimators import estimator
from tensorflow.contrib.learn.python.learn.estimators import linear
from tensorflow.contrib.learn.python.learn.estimators import model_fn
from tensorflow.contrib.learn.python.learn.estimators import run_config
from tensorflow.contrib.learn.python.learn.utils import input_fn_utils
from tensorflow.contrib.metrics.python.ops import metric_ops
from tensorflow.contrib.testing.python.framework import util_test
from tensorflow.python.client import session as session_lib
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.ops import variables as variables_lib
from tensorflow.python.platform import gfile
from tensorflow.python.platform import test
from tensorflow.python.saved_model import loader
from tensorflow.python.saved_model import tag_constants
from tensorflow.python.training import basic_session_run_hooks
from tensorflow.python.training import input as input_lib
from tensorflow.python.training import monitored_session
from tensorflow.python.training import queue_runner_impl
from tensorflow.python.training import session_run_hook
from tensorflow.python.util import compat
_BOSTON_INPUT_DIM = 13
_IRIS_INPUT_DIM = 4
def boston_input_fn(num_epochs=None):
boston = base.load_boston()
features = input_lib.limit_epochs(
array_ops.reshape(
constant_op.constant(boston.data), [-1, _BOSTON_INPUT_DIM]),
num_epochs=num_epochs)
labels = array_ops.reshape(constant_op.constant(boston.target), [-1, 1])
return features, labels
def boston_input_fn_with_queue(num_epochs=None):
features, labels = boston_input_fn(num_epochs=num_epochs)
# Create a minimal queue runner.
fake_queue = data_flow_ops.FIFOQueue(30, dtypes.int32)
queue_runner = queue_runner_impl.QueueRunner(fake_queue,
[constant_op.constant(0)])
queue_runner_impl.add_queue_runner(queue_runner)
return features, labels
def iris_input_fn():
iris = base.load_iris()
features = array_ops.reshape(
constant_op.constant(iris.data), [-1, _IRIS_INPUT_DIM])
labels = array_ops.reshape(constant_op.constant(iris.target), [-1])
return features, labels
def iris_input_fn_labels_dict():
iris = base.load_iris()
features = array_ops.reshape(
constant_op.constant(iris.data), [-1, _IRIS_INPUT_DIM])
labels = {
'labels': array_ops.reshape(constant_op.constant(iris.target), [-1])
}
return features, labels
def boston_eval_fn():
boston = base.load_boston()
n_examples = len(boston.target)
features = array_ops.reshape(
constant_op.constant(boston.data), [n_examples, _BOSTON_INPUT_DIM])
labels = array_ops.reshape(
constant_op.constant(boston.target), [n_examples, 1])
return array_ops.concat([features, features], 0), array_ops.concat(
[labels, labels], 0)
def extract(data, key):
if isinstance(data, dict):
assert key in data
return data[key]
else:
return data
def linear_model_params_fn(features, labels, mode, params):
features = extract(features, 'input')
labels = extract(labels, 'labels')
assert mode in (model_fn.ModeKeys.TRAIN, model_fn.ModeKeys.EVAL,
model_fn.ModeKeys.INFER)
prediction, loss = (models.linear_regression_zero_init(features, labels))
train_op = optimizers.optimize_loss(
loss,
variables.get_global_step(),
optimizer='Adagrad',
learning_rate=params['learning_rate'])
return prediction, loss, train_op
def linear_model_fn(features, labels, mode):
features = extract(features, 'input')
labels = extract(labels, 'labels')
assert mode in (model_fn.ModeKeys.TRAIN, model_fn.ModeKeys.EVAL,
model_fn.ModeKeys.INFER)
if isinstance(features, dict):
(_, features), = features.items()
prediction, loss = (models.linear_regression_zero_init(features, labels))
train_op = optimizers.optimize_loss(
loss, variables.get_global_step(), optimizer='Adagrad', learning_rate=0.1)
return prediction, loss, train_op
def linear_model_fn_with_model_fn_ops(features, labels, mode):
"""Same as linear_model_fn, but returns `ModelFnOps`."""
assert mode in (model_fn.ModeKeys.TRAIN, model_fn.ModeKeys.EVAL,
model_fn.ModeKeys.INFER)
prediction, loss = (models.linear_regression_zero_init(features, labels))
train_op = optimizers.optimize_loss(
loss, variables.get_global_step(), optimizer='Adagrad', learning_rate=0.1)
return model_fn.ModelFnOps(
mode=mode, predictions=prediction, loss=loss, train_op=train_op)
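def _example_linear_model_fn_usage():
  """Illustrative sketch (not part of the original test file): shows how the
  model_fns above plug into an Estimator, mirroring the fit/evaluate pattern
  used by the tests below.  Defined for reference only; the suite never calls
  it.
  """
  est = estimator.Estimator(model_fn=linear_model_fn)
  est.fit(input_fn=boston_input_fn, steps=1)
  return est.evaluate(input_fn=boston_eval_fn, steps=1)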
def logistic_model_no_mode_fn(features, labels):
features = extract(features, 'input')
labels = extract(labels, 'labels')
labels = array_ops.one_hot(labels, 3, 1, 0)
prediction, loss = (models.logistic_regression_zero_init(features, labels))
train_op = optimizers.optimize_loss(
loss, variables.get_global_step(), optimizer='Adagrad', learning_rate=0.1)
return {
'class': math_ops.argmax(prediction, 1),
'prob': prediction
}, loss, train_op
VOCAB_FILE_CONTENT = 'emerson\nlake\npalmer\n'
EXTRA_FILE_CONTENT = 'kermit\npiggy\nralph\n'
def _build_estimator_for_export_tests(tmpdir):
def _input_fn():
iris = base.load_iris()
return {
'feature': constant_op.constant(
iris.data, dtype=dtypes.float32)
}, constant_op.constant(
iris.target, shape=[150], dtype=dtypes.int32)
feature_columns = [
feature_column_lib.real_valued_column(
'feature', dimension=4)
]
est = linear.LinearRegressor(feature_columns)
est.fit(input_fn=_input_fn, steps=20)
feature_spec = feature_column_lib.create_feature_spec_for_parsing(
feature_columns)
serving_input_fn = input_fn_utils.build_parsing_serving_input_fn(feature_spec)
# hack in an op that uses an asset, in order to test asset export.
# this is not actually valid, of course.
def serving_input_fn_with_asset():
features, labels, inputs = serving_input_fn()
vocab_file_name = os.path.join(tmpdir, 'my_vocab_file')
vocab_file = gfile.GFile(vocab_file_name, mode='w')
vocab_file.write(VOCAB_FILE_CONTENT)
vocab_file.close()
hashtable = lookup.HashTable(
lookup.TextFileStringTableInitializer(vocab_file_name), 'x')
features['bogus_lookup'] = hashtable.lookup(
math_ops.to_int64(features['feature']))
return input_fn_utils.InputFnOps(features, labels, inputs)
return est, serving_input_fn_with_asset
class CheckCallsMonitor(monitors_lib.BaseMonitor):
def __init__(self, expect_calls):
super(CheckCallsMonitor, self).__init__()
self.begin_calls = None
self.end_calls = None
self.expect_calls = expect_calls
def begin(self, max_steps):
self.begin_calls = 0
self.end_calls = 0
def step_begin(self, step):
self.begin_calls += 1
return {}
def step_end(self, step, outputs):
self.end_calls += 1
return False
def end(self):
assert (self.end_calls == self.expect_calls and
self.begin_calls == self.expect_calls)
class EstimatorTest(test.TestCase):
def testExperimentIntegration(self):
exp = experiment.Experiment(
estimator=estimator.Estimator(model_fn=linear_model_fn),
train_input_fn=boston_input_fn,
eval_input_fn=boston_input_fn)
exp.test()
def testModelFnArgs(self):
expected_param = {'some_param': 'some_value'}
expected_config = run_config.RunConfig()
expected_config.i_am_test = True
def _argument_checker(features, labels, mode, params, config):
_, _ = features, labels
self.assertEqual(model_fn.ModeKeys.TRAIN, mode)
self.assertEqual(expected_param, params)
self.assertTrue(config.i_am_test)
return constant_op.constant(0.), constant_op.constant(
0.), constant_op.constant(0.)
est = estimator.Estimator(
model_fn=_argument_checker,
params=expected_param,
config=expected_config)
est.fit(input_fn=boston_input_fn, steps=1)
def testModelFnWithModelDir(self):
expected_param = {'some_param': 'some_value'}
expected_model_dir = tempfile.mkdtemp()
def _argument_checker(features, labels, mode, params, config=None,
model_dir=None):
_, _, _ = features, labels, config
self.assertEqual(model_fn.ModeKeys.TRAIN, mode)
self.assertEqual(expected_param, params)
self.assertEqual(model_dir, expected_model_dir)
return constant_op.constant(0.), constant_op.constant(
0.), constant_op.constant(0.)
est = estimator.Estimator(model_fn=_argument_checker,
params=expected_param,
model_dir=expected_model_dir)
est.fit(input_fn=boston_input_fn, steps=1)
def testInvalidModelFn_no_train_op(self):
def _invalid_model_fn(features, labels):
# pylint: disable=unused-argument
w = variables_lib.Variable(42.0, 'weight')
loss = 100.0 - w
return None, loss, None
est = estimator.Estimator(model_fn=_invalid_model_fn)
with self.assertRaisesRegexp(ValueError, 'Missing training_op'):
est.fit(input_fn=boston_input_fn, steps=1)
def testInvalidModelFn_no_loss(self):
def _invalid_model_fn(features, labels, mode):
# pylint: disable=unused-argument
w = variables_lib.Variable(42.0, 'weight')
loss = 100.0 - w
train_op = w.assign_add(loss / 100.0)
predictions = loss
if mode == model_fn.ModeKeys.EVAL:
loss = None
return predictions, loss, train_op
est = estimator.Estimator(model_fn=_invalid_model_fn)
est.fit(input_fn=boston_input_fn, steps=1)
with self.assertRaisesRegexp(ValueError, 'Missing loss'):
est.evaluate(input_fn=boston_eval_fn, steps=1)
def testInvalidModelFn_no_prediction(self):
def _invalid_model_fn(features, labels):
# pylint: disable=unused-argument
w = variables_lib.Variable(42.0, 'weight')
loss = 100.0 - w
train_op = w.assign_add(loss / 100.0)
return None, loss, train_op
est = estimator.Estimator(model_fn=_invalid_model_fn)
est.fit(input_fn=boston_input_fn, steps=1)
with self.assertRaisesRegexp(ValueError, 'Missing prediction'):
est.evaluate(input_fn=boston_eval_fn, steps=1)
with self.assertRaisesRegexp(ValueError, 'Missing prediction'):
est.predict(input_fn=boston_input_fn)
with self.assertRaisesRegexp(ValueError, 'Missing prediction'):
est.predict(
input_fn=functools.partial(
boston_input_fn, num_epochs=1),
as_iterable=True)
def testModelFnScaffold(self):
self.is_init_fn_called = False
def _init_fn(scaffold, session):
_, _ = scaffold, session
self.is_init_fn_called = True
def _model_fn_scaffold(features, labels, mode):
_, _ = features, labels
return model_fn.ModelFnOps(
mode=mode,
predictions=constant_op.constant(0.),
loss=constant_op.constant(0.),
train_op=constant_op.constant(0.),
scaffold=monitored_session.Scaffold(init_fn=_init_fn))
est = estimator.Estimator(model_fn=_model_fn_scaffold)
est.fit(input_fn=boston_input_fn, steps=1)
self.assertTrue(self.is_init_fn_called)
def testCheckpointSaverHookSuppressesTheDefaultOne(self):
saver_hook = test.mock.Mock(
spec=basic_session_run_hooks.CheckpointSaverHook)
saver_hook.before_run.return_value = None
est = estimator.Estimator(model_fn=linear_model_fn)
est.fit(input_fn=boston_input_fn, steps=1, monitors=[saver_hook])
# test nothing is saved, due to suppressing default saver
with self.assertRaises(learn.NotFittedError):
est.evaluate(input_fn=boston_input_fn, steps=1)
def testCustomConfig(self):
test_random_seed = 5783452
class TestInput(object):
def __init__(self):
self.random_seed = 0
def config_test_input_fn(self):
self.random_seed = ops.get_default_graph().seed
return constant_op.constant([[1.]]), constant_op.constant([1.])
config = run_config.RunConfig(tf_random_seed=test_random_seed)
test_input = TestInput()
est = estimator.Estimator(model_fn=linear_model_fn, config=config)
est.fit(input_fn=test_input.config_test_input_fn, steps=1)
# If input_fn ran, it will have given us the random seed set on the graph.
self.assertEquals(test_random_seed, test_input.random_seed)
def testRunConfigModelDir(self):
config = run_config.RunConfig(model_dir='test_dir')
est = estimator.Estimator(model_fn=linear_model_fn,
config=config)
self.assertEqual('test_dir', est.config.model_dir)
def testModelDirAndRunConfigModelDir(self):
config = run_config.RunConfig(model_dir='test_dir')
est = estimator.Estimator(model_fn=linear_model_fn,
config=config,
model_dir='test_dir')
self.assertEqual('test_dir', est.config.model_dir)
with self.assertRaises(ValueError):
estimator.Estimator(model_fn=linear_model_fn,
config=config,
model_dir='different_dir')
def testCheckInputs(self):
est = estimator.SKCompat(estimator.Estimator(model_fn=linear_model_fn))
# Lambdas so we have to different objects to compare
right_features = lambda: np.ones(shape=[7, 8], dtype=np.float32)
right_labels = lambda: np.ones(shape=[7, 10], dtype=np.int32)
est.fit(right_features(), right_labels(), steps=1)
# TODO(wicke): This does not fail for np.int32 because of data_feeder magic.
wrong_type_features = np.ones(shape=[7, 8], dtype=np.int64)
wrong_size_features = np.ones(shape=[7, 10])
wrong_type_labels = np.ones(shape=[7, 10], dtype=np.float32)
wrong_size_labels = np.ones(shape=[7, 11])
est.fit(x=right_features(), y=right_labels(), steps=1)
with self.assertRaises(ValueError):
est.fit(x=wrong_type_features, y=right_labels(), steps=1)
with self.assertRaises(ValueError):
est.fit(x=wrong_size_features, y=right_labels(), steps=1)
with self.assertRaises(ValueError):
est.fit(x=right_features(), y=wrong_type_labels, steps=1)
with self.assertRaises(ValueError):
est.fit(x=right_features(), y=wrong_size_labels, steps=1)
def testBadInput(self):
est = estimator.Estimator(model_fn=linear_model_fn)
self.assertRaisesRegexp(
ValueError,
'Either x or input_fn must be provided.',
est.fit,
x=None,
input_fn=None,
steps=1)
self.assertRaisesRegexp(
ValueError,
'Can not provide both input_fn and x or y',
est.fit,
x='X',
input_fn=iris_input_fn,
steps=1)
self.assertRaisesRegexp(
ValueError,
'Can not provide both input_fn and x or y',
est.fit,
y='Y',
input_fn=iris_input_fn,
steps=1)
self.assertRaisesRegexp(
ValueError,
'Can not provide both input_fn and batch_size',
est.fit,
input_fn=iris_input_fn,
batch_size=100,
steps=1)
self.assertRaisesRegexp(
ValueError,
'Inputs cannot be tensors. Please provide input_fn.',
est.fit,
x=constant_op.constant(1.),
steps=1)
def testUntrained(self):
boston = base.load_boston()
est = estimator.SKCompat(estimator.Estimator(model_fn=linear_model_fn))
with self.assertRaises(learn.NotFittedError):
_ = est.score(x=boston.data, y=boston.target.astype(np.float64))
with self.assertRaises(learn.NotFittedError):
est.predict(x=boston.data)
def testContinueTraining(self):
boston = base.load_boston()
output_dir = tempfile.mkdtemp()
est = estimator.SKCompat(
estimator.Estimator(
model_fn=linear_model_fn, model_dir=output_dir))
float64_labels = boston.target.astype(np.float64)
est.fit(x=boston.data, y=float64_labels, steps=50)
scores = est.score(
x=boston.data,
y=float64_labels,
metrics={'MSE': metric_ops.streaming_mean_squared_error})
del est
# Create another estimator object with the same output dir.
est2 = estimator.SKCompat(
estimator.Estimator(
model_fn=linear_model_fn, model_dir=output_dir))
# Check we can evaluate and predict.
scores2 = est2.score(
x=boston.data,
y=float64_labels,
metrics={'MSE': metric_ops.streaming_mean_squared_error})
self.assertAllClose(scores['MSE'], scores2['MSE'])
predictions = np.array(list(est2.predict(x=boston.data)))
other_score = _sklearn.mean_squared_error(predictions, float64_labels)
self.assertAllClose(scores['MSE'], other_score)
# Check we can keep training.
est2.fit(x=boston.data, y=float64_labels, steps=100)
scores3 = est2.score(
x=boston.data,
y=float64_labels,
metrics={'MSE': metric_ops.streaming_mean_squared_error})
self.assertLess(scores3['MSE'], scores['MSE'])
def testEstimatorParams(self):
boston = base.load_boston()
est = estimator.SKCompat(
estimator.Estimator(
model_fn=linear_model_params_fn, params={'learning_rate': 0.01}))
est.fit(x=boston.data, y=boston.target, steps=100)
def testHooksNotChanged(self):
est = estimator.Estimator(model_fn=logistic_model_no_mode_fn)
    # We pass an empty array and expect it to remain empty after calling
    # fit and evaluate. This requires the estimator to copy the array
    # internally if any hooks are added.
my_array = []
est.fit(input_fn=iris_input_fn, steps=100, monitors=my_array)
_ = est.evaluate(input_fn=iris_input_fn, steps=1, hooks=my_array)
self.assertEqual(my_array, [])
def testIrisIterator(self):
iris = base.load_iris()
est = estimator.Estimator(model_fn=logistic_model_no_mode_fn)
x_iter = itertools.islice(iris.data, 100)
y_iter = itertools.islice(iris.target, 100)
estimator.SKCompat(est).fit(x_iter, y_iter, steps=20)
eval_result = est.evaluate(input_fn=iris_input_fn, steps=1)
x_iter_eval = itertools.islice(iris.data, 100)
y_iter_eval = itertools.islice(iris.target, 100)
score_result = estimator.SKCompat(est).score(x_iter_eval, y_iter_eval)
print(score_result)
self.assertItemsEqual(eval_result.keys(), score_result.keys())
self.assertItemsEqual(['global_step', 'loss'], score_result.keys())
predictions = estimator.SKCompat(est).predict(x=iris.data)['class']
self.assertEqual(len(predictions), iris.target.shape[0])
def testIrisIteratorArray(self):
iris = base.load_iris()
est = estimator.Estimator(model_fn=logistic_model_no_mode_fn)
x_iter = itertools.islice(iris.data, 100)
y_iter = (np.array(x) for x in iris.target)
est.fit(x_iter, y_iter, steps=100)
_ = est.evaluate(input_fn=iris_input_fn, steps=1)
_ = six.next(est.predict(x=iris.data))['class']
def testIrisIteratorPlainInt(self):
iris = base.load_iris()
est = estimator.Estimator(model_fn=logistic_model_no_mode_fn)
x_iter = itertools.islice(iris.data, 100)
y_iter = (v for v in iris.target)
est.fit(x_iter, y_iter, steps=100)
_ = est.evaluate(input_fn=iris_input_fn, steps=1)
_ = six.next(est.predict(x=iris.data))['class']
def testIrisTruncatedIterator(self):
iris = base.load_iris()
est = estimator.Estimator(model_fn=logistic_model_no_mode_fn)
x_iter = itertools.islice(iris.data, 50)
y_iter = ([np.int32(v)] for v in iris.target)
est.fit(x_iter, y_iter, steps=100)
def testTrainStepsIsIncremental(self):
est = estimator.Estimator(model_fn=linear_model_fn)
est.fit(input_fn=boston_input_fn, steps=10)
self.assertEqual(10, est.get_variable_value('global_step'))
est.fit(input_fn=boston_input_fn, steps=15)
self.assertEqual(25, est.get_variable_value('global_step'))
def testTrainMaxStepsIsNotIncremental(self):
est = estimator.Estimator(model_fn=linear_model_fn)
est.fit(input_fn=boston_input_fn, max_steps=10)
self.assertEqual(10, est.get_variable_value('global_step'))
est.fit(input_fn=boston_input_fn, max_steps=15)
self.assertEqual(15, est.get_variable_value('global_step'))
def testPredict(self):
est = estimator.Estimator(model_fn=linear_model_fn)
boston = base.load_boston()
est.fit(input_fn=boston_input_fn, steps=1)
output = list(est.predict(x=boston.data, batch_size=10))
self.assertEqual(len(output), boston.target.shape[0])
def testWithModelFnOps(self):
"""Test for model_fn that returns `ModelFnOps`."""
est = estimator.Estimator(model_fn=linear_model_fn_with_model_fn_ops)
boston = base.load_boston()
est.fit(input_fn=boston_input_fn, steps=1)
input_fn = functools.partial(boston_input_fn, num_epochs=1)
scores = est.evaluate(input_fn=input_fn, steps=1)
self.assertIn('loss', scores.keys())
output = list(est.predict(input_fn=input_fn))
self.assertEqual(len(output), boston.target.shape[0])
def testWrongInput(self):
def other_input_fn():
return {
'other': constant_op.constant([0, 0, 0])
}, constant_op.constant([0, 0, 0])
est = estimator.Estimator(model_fn=linear_model_fn)
est.fit(input_fn=boston_input_fn, steps=1)
with self.assertRaises(ValueError):
est.fit(input_fn=other_input_fn, steps=1)
def testMonitorsForFit(self):
est = estimator.Estimator(model_fn=linear_model_fn)
est.fit(input_fn=boston_input_fn,
steps=21,
monitors=[CheckCallsMonitor(expect_calls=21)])
def testHooksForEvaluate(self):
class CheckCallHook(session_run_hook.SessionRunHook):
def __init__(self):
self.run_count = 0
def after_run(self, run_context, run_values):
self.run_count += 1
est = learn.Estimator(model_fn=linear_model_fn)
est.fit(input_fn=boston_input_fn, steps=1)
hook = CheckCallHook()
est.evaluate(input_fn=boston_eval_fn, steps=3, hooks=[hook])
self.assertEqual(3, hook.run_count)
def testSummaryWriting(self):
est = estimator.Estimator(model_fn=linear_model_fn)
est.fit(input_fn=boston_input_fn, steps=200)
est.evaluate(input_fn=boston_input_fn, steps=200)
loss_summary = util_test.simple_values_from_events(
util_test.latest_events(est.model_dir), ['OptimizeLoss/loss'])
self.assertEqual(1, len(loss_summary))
def testLossInGraphCollection(self):
class _LossCheckerHook(session_run_hook.SessionRunHook):
def begin(self):
self.loss_collection = ops.get_collection(ops.GraphKeys.LOSSES)
hook = _LossCheckerHook()
est = estimator.Estimator(model_fn=linear_model_fn)
est.fit(input_fn=boston_input_fn, steps=200, monitors=[hook])
self.assertTrue(hook.loss_collection)
def test_export_returns_exported_dirname(self):
expected = '/path/to/some_dir'
with test.mock.patch.object(estimator, 'export') as mock_export_module:
mock_export_module._export_estimator.return_value = expected
est = estimator.Estimator(model_fn=linear_model_fn)
actual = est.export('/path/to')
self.assertEquals(expected, actual)
def test_export_savedmodel(self):
tmpdir = tempfile.mkdtemp()
est, serving_input_fn = _build_estimator_for_export_tests(tmpdir)
extra_file_name = os.path.join(
compat.as_bytes(tmpdir), compat.as_bytes('my_extra_file'))
extra_file = gfile.GFile(extra_file_name, mode='w')
extra_file.write(EXTRA_FILE_CONTENT)
extra_file.close()
assets_extra = {'some/sub/directory/my_extra_file': extra_file_name}
export_dir_base = os.path.join(
compat.as_bytes(tmpdir), compat.as_bytes('export'))
export_dir = est.export_savedmodel(
export_dir_base, serving_input_fn, assets_extra=assets_extra)
self.assertTrue(gfile.Exists(export_dir_base))
self.assertTrue(gfile.Exists(export_dir))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir), compat.as_bytes(
'saved_model.pb'))))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir), compat.as_bytes('variables'))))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes('variables/variables.index'))))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes('variables/variables.data-00000-of-00001'))))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir), compat.as_bytes('assets'))))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes('assets/my_vocab_file'))))
self.assertEqual(
compat.as_bytes(VOCAB_FILE_CONTENT),
compat.as_bytes(
gfile.GFile(
os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes('assets/my_vocab_file'))).read()))
expected_extra_path = os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes('assets.extra/some/sub/directory/my_extra_file'))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir), compat.as_bytes('assets.extra'))))
self.assertTrue(gfile.Exists(expected_extra_path))
self.assertEqual(
compat.as_bytes(EXTRA_FILE_CONTENT),
compat.as_bytes(gfile.GFile(expected_extra_path).read()))
expected_vocab_file = os.path.join(
compat.as_bytes(tmpdir), compat.as_bytes('my_vocab_file'))
# Restore, to validate that the export was well-formed.
with ops.Graph().as_default() as graph:
with session_lib.Session(graph=graph) as sess:
loader.load(sess, [tag_constants.SERVING], export_dir)
assets = [
x.eval()
for x in graph.get_collection(ops.GraphKeys.ASSET_FILEPATHS)
]
self.assertItemsEqual([expected_vocab_file], assets)
graph_ops = [x.name for x in graph.get_operations()]
self.assertTrue('input_example_tensor' in graph_ops)
self.assertTrue('ParseExample/ParseExample' in graph_ops)
self.assertTrue('linear/linear/feature/matmul' in graph_ops)
# cleanup
gfile.DeleteRecursively(tmpdir)
class InferRealValuedColumnsTest(test.TestCase):
def testInvalidArgs(self):
with self.assertRaisesRegexp(ValueError, 'x or input_fn must be provided'):
estimator.infer_real_valued_columns_from_input(None)
with self.assertRaisesRegexp(ValueError, 'cannot be tensors'):
estimator.infer_real_valued_columns_from_input(constant_op.constant(1.0))
def _assert_single_feature_column(self, expected_shape, expected_dtype,
feature_columns):
self.assertEqual(1, len(feature_columns))
feature_column = feature_columns[0]
self.assertEqual('', feature_column.name)
self.assertEqual(
{
'':
parsing_ops.FixedLenFeature(
shape=expected_shape, dtype=expected_dtype)
},
feature_column.config)
def testInt32Input(self):
feature_columns = estimator.infer_real_valued_columns_from_input(
np.ones(
shape=[7, 8], dtype=np.int32))
self._assert_single_feature_column([8], dtypes.int32, feature_columns)
def testInt32InputFn(self):
feature_columns = estimator.infer_real_valued_columns_from_input_fn(
lambda: (array_ops.ones(shape=[7, 8], dtype=dtypes.int32), None))
self._assert_single_feature_column([8], dtypes.int32, feature_columns)
def testInt64Input(self):
feature_columns = estimator.infer_real_valued_columns_from_input(
np.ones(
shape=[7, 8], dtype=np.int64))
self._assert_single_feature_column([8], dtypes.int64, feature_columns)
def testInt64InputFn(self):
feature_columns = estimator.infer_real_valued_columns_from_input_fn(
lambda: (array_ops.ones(shape=[7, 8], dtype=dtypes.int64), None))
self._assert_single_feature_column([8], dtypes.int64, feature_columns)
def testFloat32Input(self):
feature_columns = estimator.infer_real_valued_columns_from_input(
np.ones(
shape=[7, 8], dtype=np.float32))
self._assert_single_feature_column([8], dtypes.float32, feature_columns)
def testFloat32InputFn(self):
feature_columns = estimator.infer_real_valued_columns_from_input_fn(
lambda: (array_ops.ones(shape=[7, 8], dtype=dtypes.float32), None))
self._assert_single_feature_column([8], dtypes.float32, feature_columns)
def testFloat64Input(self):
feature_columns = estimator.infer_real_valued_columns_from_input(
np.ones(
shape=[7, 8], dtype=np.float64))
self._assert_single_feature_column([8], dtypes.float64, feature_columns)
def testFloat64InputFn(self):
feature_columns = estimator.infer_real_valued_columns_from_input_fn(
lambda: (array_ops.ones(shape=[7, 8], dtype=dtypes.float64), None))
self._assert_single_feature_column([8], dtypes.float64, feature_columns)
def testBoolInput(self):
with self.assertRaisesRegexp(
ValueError, 'on integer or non floating types are not supported'):
estimator.infer_real_valued_columns_from_input(
np.array([[False for _ in xrange(8)] for _ in xrange(7)]))
def testBoolInputFn(self):
with self.assertRaisesRegexp(
ValueError, 'on integer or non floating types are not supported'):
# pylint: disable=g-long-lambda
estimator.infer_real_valued_columns_from_input_fn(
lambda: (constant_op.constant(False, shape=[7, 8], dtype=dtypes.bool),
None))
def testStringInput(self):
with self.assertRaisesRegexp(
ValueError, 'on integer or non floating types are not supported'):
# pylint: disable=g-long-lambda
estimator.infer_real_valued_columns_from_input(
np.array([['%d.0' % i for i in xrange(8)] for _ in xrange(7)]))
def testStringInputFn(self):
with self.assertRaisesRegexp(
ValueError, 'on integer or non floating types are not supported'):
# pylint: disable=g-long-lambda
estimator.infer_real_valued_columns_from_input_fn(
lambda: (
constant_op.constant([['%d.0' % i
for i in xrange(8)]
for _ in xrange(7)]),
None))
def testBostonInputFn(self):
feature_columns = estimator.infer_real_valued_columns_from_input_fn(
boston_input_fn)
self._assert_single_feature_column([_BOSTON_INPUT_DIM], dtypes.float64,
feature_columns)
def testIrisInputFn(self):
feature_columns = estimator.infer_real_valued_columns_from_input_fn(
iris_input_fn)
self._assert_single_feature_column([_IRIS_INPUT_DIM], dtypes.float64,
feature_columns)
class ReplicaDeviceSetterTest(test.TestCase):
def testVariablesAreOnPs(self):
tf_config = {'cluster': {run_config.TaskType.PS: ['fake_ps_0']}}
with test.mock.patch.dict('os.environ',
{'TF_CONFIG': json.dumps(tf_config)}):
config = run_config.RunConfig()
with ops.device(estimator._get_replica_device_setter(config)):
v = variables_lib.Variable([1, 2])
w = variables_lib.Variable([2, 1])
a = v + w
self.assertDeviceEqual('/job:ps/task:0', v.device)
self.assertDeviceEqual('/job:ps/task:0', v.initializer.device)
self.assertDeviceEqual('/job:ps/task:0', w.device)
self.assertDeviceEqual('/job:ps/task:0', w.initializer.device)
self.assertDeviceEqual('/job:worker', a.device)
def testVariablesAreLocal(self):
with ops.device(
estimator._get_replica_device_setter(run_config.RunConfig())):
v = variables_lib.Variable([1, 2])
w = variables_lib.Variable([2, 1])
a = v + w
self.assertDeviceEqual('', v.device)
self.assertDeviceEqual('', v.initializer.device)
self.assertDeviceEqual('', w.device)
self.assertDeviceEqual('', w.initializer.device)
self.assertDeviceEqual('', a.device)
def testMutableHashTableIsOnPs(self):
tf_config = {'cluster': {run_config.TaskType.PS: ['fake_ps_0']}}
with test.mock.patch.dict('os.environ',
{'TF_CONFIG': json.dumps(tf_config)}):
config = run_config.RunConfig()
with ops.device(estimator._get_replica_device_setter(config)):
default_val = constant_op.constant([-1, -1], dtypes.int64)
table = lookup.MutableHashTable(dtypes.string, dtypes.int64,
default_val)
input_string = constant_op.constant(['brain', 'salad', 'tank'])
output = table.lookup(input_string)
self.assertDeviceEqual('/job:ps/task:0', table._table_ref.device)
self.assertDeviceEqual('/job:ps/task:0', output.device)
def testMutableHashTableIsLocal(self):
with ops.device(
estimator._get_replica_device_setter(run_config.RunConfig())):
default_val = constant_op.constant([-1, -1], dtypes.int64)
table = lookup.MutableHashTable(dtypes.string, dtypes.int64,
default_val)
input_string = constant_op.constant(['brain', 'salad', 'tank'])
output = table.lookup(input_string)
self.assertDeviceEqual('', table._table_ref.device)
self.assertDeviceEqual('', output.device)
def testTaskIsSetOnWorkerWhenJobNameIsSet(self):
tf_config = {
'cluster': {
run_config.TaskType.PS: ['fake_ps_0']
},
'task': {
'type': run_config.TaskType.WORKER,
'index': 3
}
}
with test.mock.patch.dict('os.environ',
{'TF_CONFIG': json.dumps(tf_config)}):
config = run_config.RunConfig()
with ops.device(estimator._get_replica_device_setter(config)):
v = variables_lib.Variable([1, 2])
w = variables_lib.Variable([2, 1])
a = v + w
self.assertDeviceEqual('/job:ps/task:0', v.device)
self.assertDeviceEqual('/job:ps/task:0', v.initializer.device)
self.assertDeviceEqual('/job:ps/task:0', w.device)
self.assertDeviceEqual('/job:ps/task:0', w.initializer.device)
self.assertDeviceEqual('/job:worker/task:3', a.device)
if __name__ == '__main__':
test.main()
| apache-2.0 |
siou83/trading-with-python | lib/interactiveBrokers/histData.py | 76 | 6472 | '''
Created on May 8, 2013
Copyright: Jev Kuznetsov
License: BSD
Module for downloading historic data from IB
'''
import ib
import pandas as pd
from ib.ext.Contract import Contract
from ib.opt import ibConnection, message
import logger as logger
from pandas import DataFrame, Index
import os
import datetime as dt
import time
from time import sleep
from extra import timeFormat, dateFormat
class Downloader(object):
def __init__(self,debug=False):
self._log = logger.getLogger('DLD')
        self._log.debug('Initializing data downloader. Pandas version={0}, ibpy version:{1}'.format(pd.__version__,ib.version))
self.tws = ibConnection()
self._dataHandler = _HistDataHandler(self.tws)
if debug:
self.tws.registerAll(self._debugHandler)
self.tws.unregister(self._debugHandler,message.HistoricalData)
self._log.debug('Connecting to tws')
self.tws.connect()
self._timeKeeper = TimeKeeper() # keep track of past requests
self._reqId = 1 # current request id
def _debugHandler(self,msg):
print '[debug]', msg
def requestData(self,contract,endDateTime,durationStr='1 D',barSizeSetting='30 secs',whatToShow='TRADES',useRTH=1,formatDate=1):
if isinstance(endDateTime,dt.datetime): # convert to string
endDateTime = endDateTime.strftime(timeFormat)
self._log.debug('Requesting data for %s end time %s.' % (contract.m_symbol,endDateTime))
while self._timeKeeper.nrRequests(timeSpan=600) > 59:
print 'Too many requests done. Waiting... '
time.sleep(10)
self._timeKeeper.addRequest()
self._dataHandler.reset()
self.tws.reqHistoricalData(self._reqId,contract,endDateTime,durationStr,barSizeSetting,whatToShow,useRTH,formatDate)
self._reqId+=1
#wait for data
startTime = time.time()
timeout = 3
while not self._dataHandler.dataReady and (time.time()-startTime < timeout):
sleep(2)
if not self._dataHandler.dataReady:
self._log.error('Data timeout')
print self._dataHandler.data
return self._dataHandler.data
# def getIntradayData(self,contract, dateTuple ):
# ''' get full day data on 1-s interval
# date: a tuple of (yyyy,mm,dd)
# '''
#
# openTime = dt.datetime(*dateTuple)+dt.timedelta(hours=16)
# closeTime = dt.datetime(*dateTuple)+dt.timedelta(hours=22)
#
# timeRange = pd.date_range(openTime,closeTime,freq='30min')
#
# datasets = []
#
# for t in timeRange:
# datasets.append(self.requestData(contract,t.strftime(timeFormat)))
#
# return pd.concat(datasets)
def disconnect(self):
self.tws.disconnect()
class _HistDataHandler(object):
''' handles incoming messages '''
def __init__(self,tws):
self._log = logger.getLogger('DH')
tws.register(self.msgHandler,message.HistoricalData)
self.reset()
def reset(self):
self._log.debug('Resetting data')
self.dataReady = False
self._timestamp = []
self._data = {'open':[],'high':[],'low':[],'close':[],'volume':[],'count':[],'WAP':[]}
def msgHandler(self,msg):
#print '[msg]', msg
if msg.date[:8] == 'finished':
            self._log.debug('Data received')
self.dataReady = True
return
if len(msg.date) > 8:
self._timestamp.append(dt.datetime.strptime(msg.date,timeFormat))
else:
self._timestamp.append(dt.datetime.strptime(msg.date,dateFormat))
for k in self._data.keys():
self._data[k].append(getattr(msg, k))
@property
def data(self):
''' return downloaded data as a DataFrame '''
df = DataFrame(data=self._data,index=Index(self._timestamp))
return df
class TimeKeeper(object):
'''
    class for keeping track of previous requests, to satisfy the IB requirements
    (max 60 requests / 10 min)
    each time a request is made, a timestamp is added to a txt file in the user dir.
'''
def __init__(self):
self._log = logger.getLogger('TK')
dataDir = os.path.expanduser('~')+'/twpData'
if not os.path.exists(dataDir):
os.mkdir(dataDir)
self._timeFormat = "%Y%m%d %H:%M:%S"
self.dataFile = os.path.normpath(os.path.join(dataDir,'requests.txt'))
# Create file if it's missing
if not os.path.exists(self.dataFile):
open(self.dataFile,'w').close()
self._log.debug('Data file: {0}'.format(self.dataFile))
def addRequest(self):
''' adds a timestamp of current request'''
with open(self.dataFile,'a') as f:
f.write(dt.datetime.now().strftime(self._timeFormat)+'\n')
def nrRequests(self,timeSpan=600):
''' return number of requests in past timespan (s) '''
delta = dt.timedelta(seconds=timeSpan)
now = dt.datetime.now()
requests = 0
with open(self.dataFile,'r') as f:
lines = f.readlines()
for line in lines:
if now-dt.datetime.strptime(line.strip(),self._timeFormat) < delta:
requests+=1
if requests==0: # erase all contents if no requests are relevant
open(self.dataFile,'w').close()
self._log.debug('past requests: {0}'.format(requests))
return requests
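# Illustrative sketch, not part of the original module: the throttling pattern
# that Downloader.requestData uses, shown on its own. The 59-request threshold
# over a 600 s window mirrors the IB limit described in the TimeKeeper
# docstring; the helper name _demo_throttle and its defaults are made up here.
def _demo_throttle(time_keeper, max_requests=59, time_span=600, wait=10):
    ''' block until fewer than max_requests were made in the past time_span seconds '''
    while time_keeper.nrRequests(timeSpan=time_span) > max_requests:
        time.sleep(wait)
    time_keeper.addRequest()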
if __name__ == '__main__':
from extra import createContract
dl = Downloader(debug=True) # historic data downloader class
contract = createContract('SPY') # create contract using defaults (STK,SMART,USD)
    data = dl.requestData(contract,"20141208 16:00:00 EST") # request 30-second data bars up to the given end time
data.to_csv('SPY.csv') # write data to csv
print 'Done' | bsd-3-clause |
jtrussell/think-bayes-workspace | src/vendor/AllenDowney/species.py | 1 | 52910 | """This file contains code used in "Think Bayes",
by Allen B. Downey, available from greenteapress.com
Copyright 2012 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
import matplotlib.pyplot as pyplot
import thinkplot
import numpy
import csv
import random
import shelve
import sys
import time
import thinkbayes
import warnings
warnings.simplefilter('error', RuntimeWarning)
FORMATS = ['pdf', 'eps', 'png']
class Locker(object):
"""Encapsulates a shelf for storing key-value pairs."""
def __init__(self, shelf_file):
self.shelf = shelve.open(shelf_file)
def Close(self):
"""Closes the shelf.
"""
self.shelf.close()
def Add(self, key, value):
"""Adds a key-value pair."""
self.shelf[str(key)] = value
def Lookup(self, key):
"""Looks up a key."""
return self.shelf.get(str(key))
def Keys(self):
"""Returns an iterator of keys."""
return self.shelf.iterkeys()
def Read(self):
"""Returns the contents of the shelf as a map."""
return dict(self.shelf)
class Subject(object):
"""Represents a subject from the belly button study."""
def __init__(self, code):
"""
code: string ID
species: sequence of (int count, string species) pairs
"""
self.code = code
self.species = []
self.suite = None
self.num_reads = None
self.num_species = None
self.total_reads = None
self.total_species = None
self.prev_unseen = None
self.pmf_n = None
self.pmf_q = None
self.pmf_l = None
def Add(self, species, count):
"""Add a species-count pair.
It is up to the caller to ensure that species names are unique.
species: string species/genus name
count: int number of individuals
"""
self.species.append((count, species))
def Done(self, reverse=False, clean_param=0):
"""Called when we are done adding species counts.
reverse: which order to sort in
"""
if clean_param:
self.Clean(clean_param)
self.species.sort(reverse=reverse)
counts = self.GetCounts()
self.num_species = len(counts)
self.num_reads = sum(counts)
def Clean(self, clean_param=50):
"""Identifies and removes bogus data.
clean_param: parameter that controls the number of legit species
"""
def prob_bogus(k, r):
"""Compute the probability that a species is bogus."""
q = clean_param / r
p = (1-q) ** k
return p
print self.code, clean_param
counts = self.GetCounts()
r = 1.0 * sum(counts)
species_seq = []
for k, species in sorted(self.species):
if random.random() < prob_bogus(k, r):
continue
species_seq.append((k, species))
self.species = species_seq
def GetM(self):
"""Gets number of observed species."""
return len(self.species)
def GetCounts(self):
"""Gets the list of species counts
        Should be in increasing order, if Done() has been invoked.
"""
return [count for count, _ in self.species]
def MakeCdf(self):
"""Makes a CDF of total prevalence vs rank."""
counts = self.GetCounts()
counts.sort(reverse=True)
cdf = thinkbayes.MakeCdfFromItems(enumerate(counts))
return cdf
def GetNames(self):
"""Gets the names of the seen species."""
return [name for _, name in self.species]
def PrintCounts(self):
"""Prints the counts and species names."""
for count, name in reversed(self.species):
print count, name
def GetSpecies(self, index):
"""Gets the count and name of the indicated species.
Returns: count-species pair
"""
return self.species[index]
def GetCdf(self):
"""Returns cumulative prevalence vs number of species.
"""
counts = self.GetCounts()
items = enumerate(counts)
cdf = thinkbayes.MakeCdfFromItems(items)
return cdf
def GetPrevalences(self):
"""Returns a sequence of prevalences (normalized counts).
"""
counts = self.GetCounts()
total = sum(counts)
prevalences = numpy.array(counts, dtype=numpy.float) / total
return prevalences
def Process(self, low=None, high=500, conc=1, iters=100):
"""Computes the posterior distribution of n and the prevalences.
Sets attribute: self.suite
low: minimum number of species
high: maximum number of species
conc: concentration parameter
iters: number of iterations to use in the estimator
"""
counts = self.GetCounts()
m = len(counts)
if low is None:
low = max(m, 2)
ns = range(low, high+1)
#start = time.time()
self.suite = Species5(ns, conc=conc, iters=iters)
self.suite.Update(counts)
#end = time.time()
        #print 'Processing time', end-start
def MakePrediction(self, num_sims=100):
"""Make predictions for the given subject.
Precondition: Process has run
num_sims: how many simulations to run for predictions
Adds attributes
pmf_l: predictive distribution of additional species
"""
add_reads = self.total_reads - self.num_reads
curves = self.RunSimulations(num_sims, add_reads)
self.pmf_l = self.MakePredictive(curves)
def MakeQuickPrediction(self, num_sims=100):
"""Make predictions for the given subject.
Precondition: Process has run
num_sims: how many simulations to run for predictions
Adds attribute:
pmf_l: predictive distribution of additional species
"""
add_reads = self.total_reads - self.num_reads
pmf = thinkbayes.Pmf()
_, seen = self.GetSeenSpecies()
for _ in range(num_sims):
_, observations = self.GenerateObservations(add_reads)
all_seen = seen.union(observations)
l = len(all_seen) - len(seen)
pmf.Incr(l)
pmf.Normalize()
self.pmf_l = pmf
def DistL(self):
"""Returns the distribution of additional species, l.
"""
return self.pmf_l
def MakeFigures(self):
"""Makes figures showing distribution of n and the prevalences."""
self.PlotDistN()
self.PlotPrevalences()
def PlotDistN(self):
"""Plots distribution of n."""
pmf = self.suite.DistN()
print '90% CI for N:', pmf.CredibleInterval(90)
pmf.name = self.code
thinkplot.Clf()
thinkplot.PrePlot(num=1)
thinkplot.Pmf(pmf)
root = 'species-ndist-%s' % self.code
thinkplot.Save(root=root,
xlabel='Number of species',
ylabel='Prob',
formats=FORMATS,
)
def PlotPrevalences(self, num=5):
"""Plots dist of prevalence for several species.
num: how many species (starting with the highest prevalence)
"""
thinkplot.Clf()
thinkplot.PrePlot(num=5)
for rank in range(1, num+1):
self.PlotPrevalence(rank)
root = 'species-prev-%s' % self.code
thinkplot.Save(root=root,
xlabel='Prevalence',
ylabel='Prob',
formats=FORMATS,
axis=[0, 0.3, 0, 1],
)
def PlotPrevalence(self, rank=1, cdf_flag=True):
"""Plots dist of prevalence for one species.
rank: rank order of the species to plot.
cdf_flag: whether to plot the CDF
"""
# convert rank to index
index = self.GetM() - rank
_, mix = self.suite.DistOfPrevalence(index)
count, _ = self.GetSpecies(index)
mix.name = '%d (%d)' % (rank, count)
print '90%% CI for prevalence of species %d:' % rank,
print mix.CredibleInterval(90)
if cdf_flag:
cdf = mix.MakeCdf()
thinkplot.Cdf(cdf)
else:
thinkplot.Pmf(mix)
def PlotMixture(self, rank=1):
"""Plots dist of prevalence for all n, and the mix.
rank: rank order of the species to plot
"""
# convert rank to index
index = self.GetM() - rank
print self.GetSpecies(index)
print self.GetCounts()[index]
metapmf, mix = self.suite.DistOfPrevalence(index)
thinkplot.Clf()
for pmf in metapmf.Values():
thinkplot.Pmf(pmf, color='blue', alpha=0.2, linewidth=0.5)
thinkplot.Pmf(mix, color='blue', alpha=0.9, linewidth=2)
root = 'species-mix-%s' % self.code
thinkplot.Save(root=root,
xlabel='Prevalence',
ylabel='Prob',
formats=FORMATS,
axis=[0, 0.3, 0, 0.3],
legend=False)
def GetSeenSpecies(self):
"""Makes a set of the names of seen species.
Returns: number of species, set of string species names
"""
names = self.GetNames()
m = len(names)
seen = set(SpeciesGenerator(names, m))
return m, seen
def GenerateObservations(self, num_reads):
"""Generates a series of random observations.
num_reads: number of reads to generate
Returns: number of species, sequence of string species names
"""
n, prevalences = self.suite.SamplePosterior()
names = self.GetNames()
name_iter = SpeciesGenerator(names, n)
items = zip(name_iter, prevalences)
cdf = thinkbayes.MakeCdfFromItems(items)
observations = cdf.Sample(num_reads)
#for ob in observations:
# print ob
return n, observations
def Resample(self, num_reads):
"""Choose a random subset of the data (without replacement).
num_reads: number of reads in the subset
"""
t = []
for count, species in self.species:
t.extend([species]*count)
random.shuffle(t)
reads = t[:num_reads]
subject = Subject(self.code)
hist = thinkbayes.MakeHistFromList(reads)
for species, count in hist.Items():
subject.Add(species, count)
subject.Done()
return subject
def Match(self, match):
"""Match up a rarefied subject with a complete subject.
match: complete Subject
Assigns attributes:
total_reads:
total_species:
prev_unseen:
"""
self.total_reads = match.num_reads
self.total_species = match.num_species
# compute the prevalence of unseen species (at least approximately,
        # based on all species counts in match)
_, seen = self.GetSeenSpecies()
seen_total = 0.0
unseen_total = 0.0
for count, species in match.species:
if species in seen:
seen_total += count
else:
unseen_total += count
self.prev_unseen = unseen_total / (seen_total + unseen_total)
def RunSimulation(self, num_reads, frac_flag=False, jitter=0.01):
"""Simulates additional observations and returns a rarefaction curve.
k is the number of additional observations
num_new is the number of new species seen
num_reads: how many new reads to simulate
frac_flag: whether to convert to fraction of species seen
jitter: size of jitter added if frac_flag is true
Returns: list of (k, num_new) pairs
"""
m, seen = self.GetSeenSpecies()
n, observations = self.GenerateObservations(num_reads)
curve = []
for i, obs in enumerate(observations):
seen.add(obs)
if frac_flag:
frac_seen = len(seen) / float(n)
frac_seen += random.uniform(-jitter, jitter)
curve.append((i+1, frac_seen))
else:
num_new = len(seen) - m
curve.append((i+1, num_new))
return curve
def RunSimulations(self, num_sims, num_reads, frac_flag=False):
"""Runs simulations and returns a list of curves.
Each curve is a sequence of (k, num_new) pairs.
num_sims: how many simulations to run
num_reads: how many samples to generate in each simulation
frac_flag: whether to convert num_new to fraction of total
"""
curves = [self.RunSimulation(num_reads, frac_flag)
for _ in range(num_sims)]
return curves
def MakePredictive(self, curves):
"""Makes a predictive distribution of additional species.
curves: list of (k, num_new) curves
Returns: Pmf of num_new
"""
pred = thinkbayes.Pmf(name=self.code)
for curve in curves:
_, last_num_new = curve[-1]
pred.Incr(last_num_new)
pred.Normalize()
return pred
def MakeConditionals(curves, ks):
"""Makes Cdfs of the distribution of num_new conditioned on k.
curves: list of (k, num_new) curves
ks: list of values of k
Returns: list of Cdfs
"""
joint = MakeJointPredictive(curves)
cdfs = []
for k in ks:
pmf = joint.Conditional(1, 0, k)
pmf.name = 'k=%d' % k
cdf = pmf.MakeCdf()
cdfs.append(cdf)
print '90%% credible interval for %d' % k,
print cdf.CredibleInterval(90)
return cdfs
def MakeJointPredictive(curves):
"""Makes a joint distribution of k and num_new.
curves: list of (k, num_new) curves
Returns: joint Pmf of (k, num_new)
"""
joint = thinkbayes.Joint()
for curve in curves:
for k, num_new in curve:
joint.Incr((k, num_new))
joint.Normalize()
return joint
def MakeFracCdfs(curves, ks):
"""Makes Cdfs of the fraction of species seen.
curves: list of (k, num_new) curves
Returns: list of Cdfs
"""
d = {}
for curve in curves:
for k, frac in curve:
if k in ks:
d.setdefault(k, []).append(frac)
cdfs = {}
for k, fracs in d.iteritems():
cdf = thinkbayes.MakeCdfFromList(fracs)
cdfs[k] = cdf
return cdfs
def SpeciesGenerator(names, num):
"""Generates a series of names, starting with the given names.
Additional names are 'unseen' plus a serial number.
names: list of strings
num: total number of species names to generate
Returns: string iterator
"""
i = 0
for name in names:
yield name
i += 1
while i < num:
yield 'unseen-%d' % i
i += 1
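# Illustrative sketch, not part of the original code: what SpeciesGenerator
# yields for a short list of names. The helper name _demo_species_generator
# is made up for this example.
def _demo_species_generator():
    """Returns ['lion', 'tiger', 'unseen-2', 'unseen-3']."""
    return list(SpeciesGenerator(['lion', 'tiger'], 4))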
def ReadRarefactedData(filename='journal.pone.0047712.s001.csv',
clean_param=0):
"""Reads a data file and returns a list of Subjects.
Data from http://www.plosone.org/article/
info%3Adoi%2F10.1371%2Fjournal.pone.0047712#s4
filename: string filename to read
clean_param: parameter passed to Clean
Returns: map from code to Subject
"""
fp = open(filename)
reader = csv.reader(fp)
_ = reader.next()
subject = Subject('')
subject_map = {}
i = 0
for t in reader:
code = t[0]
if code != subject.code:
# start a new subject
subject = Subject(code)
subject_map[code] = subject
# append a number to the species names so they're unique
species = t[1]
species = '%s-%d' % (species, i)
i += 1
count = int(t[2])
subject.Add(species, count)
for code, subject in subject_map.iteritems():
subject.Done(clean_param=clean_param)
return subject_map
def ReadCompleteDataset(filename='BBB_data_from_Rob.csv', clean_param=0):
"""Reads a data file and returns a list of Subjects.
Data from personal correspondence with Rob Dunn, received 2-7-13.
Converted from xlsx to csv.
filename: string filename to read
clean_param: parameter passed to Clean
Returns: map from code to Subject
"""
fp = open(filename)
reader = csv.reader(fp)
header = reader.next()
header = reader.next()
subject_codes = header[1:-1]
subject_codes = ['B'+code for code in subject_codes]
# create the subject map
uber_subject = Subject('uber')
subject_map = {}
for code in subject_codes:
subject_map[code] = Subject(code)
# read lines
i = 0
for t in reader:
otu_code = t[0]
if otu_code == '':
continue
# pull out a species name and give it a number
otu_names = t[-1]
taxons = otu_names.split(';')
species = taxons[-1]
species = '%s-%d' % (species, i)
i += 1
counts = [int(x) for x in t[1:-1]]
# print otu_code, species
for code, count in zip(subject_codes, counts):
if count > 0:
subject_map[code].Add(species, count)
uber_subject.Add(species, count)
uber_subject.Done(clean_param=clean_param)
for code, subject in subject_map.iteritems():
subject.Done(clean_param=clean_param)
return subject_map, uber_subject
def JoinSubjects():
"""Reads both datasets and computers their inner join.
Finds all subjects that appear in both datasets.
For subjects in the rarefacted dataset, looks up the total
number of reads and stores it as total_reads. num_reads
is normally 400.
Returns: map from code to Subject
"""
# read the rarefacted dataset
sampled_subjects = ReadRarefactedData()
# read the complete dataset
all_subjects, _ = ReadCompleteDataset()
for code, subject in sampled_subjects.iteritems():
if code in all_subjects:
match = all_subjects[code]
subject.Match(match)
return sampled_subjects
def JitterCurve(curve, dx=0.2, dy=0.3):
"""Adds random noise to the pairs in a curve.
dx and dy control the amplitude of the noise in each dimension.
"""
curve = [(x+random.uniform(-dx, dx),
y+random.uniform(-dy, dy)) for x, y in curve]
return curve
def OffsetCurve(curve, i, n, dx=0.3, dy=0.3):
"""Adds random noise to the pairs in a curve.
i is the index of the curve
n is the number of curves
dx and dy control the amplitude of the noise in each dimension.
"""
xoff = -dx + 2 * dx * i / (n-1)
yoff = -dy + 2 * dy * i / (n-1)
curve = [(x+xoff, y+yoff) for x, y in curve]
return curve
def PlotCurves(curves, root='species-rare'):
"""Plots a set of curves.
curves is a list of curves; each curve is a list of (x, y) pairs.
"""
thinkplot.Clf()
color = '#225EA8'
n = len(curves)
for i, curve in enumerate(curves):
curve = OffsetCurve(curve, i, n)
xs, ys = zip(*curve)
thinkplot.Plot(xs, ys, color=color, alpha=0.3, linewidth=0.5)
thinkplot.Save(root=root,
xlabel='# samples',
ylabel='# species',
formats=FORMATS,
legend=False)
def PlotConditionals(cdfs, root='species-cond'):
"""Plots cdfs of num_new conditioned on k.
cdfs: list of Cdf
root: string filename root
"""
thinkplot.Clf()
thinkplot.PrePlot(num=len(cdfs))
thinkplot.Cdfs(cdfs)
thinkplot.Save(root=root,
xlabel='# new species',
ylabel='Prob',
formats=FORMATS)
def PlotFracCdfs(cdfs, root='species-frac'):
"""Plots CDFs of the fraction of species seen.
cdfs: map from k to CDF of fraction of species seen after k samples
"""
thinkplot.Clf()
color = '#225EA8'
for k, cdf in cdfs.iteritems():
xs, ys = cdf.Render()
ys = [1-y for y in ys]
thinkplot.Plot(xs, ys, color=color, linewidth=1)
x = 0.9
y = 1 - cdf.Prob(x)
pyplot.text(x, y, str(k), fontsize=9, color=color,
horizontalalignment='center',
verticalalignment='center',
bbox=dict(facecolor='white', edgecolor='none'))
thinkplot.Save(root=root,
xlabel='Fraction of species seen',
ylabel='Probability',
formats=FORMATS,
legend=False)
class Species(thinkbayes.Suite):
"""Represents hypotheses about the number of species."""
def __init__(self, ns, conc=1, iters=1000):
hypos = [thinkbayes.Dirichlet(n, conc) for n in ns]
thinkbayes.Suite.__init__(self, hypos)
self.iters = iters
def Update(self, data):
"""Updates the suite based on the data.
data: list of observed frequencies
"""
# call Update in the parent class, which calls Likelihood
thinkbayes.Suite.Update(self, data)
# update the next level of the hierarchy
for hypo in self.Values():
hypo.Update(data)
def Likelihood(self, data, hypo):
"""Computes the likelihood of the data under this hypothesis.
hypo: Dirichlet object
data: list of observed frequencies
"""
dirichlet = hypo
# draw sample Likelihoods from the hypothetical Dirichlet dist
# and add them up
like = 0
for _ in range(self.iters):
like += dirichlet.Likelihood(data)
# correct for the number of ways the observed species
# might have been chosen from all species
m = len(data)
like *= thinkbayes.BinomialCoef(dirichlet.n, m)
return like
def DistN(self):
"""Computes the distribution of n."""
pmf = thinkbayes.Pmf()
for hypo, prob in self.Items():
pmf.Set(hypo.n, prob)
return pmf
class Species2(object):
"""Represents hypotheses about the number of species.
Combines two layers of the hierarchy into one object.
ns and probs represent the distribution of N
params represents the parameters of the Dirichlet distributions
"""
def __init__(self, ns, conc=1, iters=1000):
self.ns = ns
self.conc = conc
self.probs = numpy.ones(len(ns), dtype=numpy.float)
self.params = numpy.ones(self.ns[-1], dtype=numpy.float) * conc
self.iters = iters
self.num_reads = 0
self.m = 0
def Preload(self, data):
"""Change the initial parameters to fit the data better.
Just an experiment. Doesn't work.
"""
m = len(data)
singletons = data.count(1)
num = m - singletons
print m, singletons, num
addend = numpy.ones(num, dtype=numpy.float) * 1
print len(addend)
print len(self.params[singletons:m])
self.params[singletons:m] += addend
print 'Preload', num
def Update(self, data):
"""Updates the distribution based on data.
data: numpy array of counts
"""
self.num_reads += sum(data)
like = numpy.zeros(len(self.ns), dtype=numpy.float)
for _ in range(self.iters):
like += self.SampleLikelihood(data)
self.probs *= like
self.probs /= self.probs.sum()
self.m = len(data)
#self.params[:self.m] += data * self.conc
self.params[:self.m] += data
def SampleLikelihood(self, data):
"""Computes the likelihood of the data for all values of n.
Draws one sample from the distribution of prevalences.
data: sequence of observed counts
Returns: numpy array of m likelihoods
"""
gammas = numpy.random.gamma(self.params)
m = len(data)
row = gammas[:m]
col = numpy.cumsum(gammas)
log_likes = []
for n in self.ns:
ps = row / col[n-1]
terms = numpy.log(ps) * data
log_like = terms.sum()
log_likes.append(log_like)
log_likes -= numpy.max(log_likes)
likes = numpy.exp(log_likes)
coefs = [thinkbayes.BinomialCoef(n, m) for n in self.ns]
likes *= coefs
return likes
def DistN(self):
"""Computes the distribution of n.
Returns: new Pmf object
"""
pmf = thinkbayes.MakePmfFromItems(zip(self.ns, self.probs))
return pmf
def RandomN(self):
"""Returns a random value of n."""
return self.DistN().Random()
def DistQ(self, iters=100):
"""Computes the distribution of q based on distribution of n.
Returns: pmf of q
"""
cdf_n = self.DistN().MakeCdf()
sample_n = cdf_n.Sample(iters)
pmf = thinkbayes.Pmf()
for n in sample_n:
q = self.RandomQ(n)
pmf.Incr(q)
pmf.Normalize()
return pmf
def RandomQ(self, n):
"""Returns a random value of q.
Based on n, self.num_reads and self.conc.
n: number of species
Returns: q
"""
# generate random prevalences
dirichlet = thinkbayes.Dirichlet(n, conc=self.conc)
prevalences = dirichlet.Random()
# generate a simulated sample
pmf = thinkbayes.MakePmfFromItems(enumerate(prevalences))
cdf = pmf.MakeCdf()
sample = cdf.Sample(self.num_reads)
seen = set(sample)
# add up the prevalence of unseen species
q = 0
for species, prev in enumerate(prevalences):
if species not in seen:
q += prev
return q
def MarginalBeta(self, n, index):
"""Computes the conditional distribution of the indicated species.
n: conditional number of species
index: which species
Returns: Beta object representing a distribution of prevalence.
"""
alpha0 = self.params[:n].sum()
alpha = self.params[index]
return thinkbayes.Beta(alpha, alpha0-alpha)
def DistOfPrevalence(self, index):
"""Computes the distribution of prevalence for the indicated species.
index: which species
Returns: (metapmf, mix) where metapmf is a MetaPmf and mix is a Pmf
"""
metapmf = thinkbayes.Pmf()
for n, prob in zip(self.ns, self.probs):
beta = self.MarginalBeta(n, index)
pmf = beta.MakePmf()
metapmf.Set(pmf, prob)
mix = thinkbayes.MakeMixture(metapmf)
return metapmf, mix
def SamplePosterior(self):
"""Draws random n and prevalences.
Returns: (n, prevalences)
"""
n = self.RandomN()
prevalences = self.SamplePrevalences(n)
#print 'Peeking at n_cheat'
#n = n_cheat
return n, prevalences
def SamplePrevalences(self, n):
"""Draws a sample of prevalences given n.
n: the number of species assumed in the conditional
Returns: numpy array of n prevalences
"""
if n == 1:
return [1.0]
q_desired = self.RandomQ(n)
q_desired = max(q_desired, 1e-6)
params = self.Unbias(n, self.m, q_desired)
gammas = numpy.random.gamma(params)
gammas /= gammas.sum()
return gammas
def Unbias(self, n, m, q_desired):
"""Adjusts the parameters to achieve desired prev_unseen (q).
n: number of species
m: seen species
q_desired: prevalence of unseen species
"""
params = self.params[:n].copy()
if n == m:
return params
x = sum(params[:m])
y = sum(params[m:])
a = x + y
#print x, y, a, x/a, y/a
g = q_desired * a / y
f = (a - g * y) / x
params[:m] *= f
params[m:] *= g
return params
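# Illustrative sketch, not part of the original code: a numeric check of the
# rescaling done in Species2.Unbias. With x the total parameter mass of the
# seen species and y the mass of the unseen ones, the factors f and g keep the
# overall total a = x + y unchanged while making the unseen share equal
# q_desired. The helper name _check_unbias_rescaling is made up here.
def _check_unbias_rescaling(x=6.0, y=2.0, q_desired=0.1):
    """Returns (total, unseen_share); expect (x + y, q_desired)."""
    a = x + y
    g = q_desired * a / y
    f = (a - g * y) / x
    new_seen = f * x
    new_unseen = g * y
    return new_seen + new_unseen, new_unseen / (new_seen + new_unseen)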
class Species3(Species2):
"""Represents hypotheses about the number of species."""
def Update(self, data):
"""Updates the suite based on the data.
data: list of observations
"""
# sample the likelihoods and add them up
like = numpy.zeros(len(self.ns), dtype=numpy.float)
for _ in range(self.iters):
like += self.SampleLikelihood(data)
self.probs *= like
self.probs /= self.probs.sum()
m = len(data)
self.params[:m] += data
def SampleLikelihood(self, data):
"""Computes the likelihood of the data under all hypotheses.
data: list of observations
"""
# get a random sample
gammas = numpy.random.gamma(self.params)
# row is just the first m elements of gammas
m = len(data)
row = gammas[:m]
# col is the cumulative sum of gammas
col = numpy.cumsum(gammas)[self.ns[0]-1:]
# each row of the array is a set of ps, normalized
# for each hypothetical value of n
array = row / col[:, numpy.newaxis]
# computing the multinomial PDF under a log transform
# take the log of the ps and multiply by the data
terms = numpy.log(array) * data
# add up the rows
log_likes = terms.sum(axis=1)
# before exponentiating, scale into a reasonable range
log_likes -= numpy.max(log_likes)
likes = numpy.exp(log_likes)
# correct for the number of ways we could see m species
# out of a possible n
coefs = [thinkbayes.BinomialCoef(n, m) for n in self.ns]
likes *= coefs
return likes
class Species4(Species):
"""Represents hypotheses about the number of species."""
def Update(self, data):
"""Updates the suite based on the data.
data: list of observed frequencies
"""
m = len(data)
# loop through the species and update one at a time
for i in range(m):
one = numpy.zeros(i+1)
one[i] = data[i]
# call the parent class
Species.Update(self, one)
def Likelihood(self, data, hypo):
"""Computes the likelihood of the data under this hypothesis.
Note: this only works correctly if we update one species at a time.
hypo: Dirichlet object
data: list of observed frequencies
"""
dirichlet = hypo
like = 0
for _ in range(self.iters):
like += dirichlet.Likelihood(data)
# correct for the number of unseen species the new one
# could have been
m = len(data)
num_unseen = dirichlet.n - m + 1
like *= num_unseen
return like
class Species5(Species2):
"""Represents hypotheses about the number of species.
    Combines two layers of the hierarchy into one object.
ns and probs represent the distribution of N
params represents the parameters of the Dirichlet distributions
"""
def Update(self, data):
"""Updates the suite based on the data.
data: list of observed frequencies in increasing order
"""
# loop through the species and update one at a time
m = len(data)
for i in range(m):
self.UpdateOne(i+1, data[i])
self.params[i] += data[i]
def UpdateOne(self, i, count):
"""Updates the suite based on the data.
Evaluates the likelihood for all values of n.
i: which species was observed (1..n)
count: how many were observed
"""
# how many species have we seen so far
self.m = i
# how many reads have we seen
self.num_reads += count
if self.iters == 0:
return
# sample the likelihoods and add them up
likes = numpy.zeros(len(self.ns), dtype=numpy.float)
for _ in range(self.iters):
likes += self.SampleLikelihood(i, count)
# correct for the number of unseen species the new one
# could have been
unseen_species = [n-i+1 for n in self.ns]
likes *= unseen_species
# multiply the priors by the likelihoods and renormalize
self.probs *= likes
self.probs /= self.probs.sum()
def SampleLikelihood(self, i, count):
"""Computes the likelihood of the data under all hypotheses.
i: which species was observed
count: how many were observed
"""
# get a random sample of p
gammas = numpy.random.gamma(self.params)
# sums is the cumulative sum of p, for each value of n
sums = numpy.cumsum(gammas)[self.ns[0]-1:]
# get p for the mth species, for each value of n
ps = gammas[i-1] / sums
log_likes = numpy.log(ps) * count
# before exponentiating, scale into a reasonable range
log_likes -= numpy.max(log_likes)
likes = numpy.exp(log_likes)
return likes
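# Illustrative sketch, not part of the original code: why the likelihood
# updates above subtract numpy.max(log_likes) before exponentiating. Raw
# exponentials of large negative log likelihoods underflow to zero, while the
# shifted values keep the ratios between hypotheses and peak at exactly 1.
# The helper name _demo_log_like_scaling is made up for this example.
def _demo_log_like_scaling():
    """Returns (raw, shifted) likelihood arrays for three hypotheses."""
    log_likes = numpy.array([-1000.0, -1001.0, -1003.0])
    raw = numpy.exp(log_likes)                         # underflows to [0, 0, 0]
    shifted = numpy.exp(log_likes - numpy.max(log_likes))  # [1, e**-1, e**-3]
    return raw, shifted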
def MakePosterior(constructor, data, ns, conc=1, iters=1000):
"""Makes a suite, updates it and returns the posterior suite.
Prints the elapsed time.
data: observed species and their counts
ns: sequence of hypothetical ns
conc: concentration parameter
iters: how many samples to draw
Returns: posterior suite of the given type
"""
suite = constructor(ns, conc=conc, iters=iters)
# print constructor.__name__
start = time.time()
suite.Update(data)
end = time.time()
print 'Processing time', end-start
return suite
def PlotAllVersions():
"""Makes a graph of posterior distributions of N."""
data = [1, 2, 3]
m = len(data)
n = 20
ns = range(m, n)
for constructor in [Species, Species2, Species3, Species4, Species5]:
suite = MakePosterior(constructor, data, ns)
pmf = suite.DistN()
pmf.name = '%s' % (constructor.__name__)
thinkplot.Pmf(pmf)
thinkplot.Save(root='species3',
xlabel='Number of species',
ylabel='Prob')
def PlotMedium():
"""Makes a graph of posterior distributions of N."""
data = [1, 1, 1, 1, 2, 3, 5, 9]
m = len(data)
n = 20
ns = range(m, n)
for constructor in [Species, Species2, Species3, Species4, Species5]:
suite = MakePosterior(constructor, data, ns)
pmf = suite.DistN()
pmf.name = '%s' % (constructor.__name__)
thinkplot.Pmf(pmf)
thinkplot.Show()
def SimpleDirichletExample():
"""Makes a plot showing posterior distributions for three species.
This is the case where we know there are exactly three species.
"""
thinkplot.Clf()
thinkplot.PrePlot(3)
names = ['lions', 'tigers', 'bears']
data = [3, 2, 1]
dirichlet = thinkbayes.Dirichlet(3)
for i in range(3):
beta = dirichlet.MarginalBeta(i)
print 'mean', names[i], beta.Mean()
dirichlet.Update(data)
for i in range(3):
beta = dirichlet.MarginalBeta(i)
print 'mean', names[i], beta.Mean()
pmf = beta.MakePmf(name=names[i])
thinkplot.Pmf(pmf)
thinkplot.Save(root='species1',
xlabel='Prevalence',
ylabel='Prob',
formats=FORMATS,
)
def HierarchicalExample():
"""Shows the posterior distribution of n for lions, tigers and bears.
"""
ns = range(3, 30)
suite = Species(ns, iters=8000)
data = [3, 2, 1]
suite.Update(data)
thinkplot.Clf()
thinkplot.PrePlot(num=1)
pmf = suite.DistN()
thinkplot.Pmf(pmf)
thinkplot.Save(root='species2',
xlabel='Number of species',
ylabel='Prob',
formats=FORMATS,
)
def CompareHierarchicalExample():
"""Makes a graph of posterior distributions of N."""
data = [3, 2, 1]
m = len(data)
n = 30
ns = range(m, n)
constructors = [Species, Species5]
iters = [1000, 100]
for constructor, iters in zip(constructors, iters):
suite = MakePosterior(constructor, data, ns, iters)
pmf = suite.DistN()
pmf.name = '%s' % (constructor.__name__)
thinkplot.Pmf(pmf)
thinkplot.Show()
def ProcessSubjects(codes):
"""Process subjects with the given codes and plot their posteriors.
code: sequence of string codes
"""
thinkplot.Clf()
thinkplot.PrePlot(len(codes))
subjects = ReadRarefactedData()
pmfs = []
for code in codes:
subject = subjects[code]
subject.Process()
pmf = subject.suite.DistN()
pmf.name = subject.code
thinkplot.Pmf(pmf)
pmfs.append(pmf)
print 'ProbGreater', thinkbayes.PmfProbGreater(pmfs[0], pmfs[1])
print 'ProbLess', thinkbayes.PmfProbLess(pmfs[0], pmfs[1])
thinkplot.Save(root='species4',
xlabel='Number of species',
ylabel='Prob',
formats=FORMATS,
)
def RunSubject(code, conc=1, high=500):
"""Run the analysis for the subject with the given code.
code: string code
"""
subjects = JoinSubjects()
subject = subjects[code]
subject.Process(conc=conc, high=high, iters=300)
subject.MakeQuickPrediction()
PrintSummary(subject)
actual_l = subject.total_species - subject.num_species
cdf_l = subject.DistL().MakeCdf()
PrintPrediction(cdf_l, actual_l)
subject.MakeFigures()
num_reads = 400
curves = subject.RunSimulations(100, num_reads)
root = 'species-rare-%s' % subject.code
PlotCurves(curves, root=root)
num_reads = 800
curves = subject.RunSimulations(500, num_reads)
ks = [100, 200, 400, 800]
cdfs = MakeConditionals(curves, ks)
root = 'species-cond-%s' % subject.code
PlotConditionals(cdfs, root=root)
num_reads = 1000
curves = subject.RunSimulations(500, num_reads, frac_flag=True)
ks = [10, 100, 200, 400, 600, 800, 1000]
cdfs = MakeFracCdfs(curves, ks)
root = 'species-frac-%s' % subject.code
PlotFracCdfs(cdfs, root=root)
def PrintSummary(subject):
"""Print a summary of a subject.
subject: Subject
"""
print subject.code
print 'found %d species in %d reads' % (subject.num_species,
subject.num_reads)
print 'total %d species in %d reads' % (subject.total_species,
subject.total_reads)
cdf = subject.suite.DistN().MakeCdf()
print 'n'
PrintPrediction(cdf, 'unknown')
def PrintPrediction(cdf, actual):
"""Print a summary of a prediction.
cdf: predictive distribution
actual: actual value
"""
median = cdf.Percentile(50)
low, high = cdf.CredibleInterval(75)
print 'predicted %0.2f (%0.2f %0.2f)' % (median, low, high)
print 'actual', actual
def RandomSeed(x):
"""Initialize random.random and numpy.random.
x: int seed
"""
random.seed(x)
numpy.random.seed(x)
def GenerateFakeSample(n, r, tr, conc=1):
"""Generates fake data with the given parameters.
n: number of species
r: number of reads in subsample
tr: total number of reads
conc: concentration parameter
Returns: hist of all reads, hist of subsample, prev_unseen
"""
# generate random prevalences
dirichlet = thinkbayes.Dirichlet(n, conc=conc)
prevalences = dirichlet.Random()
prevalences.sort()
# generate a simulated sample
pmf = thinkbayes.MakePmfFromItems(enumerate(prevalences))
cdf = pmf.MakeCdf()
sample = cdf.Sample(tr)
# collect the species counts
hist = thinkbayes.MakeHistFromList(sample)
# extract a subset of the data
if tr > r:
random.shuffle(sample)
subsample = sample[:r]
subhist = thinkbayes.MakeHistFromList(subsample)
else:
subhist = hist
# add up the prevalence of unseen species
prev_unseen = 0
for species, prev in enumerate(prevalences):
if species not in subhist:
prev_unseen += prev
return hist, subhist, prev_unseen
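# Illustrative sketch, not part of the original code: generating one fake
# dataset and reporting how many species the subsample missed (l) and how much
# prevalence it missed (q). The numbers are arbitrary and the helper name
# _demo_fake_sample is made up for this example.
def _demo_fake_sample(seed=17):
    """Returns (l, prev_unseen) for one simulated subject."""
    RandomSeed(seed)
    hist, subhist, prev_unseen = GenerateFakeSample(n=30, r=100, tr=300, conc=0.5)
    l = len(hist) - len(subhist)
    return l, prev_unseen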
def PlotActualPrevalences():
"""Makes a plot comparing actual prevalences with a model.
"""
# read data
subject_map, _ = ReadCompleteDataset()
# for subjects with more than 50 species,
    # PMF of the actual max prevalence, and PMF of max prevalence
# generated by a simulation
pmf_actual = thinkbayes.Pmf()
pmf_sim = thinkbayes.Pmf()
# concentration parameter used in the simulation
conc = 0.06
for code, subject in subject_map.iteritems():
prevalences = subject.GetPrevalences()
m = len(prevalences)
if m < 2:
continue
actual_max = max(prevalences)
print code, m, actual_max
# incr the PMFs
if m > 50:
pmf_actual.Incr(actual_max)
pmf_sim.Incr(SimulateMaxPrev(m, conc))
# plot CDFs for the actual and simulated max prevalence
cdf_actual = pmf_actual.MakeCdf(name='actual')
cdf_sim = pmf_sim.MakeCdf(name='sim')
thinkplot.Cdfs([cdf_actual, cdf_sim])
thinkplot.Show()
def ScatterPrevalences(ms, actual):
"""Make a scatter plot of actual prevalences and expected values.
    ms: sorted sequence of m (number of species)
actual: sequence of actual max prevalence
"""
for conc in [1, 0.5, 0.2, 0.1]:
expected = [ExpectedMaxPrev(m, conc) for m in ms]
thinkplot.Plot(ms, expected)
thinkplot.Scatter(ms, actual)
thinkplot.Show(xscale='log')
def SimulateMaxPrev(m, conc=1):
"""Returns random max prevalence from a Dirichlet distribution.
m: int number of species
conc: concentration parameter of the Dirichlet distribution
Returns: float max of m prevalences
"""
dirichlet = thinkbayes.Dirichlet(m, conc)
prevalences = dirichlet.Random()
return max(prevalences)
def ExpectedMaxPrev(m, conc=1, iters=100):
"""Estimate expected max prevalence.
m: number of species
conc: concentration parameter
iters: how many iterations to run
Returns: expected max prevalence
"""
dirichlet = thinkbayes.Dirichlet(m, conc)
t = []
for _ in range(iters):
prevalences = dirichlet.Random()
t.append(max(prevalences))
return numpy.mean(t)
class Calibrator(object):
"""Encapsulates the calibration process."""
def __init__(self, conc=0.1):
"""
"""
self.conc = conc
self.ps = range(10, 100, 10)
self.total_n = numpy.zeros(len(self.ps))
self.total_q = numpy.zeros(len(self.ps))
self.total_l = numpy.zeros(len(self.ps))
self.n_seq = []
self.q_seq = []
self.l_seq = []
def Calibrate(self, num_runs=100, n_low=30, n_high=400, r=400, tr=1200):
"""Runs calibrations.
num_runs: how many runs
"""
for seed in range(num_runs):
self.RunCalibration(seed, n_low, n_high, r, tr)
self.total_n *= 100.0 / num_runs
self.total_q *= 100.0 / num_runs
self.total_l *= 100.0 / num_runs
def Validate(self, num_runs=100, clean_param=0):
"""Runs validations.
num_runs: how many runs
"""
subject_map, _ = ReadCompleteDataset(clean_param=clean_param)
i = 0
for match in subject_map.itervalues():
if match.num_reads < 400:
continue
num_reads = 100
print 'Validate', match.code
subject = match.Resample(num_reads)
subject.Match(match)
n_actual = None
q_actual = subject.prev_unseen
l_actual = subject.total_species - subject.num_species
self.RunSubject(subject, n_actual, q_actual, l_actual)
i += 1
if i == num_runs:
break
self.total_n *= 100.0 / num_runs
self.total_q *= 100.0 / num_runs
self.total_l *= 100.0 / num_runs
def PlotN(self, root='species-n'):
"""Makes a scatter plot of simulated vs actual prev_unseen (q).
"""
xs, ys = zip(*self.n_seq)
if None in xs:
return
high = max(xs+ys)
thinkplot.Plot([0, high], [0, high], color='gray')
thinkplot.Scatter(xs, ys)
thinkplot.Save(root=root,
xlabel='Actual n',
ylabel='Predicted')
def PlotQ(self, root='species-q'):
"""Makes a scatter plot of simulated vs actual prev_unseen (q).
"""
thinkplot.Plot([0, 0.2], [0, 0.2], color='gray')
xs, ys = zip(*self.q_seq)
thinkplot.Scatter(xs, ys)
thinkplot.Save(root=root,
xlabel='Actual q',
ylabel='Predicted')
    def PlotL(self, root='species-l'):
"""Makes a scatter plot of simulated vs actual l.
"""
thinkplot.Plot([0, 20], [0, 20], color='gray')
xs, ys = zip(*self.l_seq)
thinkplot.Scatter(xs, ys)
thinkplot.Save(root=root,
xlabel='Actual l',
ylabel='Predicted')
def PlotCalibrationCurves(self, root='species5'):
"""Plots calibration curves"""
print self.total_n
print self.total_q
print self.total_l
thinkplot.Plot([0, 100], [0, 100], color='gray', alpha=0.2)
if self.total_n[0] >= 0:
thinkplot.Plot(self.ps, self.total_n, label='n')
thinkplot.Plot(self.ps, self.total_q, label='q')
thinkplot.Plot(self.ps, self.total_l, label='l')
thinkplot.Save(root=root,
axis=[0, 100, 0, 100],
xlabel='Ideal percentages',
ylabel='Predictive distributions',
formats=FORMATS,
)
def RunCalibration(self, seed, n_low, n_high, r, tr):
"""Runs a single calibration run.
Generates N and prevalences from a Dirichlet distribution,
then generates simulated data.
Runs analysis to get the posterior distributions.
Generates calibration curves for each posterior distribution.
seed: int random seed
"""
# generate a random number of species and their prevalences
# (from a Dirichlet distribution with alpha_i = conc for all i)
RandomSeed(seed)
n_actual = random.randrange(n_low, n_high+1)
hist, subhist, q_actual = GenerateFakeSample(
n_actual,
r,
tr,
self.conc)
l_actual = len(hist) - len(subhist)
print 'Run low, high, conc', n_low, n_high, self.conc
print 'Run r, tr', r, tr
print 'Run n, q, l', n_actual, q_actual, l_actual
# extract the data
data = [count for species, count in subhist.Items()]
data.sort()
print 'data', data
# make a Subject and process
subject = Subject('simulated')
subject.num_reads = r
subject.total_reads = tr
for species, count in subhist.Items():
subject.Add(species, count)
subject.Done()
self.RunSubject(subject, n_actual, q_actual, l_actual)
def RunSubject(self, subject, n_actual, q_actual, l_actual):
"""Runs the analysis for a subject.
subject: Subject
n_actual: number of species
q_actual: prevalence of unseen species
l_actual: number of new species
"""
# process and make prediction
subject.Process(conc=self.conc, iters=100)
subject.MakeQuickPrediction()
# extract the posterior suite
suite = subject.suite
# check the distribution of n
pmf_n = suite.DistN()
print 'n'
self.total_n += self.CheckDistribution(pmf_n, n_actual, self.n_seq)
# check the distribution of q
pmf_q = suite.DistQ()
print 'q'
self.total_q += self.CheckDistribution(pmf_q, q_actual, self.q_seq)
# check the distribution of additional species
pmf_l = subject.DistL()
print 'l'
self.total_l += self.CheckDistribution(pmf_l, l_actual, self.l_seq)
def CheckDistribution(self, pmf, actual, seq):
"""Checks a predictive distribution and returns a score vector.
pmf: predictive distribution
actual: actual value
seq: which sequence to append (actual, mean) onto
"""
mean = pmf.Mean()
seq.append((actual, mean))
cdf = pmf.MakeCdf()
PrintPrediction(cdf, actual)
sv = ScoreVector(cdf, self.ps, actual)
return sv
def ScoreVector(cdf, ps, actual):
"""Checks whether the actual value falls in each credible interval.
cdf: predictive distribution
ps: percentages to check (0-100)
actual: actual value
Returns: numpy array of 0, 0.5, or 1
"""
scores = []
for p in ps:
low, high = cdf.CredibleInterval(p)
score = Score(low, high, actual)
scores.append(score)
return numpy.array(scores)
def Score(low, high, n):
"""Score whether the actual value falls in the range.
    Hitting the posts counts as 0.5; -1 means the actual value is unknown.
low: low end of range
high: high end of range
n: actual value
Returns: -1, 0, 0.5 or 1
"""
if n is None:
return -1
if low < n < high:
return 1
if n == low or n == high:
return 0.5
else:
return 0
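# Worked example (added for illustration; the interval below is hypothetical):
# with a credible interval of (120, 180),
#
#   Score(120, 180, 150)   # -> 1    (inside the interval)
#   Score(120, 180, 120)   # -> 0.5  (hits a post)
#   Score(120, 180, 300)   # -> 0    (outside the interval)
#   Score(120, 180, None)  # -> -1   (actual value unavailable)
#
# ScoreVector stacks one such score per percentage in ps, so a well-calibrated
# posterior should fall inside the p% credible interval roughly p% of the time.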
def FakeSubject(n=300, conc=0.1, num_reads=400, prevalences=None):
"""Makes a fake Subject.
If prevalences is provided, n and conc are ignored.
n: number of species
conc: concentration parameter
num_reads: number of reads
prevalences: numpy array of prevalences (overrides n and conc)
"""
# generate random prevalences
if prevalences is None:
dirichlet = thinkbayes.Dirichlet(n, conc=conc)
prevalences = dirichlet.Random()
prevalences.sort()
# generate a simulated sample
pmf = thinkbayes.MakePmfFromItems(enumerate(prevalences))
cdf = pmf.MakeCdf()
sample = cdf.Sample(num_reads)
# collect the species counts
hist = thinkbayes.MakeHistFromList(sample)
# extract the data
data = [count for species, count in hist.Items()]
data.sort()
# make a Subject and process
subject = Subject('simulated')
for species, count in hist.Items():
subject.Add(species, count)
subject.Done()
return subject
def PlotSubjectCdf(code=None, clean_param=0):
"""Checks whether the Dirichlet model can replicate the data.
"""
subject_map, uber_subject = ReadCompleteDataset(clean_param=clean_param)
if code is None:
subjects = subject_map.values()
subject = random.choice(subjects)
code = subject.code
elif code == 'uber':
subject = uber_subject
else:
subject = subject_map[code]
print subject.code
m = subject.GetM()
subject.Process(high=m, conc=0.1, iters=0)
print subject.suite.params[:m]
# plot the cdf
options = dict(linewidth=3, color='blue', alpha=0.5)
cdf = subject.MakeCdf()
thinkplot.Cdf(cdf, **options)
options = dict(linewidth=1, color='green', alpha=0.5)
# generate fake subjects and plot their CDFs
for _ in range(10):
prevalences = subject.suite.SamplePrevalences(m)
fake = FakeSubject(prevalences=prevalences)
cdf = fake.MakeCdf()
thinkplot.Cdf(cdf, **options)
root = 'species-cdf-%s' % code
thinkplot.Save(root=root,
xlabel='rank',
ylabel='CDF',
xscale='log',
formats=FORMATS,
)
def RunCalibration(flag='cal', num_runs=100, clean_param=50):
"""Runs either the calibration or validation process.
flag: string 'cal' or 'val'
num_runs: how many runs
clean_param: parameter used for data cleaning
"""
cal = Calibrator(conc=0.1)
if flag == 'val':
cal.Validate(num_runs=num_runs, clean_param=clean_param)
else:
cal.Calibrate(num_runs=num_runs)
cal.PlotN(root='species-n-%s' % flag)
cal.PlotQ(root='species-q-%s' % flag)
cal.PlotL(root='species-l-%s' % flag)
cal.PlotCalibrationCurves(root='species5-%s' % flag)
def RunTests():
"""Runs calibration code and generates some figures."""
RunCalibration(flag='val')
RunCalibration(flag='cal')
PlotSubjectCdf('B1558.G', clean_param=50)
PlotSubjectCdf(None)
def main(script):
RandomSeed(17)
RunSubject('B1242', conc=1, high=100)
RandomSeed(17)
SimpleDirichletExample()
RandomSeed(17)
HierarchicalExample()
if __name__ == '__main__':
main(*sys.argv)
| mit |
ctherien/pysptools | pysptools/tests/test_pwc.py | 1 | 1780 | #
#------------------------------------------------------------------------------
# Copyright (c) 2013-2014, Christian Therien
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#------------------------------------------------------------------------------
#
# test_pwc.py - This file is part of the PySptools package.
#
"""
The following function is tested:
bilateral
"""
from __future__ import print_function
import os
import os.path as osp
import numpy as np
import matplotlib.pyplot as plt
import pysptools.sigproc as sig
def tests():
plt.ioff()
data_path = os.environ['PYSPTOOLS_DATA']
home = os.environ['HOME']
result_path = osp.join(home, 'results')
if osp.exists(result_path) == False:
os.makedirs(result_path)
fin = open(os.path.join(data_path, 'dnagwas.txt'))
signal_txt = fin.readlines()
signal = [float(x) for x in signal_txt]
z = sig.bilateral(np.array(signal), 0, 10, 25, display=1, maxiter=5)
plt.plot(signal)
plt.plot(z, color='r')
if os.path.exists(result_path) == False:
os.makedirs(result_path)
plt.savefig(os.path.join(result_path, 'dnagwas.png'))
if __name__ == '__main__':
import sys
print(sys.version_info)
tests()
| apache-2.0 |
srowen/spark | python/pyspark/pandas/tests/test_dataframe_spark_io.py | 14 | 19999 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from distutils.version import LooseVersion
import unittest
import glob
import os
import numpy as np
import pandas as pd
import pyarrow as pa
from pyspark import pandas as ps
from pyspark.testing.pandasutils import PandasOnSparkTestCase, TestUtils
class DataFrameSparkIOTest(PandasOnSparkTestCase, TestUtils):
"""Test cases for big data I/O using Spark."""
@property
def test_column_order(self):
return ["i32", "i64", "f", "bhello"]
@property
def test_pdf(self):
pdf = pd.DataFrame(
{
"i32": np.arange(20, dtype=np.int32) % 3,
"i64": np.arange(20, dtype=np.int64) % 5,
"f": np.arange(20, dtype=np.float64),
"bhello": np.random.choice(["hello", "yo", "people"], size=20).astype("O"),
},
columns=self.test_column_order,
index=np.random.rand(20),
)
return pdf
def test_parquet_read(self):
with self.temp_dir() as tmp:
data = self.test_pdf
self.spark.createDataFrame(data, "i32 int, i64 long, f double, bhello string").coalesce(
1
).write.parquet(tmp, mode="overwrite")
def check(columns, expected):
if LooseVersion("0.21.1") <= LooseVersion(pd.__version__):
expected = pd.read_parquet(tmp, columns=columns)
actual = ps.read_parquet(tmp, columns=columns)
self.assertPandasEqual(expected, actual.to_pandas())
check(None, data)
check(["i32", "i64"], data[["i32", "i64"]])
check(["i64", "i32"], data[["i64", "i32"]])
if LooseVersion(pa.__version__) < LooseVersion("1.0.0"):
# TODO: `pd.read_parquet()` changed the behavior due to PyArrow 1.0.0.
# We might want to adjust the behavior. Let's see how pandas handles it.
check(("i32", "i64"), data[["i32", "i64"]])
check(["a", "b", "i32", "i64"], data[["i32", "i64"]])
check([], pd.DataFrame([]))
check(["a"], pd.DataFrame([]))
check("i32", pd.DataFrame([]))
check("float", data[["f"]])
# check with pyspark patch.
if LooseVersion("0.21.1") <= LooseVersion(pd.__version__):
expected = pd.read_parquet(tmp)
else:
expected = data
actual = ps.read_parquet(tmp)
self.assertPandasEqual(expected, actual.to_pandas())
# When index columns are known
pdf = self.test_pdf
expected = ps.DataFrame(pdf)
expected_idx = expected.set_index("bhello")[["f", "i32", "i64"]]
actual_idx = ps.read_parquet(tmp, index_col="bhello")[["f", "i32", "i64"]]
self.assert_eq(
actual_idx.sort_values(by="f").to_spark().toPandas(),
expected_idx.sort_values(by="f").to_spark().toPandas(),
)
def test_parquet_read_with_pandas_metadata(self):
with self.temp_dir() as tmp:
expected1 = self.test_pdf
path1 = "{}/file1.parquet".format(tmp)
expected1.to_parquet(path1)
self.assert_eq(ps.read_parquet(path1, pandas_metadata=True), expected1)
expected2 = expected1.reset_index()
path2 = "{}/file2.parquet".format(tmp)
expected2.to_parquet(path2)
self.assert_eq(ps.read_parquet(path2, pandas_metadata=True), expected2)
expected3 = expected2.set_index("index", append=True)
path3 = "{}/file3.parquet".format(tmp)
expected3.to_parquet(path3)
self.assert_eq(ps.read_parquet(path3, pandas_metadata=True), expected3)
def test_parquet_write(self):
with self.temp_dir() as tmp:
pdf = self.test_pdf
expected = ps.DataFrame(pdf)
# Write out partitioned by one column
expected.to_parquet(tmp, mode="overwrite", partition_cols="i32")
# Reset column order, as once the data is written out, Spark rearranges partition
# columns to appear first.
actual = ps.read_parquet(tmp)
self.assertFalse((actual.columns == self.test_column_order).all())
actual = actual[self.test_column_order]
self.assert_eq(
actual.sort_values(by="f").to_spark().toPandas(),
expected.sort_values(by="f").to_spark().toPandas(),
)
# Write out partitioned by two columns
expected.to_parquet(tmp, mode="overwrite", partition_cols=["i32", "bhello"])
# Reset column order, as once the data is written out, Spark rearranges partition
# columns to appear first.
actual = ps.read_parquet(tmp)
self.assertFalse((actual.columns == self.test_column_order).all())
actual = actual[self.test_column_order]
self.assert_eq(
actual.sort_values(by="f").to_spark().toPandas(),
expected.sort_values(by="f").to_spark().toPandas(),
)
def test_table(self):
with self.table("test_table"):
pdf = self.test_pdf
expected = ps.DataFrame(pdf)
# Write out partitioned by one column
expected.spark.to_table("test_table", mode="overwrite", partition_cols="i32")
# Reset column order, as once the data is written out, Spark rearranges partition
# columns to appear first.
actual = ps.read_table("test_table")
self.assertFalse((actual.columns == self.test_column_order).all())
actual = actual[self.test_column_order]
self.assert_eq(
actual.sort_values(by="f").to_spark().toPandas(),
expected.sort_values(by="f").to_spark().toPandas(),
)
# Write out partitioned by two columns
expected.to_table("test_table", mode="overwrite", partition_cols=["i32", "bhello"])
# Reset column order, as once the data is written out, Spark rearranges partition
# columns to appear first.
actual = ps.read_table("test_table")
self.assertFalse((actual.columns == self.test_column_order).all())
actual = actual[self.test_column_order]
self.assert_eq(
actual.sort_values(by="f").to_spark().toPandas(),
expected.sort_values(by="f").to_spark().toPandas(),
)
# When index columns are known
expected_idx = expected.set_index("bhello")[["f", "i32", "i64"]]
actual_idx = ps.read_table("test_table", index_col="bhello")[["f", "i32", "i64"]]
self.assert_eq(
actual_idx.sort_values(by="f").to_spark().toPandas(),
expected_idx.sort_values(by="f").to_spark().toPandas(),
)
expected_idx = expected.set_index(["bhello"])[["f", "i32", "i64"]]
actual_idx = ps.read_table("test_table", index_col=["bhello"])[["f", "i32", "i64"]]
self.assert_eq(
actual_idx.sort_values(by="f").to_spark().toPandas(),
expected_idx.sort_values(by="f").to_spark().toPandas(),
)
expected_idx = expected.set_index(["i32", "bhello"])[["f", "i64"]]
actual_idx = ps.read_table("test_table", index_col=["i32", "bhello"])[["f", "i64"]]
self.assert_eq(
actual_idx.sort_values(by="f").to_spark().toPandas(),
expected_idx.sort_values(by="f").to_spark().toPandas(),
)
def test_spark_io(self):
with self.temp_dir() as tmp:
pdf = self.test_pdf
expected = ps.DataFrame(pdf)
# Write out partitioned by one column
expected.to_spark_io(tmp, format="json", mode="overwrite", partition_cols="i32")
# Reset column order, as once the data is written out, Spark rearranges partition
# columns to appear first.
actual = ps.read_spark_io(tmp, format="json")
self.assertFalse((actual.columns == self.test_column_order).all())
actual = actual[self.test_column_order]
self.assert_eq(
actual.sort_values(by="f").to_spark().toPandas(),
expected.sort_values(by="f").to_spark().toPandas(),
)
# Write out partitioned by two columns
expected.to_spark_io(
tmp, format="json", mode="overwrite", partition_cols=["i32", "bhello"]
)
# Reset column order, as once the data is written out, Spark rearranges partition
# columns to appear first.
actual = ps.read_spark_io(path=tmp, format="json")
self.assertFalse((actual.columns == self.test_column_order).all())
actual = actual[self.test_column_order]
self.assert_eq(
actual.sort_values(by="f").to_spark().toPandas(),
expected.sort_values(by="f").to_spark().toPandas(),
)
# When index columns are known
pdf = self.test_pdf
expected = ps.DataFrame(pdf)
col_order = ["f", "i32", "i64"]
expected_idx = expected.set_index("bhello")[col_order]
actual_idx = ps.read_spark_io(tmp, format="json", index_col="bhello")[col_order]
self.assert_eq(
actual_idx.sort_values(by="f").to_spark().toPandas(),
expected_idx.sort_values(by="f").to_spark().toPandas(),
)
@unittest.skip("openpyxl")
def test_read_excel(self):
with self.temp_dir() as tmp:
path1 = "{}/file1.xlsx".format(tmp)
self.test_pdf[["i32"]].to_excel(path1)
self.assert_eq(ps.read_excel(open(path1, "rb")), pd.read_excel(open(path1, "rb")))
self.assert_eq(
ps.read_excel(open(path1, "rb"), index_col=0),
pd.read_excel(open(path1, "rb"), index_col=0),
)
self.assert_eq(
ps.read_excel(open(path1, "rb"), index_col=0, squeeze=True),
pd.read_excel(open(path1, "rb"), index_col=0, squeeze=True),
)
self.assert_eq(ps.read_excel(path1), pd.read_excel(path1))
self.assert_eq(ps.read_excel(path1, index_col=0), pd.read_excel(path1, index_col=0))
self.assert_eq(
ps.read_excel(path1, index_col=0, squeeze=True),
pd.read_excel(path1, index_col=0, squeeze=True),
)
self.assert_eq(ps.read_excel(tmp), pd.read_excel(path1))
path2 = "{}/file2.xlsx".format(tmp)
self.test_pdf[["i32"]].to_excel(path2)
self.assert_eq(
ps.read_excel(tmp, index_col=0).sort_index(),
pd.concat(
[pd.read_excel(path1, index_col=0), pd.read_excel(path2, index_col=0)]
).sort_index(),
)
self.assert_eq(
ps.read_excel(tmp, index_col=0, squeeze=True).sort_index(),
pd.concat(
[
pd.read_excel(path1, index_col=0, squeeze=True),
pd.read_excel(path2, index_col=0, squeeze=True),
]
).sort_index(),
)
with self.temp_dir() as tmp:
path1 = "{}/file1.xlsx".format(tmp)
with pd.ExcelWriter(path1) as writer:
self.test_pdf.to_excel(writer, sheet_name="Sheet_name_1")
self.test_pdf[["i32"]].to_excel(writer, sheet_name="Sheet_name_2")
sheet_names = [["Sheet_name_1", "Sheet_name_2"], None]
pdfs1 = pd.read_excel(open(path1, "rb"), sheet_name=None, index_col=0)
pdfs1_squeezed = pd.read_excel(
open(path1, "rb"), sheet_name=None, index_col=0, squeeze=True
)
for sheet_name in sheet_names:
psdfs = ps.read_excel(open(path1, "rb"), sheet_name=sheet_name, index_col=0)
self.assert_eq(psdfs["Sheet_name_1"], pdfs1["Sheet_name_1"])
self.assert_eq(psdfs["Sheet_name_2"], pdfs1["Sheet_name_2"])
psdfs = ps.read_excel(
open(path1, "rb"), sheet_name=sheet_name, index_col=0, squeeze=True
)
self.assert_eq(psdfs["Sheet_name_1"], pdfs1_squeezed["Sheet_name_1"])
self.assert_eq(psdfs["Sheet_name_2"], pdfs1_squeezed["Sheet_name_2"])
self.assert_eq(
ps.read_excel(tmp, index_col=0, sheet_name="Sheet_name_2"),
pdfs1["Sheet_name_2"],
)
for sheet_name in sheet_names:
psdfs = ps.read_excel(tmp, sheet_name=sheet_name, index_col=0)
self.assert_eq(psdfs["Sheet_name_1"], pdfs1["Sheet_name_1"])
self.assert_eq(psdfs["Sheet_name_2"], pdfs1["Sheet_name_2"])
psdfs = ps.read_excel(tmp, sheet_name=sheet_name, index_col=0, squeeze=True)
self.assert_eq(psdfs["Sheet_name_1"], pdfs1_squeezed["Sheet_name_1"])
self.assert_eq(psdfs["Sheet_name_2"], pdfs1_squeezed["Sheet_name_2"])
path2 = "{}/file2.xlsx".format(tmp)
with pd.ExcelWriter(path2) as writer:
self.test_pdf.to_excel(writer, sheet_name="Sheet_name_1")
self.test_pdf[["i32"]].to_excel(writer, sheet_name="Sheet_name_2")
pdfs2 = pd.read_excel(path2, sheet_name=None, index_col=0)
pdfs2_squeezed = pd.read_excel(path2, sheet_name=None, index_col=0, squeeze=True)
self.assert_eq(
ps.read_excel(tmp, sheet_name="Sheet_name_2", index_col=0).sort_index(),
pd.concat([pdfs1["Sheet_name_2"], pdfs2["Sheet_name_2"]]).sort_index(),
)
self.assert_eq(
ps.read_excel(
tmp, sheet_name="Sheet_name_2", index_col=0, squeeze=True
).sort_index(),
pd.concat(
[pdfs1_squeezed["Sheet_name_2"], pdfs2_squeezed["Sheet_name_2"]]
).sort_index(),
)
for sheet_name in sheet_names:
psdfs = ps.read_excel(tmp, sheet_name=sheet_name, index_col=0)
self.assert_eq(
psdfs["Sheet_name_1"].sort_index(),
pd.concat([pdfs1["Sheet_name_1"], pdfs2["Sheet_name_1"]]).sort_index(),
)
self.assert_eq(
psdfs["Sheet_name_2"].sort_index(),
pd.concat([pdfs1["Sheet_name_2"], pdfs2["Sheet_name_2"]]).sort_index(),
)
psdfs = ps.read_excel(tmp, sheet_name=sheet_name, index_col=0, squeeze=True)
self.assert_eq(
psdfs["Sheet_name_1"].sort_index(),
pd.concat(
[pdfs1_squeezed["Sheet_name_1"], pdfs2_squeezed["Sheet_name_1"]]
).sort_index(),
)
self.assert_eq(
psdfs["Sheet_name_2"].sort_index(),
pd.concat(
[pdfs1_squeezed["Sheet_name_2"], pdfs2_squeezed["Sheet_name_2"]]
).sort_index(),
)
def test_read_orc(self):
with self.temp_dir() as tmp:
path = "{}/file1.orc".format(tmp)
data = self.test_pdf
self.spark.createDataFrame(data, "i32 int, i64 long, f double, bhello string").coalesce(
1
).write.orc(path, mode="overwrite")
            # `spark.write.orc` creates a directory containing distributed orc files.
            # But pandas can only read from a file, not a directory. Therefore, we need the orc file path.
orc_file_path = glob.glob(os.path.join(path, "*.orc"))[0]
expected = data.reset_index()[data.columns]
actual = ps.read_orc(path)
self.assertPandasEqual(expected, actual.to_pandas())
# columns
columns = ["i32", "i64"]
expected = data.reset_index()[columns]
actual = ps.read_orc(path, columns=columns)
self.assertPandasEqual(expected, actual.to_pandas())
# index_col
expected = data.set_index("i32")
actual = ps.read_orc(path, index_col="i32")
self.assert_eq(actual, expected)
expected = data.set_index(["i32", "f"])
actual = ps.read_orc(path, index_col=["i32", "f"])
self.assert_eq(actual, expected)
# index_col with columns
expected = data.set_index("i32")[["i64", "bhello"]]
actual = ps.read_orc(path, index_col=["i32"], columns=["i64", "bhello"])
self.assert_eq(actual, expected)
expected = data.set_index(["i32", "f"])[["bhello", "i64"]]
actual = ps.read_orc(path, index_col=["i32", "f"], columns=["bhello", "i64"])
self.assert_eq(actual, expected)
msg = "Unknown column name 'i'"
with self.assertRaises(ValueError, msg=msg):
ps.read_orc(path, columns="i32")
msg = "Unknown column name 'i34'"
with self.assertRaises(ValueError, msg=msg):
ps.read_orc(path, columns=["i34", "i64"])
def test_orc_write(self):
with self.temp_dir() as tmp:
pdf = self.test_pdf
expected = ps.DataFrame(pdf)
# Write out partitioned by one column
expected.to_orc(tmp, mode="overwrite", partition_cols="i32")
# Reset column order, as once the data is written out, Spark rearranges partition
# columns to appear first.
actual = ps.read_orc(tmp)
self.assertFalse((actual.columns == self.test_column_order).all())
actual = actual[self.test_column_order]
self.assert_eq(
actual.sort_values(by="f").to_spark().toPandas(),
expected.sort_values(by="f").to_spark().toPandas(),
)
# Write out partitioned by two columns
expected.to_orc(tmp, mode="overwrite", partition_cols=["i32", "bhello"])
# Reset column order, as once the data is written out, Spark rearranges partition
# columns to appear first.
actual = ps.read_orc(tmp)
self.assertFalse((actual.columns == self.test_column_order).all())
actual = actual[self.test_column_order]
self.assert_eq(
actual.sort_values(by="f").to_spark().toPandas(),
expected.sort_values(by="f").to_spark().toPandas(),
)
if __name__ == "__main__":
from pyspark.pandas.tests.test_dataframe_spark_io import * # noqa: F401
try:
import xmlrunner # type: ignore[import]
testRunner = xmlrunner.XMLTestRunner(output="target/test-reports", verbosity=2)
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
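# Added note (not part of the original test module): each case above
# round-trips a small pandas DataFrame through Spark and back. A minimal
# manual check of the same pattern might look like the sketch below; the
# output path is an arbitrary assumption.
#
#   import pyspark.pandas as ps
#   psdf = ps.DataFrame({"i32": [1, 2], "f": [0.1, 0.2]})
#   psdf.to_parquet("/tmp/example_parquet", mode="overwrite")
#   ps.read_parquet("/tmp/example_parquet").sort_values(by="f")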
| apache-2.0 |
llhe/tensorflow | tensorflow/contrib/learn/python/learn/tests/dataframe/dataframe_test.py | 62 | 3753 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests of the DataFrame class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.learn.python import learn
from tensorflow.contrib.learn.python.learn.tests.dataframe import mocks
from tensorflow.python.framework import dtypes
from tensorflow.python.platform import test
def setup_test_df():
"""Create a dataframe populated with some test columns."""
df = learn.DataFrame()
df["a"] = learn.TransformedSeries(
[mocks.MockSeries("foobar", mocks.MockTensor("Tensor a", dtypes.int32))],
mocks.MockTwoOutputTransform("iue", "eui", "snt"), "out1")
df["b"] = learn.TransformedSeries(
[mocks.MockSeries("foobar", mocks.MockTensor("Tensor b", dtypes.int32))],
mocks.MockTwoOutputTransform("iue", "eui", "snt"), "out2")
df["c"] = learn.TransformedSeries(
[mocks.MockSeries("foobar", mocks.MockTensor("Tensor c", dtypes.int32))],
mocks.MockTwoOutputTransform("iue", "eui", "snt"), "out1")
return df
class DataFrameTest(test.TestCase):
"""Test of `DataFrame`."""
def test_create(self):
df = setup_test_df()
self.assertEqual(df.columns(), frozenset(["a", "b", "c"]))
def test_select_columns(self):
df = setup_test_df()
df2 = df.select_columns(["a", "c"])
self.assertEqual(df2.columns(), frozenset(["a", "c"]))
def test_exclude_columns(self):
df = setup_test_df()
df2 = df.exclude_columns(["a", "c"])
self.assertEqual(df2.columns(), frozenset(["b"]))
def test_get_item(self):
df = setup_test_df()
c1 = df["b"]
self.assertEqual(
mocks.MockTensor("Mock Tensor 2", dtypes.int32), c1.build())
def test_del_item_column(self):
df = setup_test_df()
self.assertEqual(3, len(df))
del df["b"]
self.assertEqual(2, len(df))
self.assertEqual(df.columns(), frozenset(["a", "c"]))
def test_set_item_column(self):
df = setup_test_df()
self.assertEqual(3, len(df))
col1 = mocks.MockSeries("QuackColumn",
mocks.MockTensor("Tensor ", dtypes.int32))
df["quack"] = col1
self.assertEqual(4, len(df))
col2 = df["quack"]
self.assertEqual(col1, col2)
def test_set_item_column_multi(self):
df = setup_test_df()
self.assertEqual(3, len(df))
col1 = mocks.MockSeries("QuackColumn", [])
col2 = mocks.MockSeries("MooColumn", [])
df["quack", "moo"] = [col1, col2]
self.assertEqual(5, len(df))
col3 = df["quack"]
self.assertEqual(col1, col3)
col4 = df["moo"]
self.assertEqual(col2, col4)
def test_set_item_pandas(self):
# TODO(jamieas)
pass
def test_set_item_numpy(self):
# TODO(jamieas)
pass
def test_build(self):
df = setup_test_df()
result = df.build()
expected = {
"a": mocks.MockTensor("Mock Tensor 1", dtypes.int32),
"b": mocks.MockTensor("Mock Tensor 2", dtypes.int32),
"c": mocks.MockTensor("Mock Tensor 1", dtypes.int32)
}
self.assertEqual(expected, result)
if __name__ == "__main__":
test.main()
| apache-2.0 |
annahs/atmos_research | WHI_long_term_v2_GEOSChem_and_meas_histograms_by_HYSPLIT_cluster_for_all_FT_sampling_Junwei_data.py | 1 | 25381 | import matplotlib.pyplot as plt
import numpy as np
import os
import sys
from pprint import pprint
from datetime import datetime
from datetime import timedelta
import pickle
import copy
from pyhdf.SD import SD, SDC, SDS
timezone = -8
calib_stability_uncertainty = 0.1
#fire times
fire_time1 = [datetime.strptime('2009/07/27 00:00', '%Y/%m/%d %H:%M'), datetime.strptime('2009/08/08 00:00', '%Y/%m/%d %H:%M')] #row_datetimes following Takahama et al (2011) doi:10.5194/acp-11-6367-2011 #PST
fire_time2 = [datetime.strptime('2010/07/26 09:00', '%Y/%m/%d %H:%M'), datetime.strptime('2010/07/28 09:30', '%Y/%m/%d %H:%M')] #jason's BC clear report #PST
##sampling times
sampling_times_file = 'C:/Users/Sarah Hanna/Documents/Data/WHI long term record/GOES-Chem/Junwei_runs/WHI_SP2_6h_rBC_mass_concs.txt'
sampling_times = []
with open(sampling_times_file,'r') as f:
f.readline()
for line in f:
newline = line.split()
sampling_date = newline[0]
sampling_time = newline[1]
sampling_datetime = datetime(int(sampling_date[0:4]),int(sampling_date[5:7]),int(sampling_date[8:10]),int(sampling_time[0:2]))
sampling_times.append(sampling_datetime)
#open cluslist and read into a python list
cluslist = []
CLUSLIST_file = 'C:/hysplit4/working/WHI/CLUSLIST_10'
CLUSLIST_file = 'C:/Users/Sarah Hanna/Documents/Data/WHI long term record/HYSPLIT/clustering/CLUSLIST_10'
with open(CLUSLIST_file,'r') as f:
for line in f:
newline = line.split()
cluster_no = int(newline[0])
traj_time = datetime(int(newline[2])+2000,int(newline[3]),int(newline[4]),int(newline[5]))+timedelta(hours = timezone)
cluslist.append([traj_time,cluster_no])
# sort cluslist by row_datetime in place
cluslist.sort(key=lambda clus_info: clus_info[0])
#make a copy for sorting the GeosChem data
cluslist_GC = copy.deepcopy(cluslist)
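# Summary added for readability: the HYSPLIT cluster numbers are mapped to
# named air-mass sectors further down in this script (and in the plot labels),
# i.e.
#   cluster 4            -> 'Cont' (N. Canada)
#   cluster 9            -> 'GBPS' (Georgia Basin/Puget Sound)
#   clusters 6, 8        -> 'SPac' (S. Pacific)
#   clusters 2, 7        -> 'LRT'  (W. Pacific/Asia; presumably long-range transport)
#   clusters 1, 3, 5, 10 -> 'NPac' (N. Pacific)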
############Meaurements
#get full rBC record (in PST and 10 min binned intervals) and put in dictionaries keyed by date
rBC_24h_data = {} #does not include BB data
rBC_BB_24h_data = {}
rBC_FT_data_cluster_NPac = {}
rBC_FT_data_cluster_SPac = {}
rBC_FT_data_cluster_Cont = {}
rBC_FT_data_cluster_LRT = {}
rBC_FT_data_cluster_GBPS = {}
rBC_FT_data_cluster_BB = {}
with open('C:/Users/Sarah Hanna/Documents/Data/WHI long term record/WHI_rBC_record_2009to2013-spikes_removed.rbcpckl', 'r') as f: #this has only the data of interest, it has been truncated at May 31, 2012 also these row_datetimes are in PST
full_rBC_record = pickle.load(f)
for row in full_rBC_record:
row_datetime = row[0] #in PST
row_date = datetime(row_datetime.year, row_datetime.month, row_datetime.day)
row_rBC_mass_conc = row[2]
row_rBC_mass_conc_LL = row[3]
row_rBC_mass_conc_UL = row[4]
if np.isnan(row_rBC_mass_conc_LL):
row_abs_err = np.nan
else:
row_abs_err = (row_rBC_mass_conc-row_rBC_mass_conc_LL)
#get all 24hr data (could make it less BB times if we this after the BB data extraction code
correction_factor_for_massdistr = 1./0.4767
mass_distr_correction_error = 0.016 #this is the uncertainty in the firt of the mass distribution for this period. from WHI_long_term_v2_size_distr_fitting_and_plotting.py
corrected_mass_conc = row_rBC_mass_conc*correction_factor_for_massdistr
row_data = [corrected_mass_conc, row_abs_err+(corrected_mass_conc*(mass_distr_correction_error+calib_stability_uncertainty)) ]
if row_date in rBC_24h_data:
rBC_24h_data[row_date].append(row_data)
else:
rBC_24h_data[row_date] = [row_data]
#if in a BB time, put this data in BB dict
if (fire_time1[0] <= row_datetime <= fire_time1[1]) or (fire_time2[0] <= row_datetime <= fire_time2[1]):
correction_factor_for_massdistr = 1./0.4153
mass_distr_correction_error = 0.018 #this is the uncertainty in the firt of the mass distribution for this period. from WHI_long_term_v2_size_distr_fitting_and_plotting.py
corrected_mass_conc = row_rBC_mass_conc*correction_factor_for_massdistr
row_data = [corrected_mass_conc, row_abs_err+(corrected_mass_conc*(mass_distr_correction_error+calib_stability_uncertainty)) ]
if row_date in rBC_BB_24h_data:
rBC_BB_24h_data[row_date].append(row_data)
else:
rBC_BB_24h_data[row_date] = [row_data]
#pop off any cluslist times that are in the past
cluslist_current_datetime = cluslist[0][0] #in PST
while row_datetime > (cluslist_current_datetime + timedelta(hours=3)):
cluslist.pop(0)
if len(cluslist):
cluslist_current_datetime = cluslist[0][0]
continue
else:
break
#get cluster no
cluslist_current_cluster_no = cluslist[0][1]
#add data to list in cluster dictionaries (1 list per cluster time early night/late night)
if ((cluslist_current_datetime-timedelta(hours=3)) <= row_datetime <= (cluslist_current_datetime+timedelta(hours=3))):
#if in a BB time,
if (fire_time1[0] <= row_datetime <= fire_time1[1]) or (fire_time2[0] <= row_datetime <= fire_time2[1]):
correction_factor_for_massdistr = 1./0.415
mass_distr_correction_error = 0.018 #this is the uncertainty in the firt of the mass distribution for this period. from WHI_long_term_v2_size_distr_fitting_and_plotting.py
corrected_mass_conc = row_rBC_mass_conc*correction_factor_for_massdistr
row_data = [corrected_mass_conc, row_abs_err+(corrected_mass_conc*(mass_distr_correction_error+calib_stability_uncertainty)) ]
if cluslist_current_datetime in rBC_FT_data_cluster_BB:
rBC_FT_data_cluster_BB[cluslist_current_datetime].append(row_data)
else:
rBC_FT_data_cluster_BB[cluslist_current_datetime] = [row_data]
continue #do not go on to put this data into a cluster dictionary, since it's BB data
if cluslist_current_cluster_no == 9:
correction_factor_for_massdistr = 1./0.5411
mass_distr_correction_error = 0.015 #this is the uncertainty in the firt of the mass distribution for this period. from WHI_long_term_v2_size_distr_fitting_and_plotting.py
corrected_mass_conc = row_rBC_mass_conc*correction_factor_for_massdistr
row_data = [corrected_mass_conc, row_abs_err+(corrected_mass_conc*(mass_distr_correction_error+calib_stability_uncertainty)) ]
if cluslist_current_datetime in rBC_FT_data_cluster_GBPS:
rBC_FT_data_cluster_GBPS[cluslist_current_datetime].append(row_data)
else:
rBC_FT_data_cluster_GBPS[cluslist_current_datetime] = [row_data]
if cluslist_current_cluster_no == 4:
correction_factor_for_massdistr = 1./0.4028
mass_distr_correction_error = 0.028 #this is the uncertainty in the firt of the mass distribution for this period. from WHI_long_term_v2_size_distr_fitting_and_plotting.py
corrected_mass_conc = row_rBC_mass_conc*correction_factor_for_massdistr
row_data = [corrected_mass_conc, row_abs_err+(corrected_mass_conc*(mass_distr_correction_error+calib_stability_uncertainty)) ]
if cluslist_current_datetime in rBC_FT_data_cluster_Cont:
rBC_FT_data_cluster_Cont[cluslist_current_datetime].append(row_data)
else:
rBC_FT_data_cluster_Cont[cluslist_current_datetime] = [row_data]
if cluslist_current_cluster_no in [6,8]:
correction_factor_for_massdistr = 1./0.4626
mass_distr_correction_error = 0.032 #this is the uncertainty in the firt of the mass distribution for this period. from WHI_long_term_v2_size_distr_fitting_and_plotting.py
corrected_mass_conc = row_rBC_mass_conc*correction_factor_for_massdistr
row_data = [corrected_mass_conc, row_abs_err+(corrected_mass_conc*(mass_distr_correction_error+calib_stability_uncertainty)) ]
if cluslist_current_datetime in rBC_FT_data_cluster_SPac:
rBC_FT_data_cluster_SPac[cluslist_current_datetime].append(row_data)
else:
rBC_FT_data_cluster_SPac[cluslist_current_datetime] = [row_data]
if cluslist_current_cluster_no in [2,7]:
correction_factor_for_massdistr = 1./0.5280
mass_distr_correction_error = 0.019 #this is the uncertainty in the firt of the mass distribution for this period. from WHI_long_term_v2_size_distr_fitting_and_plotting.py
corrected_mass_conc = row_rBC_mass_conc*correction_factor_for_massdistr
row_data = [corrected_mass_conc, row_abs_err+(corrected_mass_conc*(mass_distr_correction_error+calib_stability_uncertainty)) ]
if cluslist_current_datetime in rBC_FT_data_cluster_LRT:
rBC_FT_data_cluster_LRT[cluslist_current_datetime].append(row_data)
else:
rBC_FT_data_cluster_LRT[cluslist_current_datetime] = [row_data]
if cluslist_current_cluster_no in [1,3,5,10]:
correction_factor_for_massdistr = 1./0.3525
mass_distr_correction_error = 0.015 #this is the uncertainty in the firt of the mass distribution for this period. from WHI_long_term_v2_size_distr_fitting_and_plotting.py
corrected_mass_conc = row_rBC_mass_conc*correction_factor_for_massdistr
row_data = [corrected_mass_conc, row_abs_err+(corrected_mass_conc*(mass_distr_correction_error+calib_stability_uncertainty)) ]
if cluslist_current_datetime in rBC_FT_data_cluster_NPac:
rBC_FT_data_cluster_NPac[cluslist_current_datetime].append(row_data)
else:
rBC_FT_data_cluster_NPac[cluslist_current_datetime] = [row_data]
#24h rBC-meas avgs
SP2_24h_FR = []
SP2_24h_BB = []
#6h rBC-meas avgs (FT data)
SP2_6h_NPac = []
SP2_6h_SPac = []
SP2_6h_Cont = []
SP2_6h_LRT = []
SP2_6h_GBPS = []
SP2_6h_BB = []
SP2_6h_all_non_BB = []
#24h avgd data
for date, mass_data in rBC_24h_data.iteritems():
mass_concs = [row[0] for row in mass_data]
mass_concs_abs_err = [row[1] for row in mass_data]
date_mean = np.mean(mass_concs)
date_mean_err = np.mean(mass_concs_abs_err)/date_mean
SP2_24h_FR.append([date_mean,date_mean_err])
for date, mass_data in rBC_BB_24h_data.iteritems():
mass_concs = [row[0] for row in mass_data]
mass_concs_abs_err = [row[1] for row in mass_data]
date_mean = np.mean(mass_concs)
date_mean_err = np.mean(mass_concs_abs_err)/date_mean
SP2_24h_BB.append([date_mean,date_mean_err])
#6h avgd data
for date, mass_data in rBC_FT_data_cluster_NPac.iteritems():
mass_concs = [row[0] for row in mass_data]
mass_concs_abs_err = [row[1] for row in mass_data]
date_mean = np.mean(mass_concs)
date_mean_err = np.mean(mass_concs_abs_err)/date_mean
SP2_6h_NPac.append([date_mean,date_mean_err])
SP2_6h_all_non_BB.append([date_mean,date_mean_err])
for date, mass_data in rBC_FT_data_cluster_SPac.iteritems():
mass_concs = [row[0] for row in mass_data]
mass_concs_abs_err = [row[1] for row in mass_data]
date_mean = np.mean(mass_concs)
date_mean_err = np.mean(mass_concs_abs_err)/date_mean
SP2_6h_SPac.append([date_mean,date_mean_err])
SP2_6h_all_non_BB.append([date_mean,date_mean_err])
for date, mass_data in rBC_FT_data_cluster_Cont.iteritems():
mass_concs = [row[0] for row in mass_data]
mass_concs_abs_err = [row[1] for row in mass_data]
date_mean = np.mean(mass_concs)
date_mean_err = np.mean(mass_concs_abs_err)/date_mean
SP2_6h_Cont.append([date_mean,date_mean_err])
SP2_6h_all_non_BB.append([date_mean,date_mean_err])
for date, mass_data in rBC_FT_data_cluster_LRT.iteritems():
mass_concs = [row[0] for row in mass_data]
mass_concs_abs_err = [row[1] for row in mass_data]
date_mean = np.mean(mass_concs)
date_mean_err = np.mean(mass_concs_abs_err)/date_mean
SP2_6h_LRT.append([date_mean,date_mean_err])
SP2_6h_all_non_BB.append([date_mean,date_mean_err])
for date, mass_data in rBC_FT_data_cluster_GBPS.iteritems():
mass_concs = [row[0] for row in mass_data]
mass_concs_abs_err = [row[1] for row in mass_data]
date_mean = np.mean(mass_concs)
date_mean_err = np.mean(mass_concs_abs_err)/date_mean
SP2_6h_GBPS.append([date_mean,date_mean_err])
SP2_6h_all_non_BB.append([date_mean,date_mean_err])
for date, mass_data in rBC_FT_data_cluster_BB.iteritems():
mass_concs = [row[0] for row in mass_data]
mass_concs_abs_err = [row[1] for row in mass_data]
date_mean = np.mean(mass_concs)
date_mean_err = np.mean(mass_concs_abs_err)/date_mean
SP2_6h_BB.append([date_mean,date_mean_err])
###################GEOS-Chem
GC_data = {}
data_dir = 'C:/Users/Sarah Hanna/Documents/Data/WHI long term record/GOES-Chem/Junwei_runs/v10_tests/All_together/'
os.chdir(data_dir)
lat = 20 #20 corresponds to 50deg
lon = 7 #7 corresponds to -122.5deg
level = 9 #1-47 #9 is closest to WHI avg P (WHI 95% CI = 770-793)
molar_mass_BC = 12.0107 #in g/mol
ng_per_g = 10**9
R = 8.3144621 # in m3*Pa/(K*mol)
GEOS_Chem_factor = 10**-9
start_hour = 4
end_hour = 16
pressure = []
for file in os.listdir(data_dir):
if file.endswith('.hdf'):
file_year = int(file[2:6])
file_month = int(file[6:8])
file_day = int(file[8:10])
file_hour = int(file[11:13])
if start_hour <= file_hour < end_hour: #ignore any times not in the 2000-0800 PST window (0400-1600 UTC)
hdf_file = SD(file, SDC.READ)
#pprint(hdf_file.datasets())
#pressures = hdf_file.select('PEDGE-$::PSURF')
#pressure.append(pressures[level,lat,lon])
#lats = hdf_file.select('LAT')
#lons = hdf_file.select('LON')
##print lats[lat], lons[lon]
if start_hour <= file_hour < (start_hour+6):
period_midtime = datetime(file_year,file_month,file_day,23) - timedelta(days=1) #this is the early night period of 2000-0200 PST (mid time is 2300 of the previous day when converting from UTC to PST)
if (start_hour+6) <= file_hour < end_hour:
period_midtime = datetime(file_year,file_month,file_day,05) #this is the late night period of 0200-0800 PST
i=0
for layer in [level, level-1, level+1]:
hydrophilic_BC = hdf_file.select('IJ-AVG-$::BCPI') #3d conc data in ppbv (molBC/molAIR)
hydrophobic_BC = hdf_file.select('IJ-AVG-$::BCPO')
total_BC_ppbv = hydrophilic_BC[layer,lat,lon] + hydrophobic_BC[layer,lat,lon]
Factor = (1000 * 1e2 * 1e6 * 1e-9) / (8.31 * 273)
BC_conc_ngm3 = total_BC_ppbv*molar_mass_BC*ng_per_g*GEOS_Chem_factor*(101325/(R*273)) #101325/(R*273) corrects to STP
temp_BC = total_BC_ppbv * Factor * 12 *1000
if period_midtime in sampling_times: #this excludes BB times already
if period_midtime in GC_data:
GC_data[period_midtime][i].append(temp_BC)
else:
GC_data[period_midtime] = [[],[],[],'']
GC_data[period_midtime][i].append(temp_BC)
i+=1
hdf_file.end()
#print np.mean(pressure)
#assign clusters
for line in cluslist_GC:
cluster_datetime = line[0]
cluster_no = line[1]
if cluster_datetime in sampling_times:
if cluster_no == 4:
cluster = 'Cont'
if cluster_no == 9:
cluster = 'GBPS'
if cluster_no in [6,8]:
cluster = 'SPac'
if cluster_no in [2,7]:
cluster = 'LRT'
if cluster_no in [1,3,5,10]:
cluster = 'NPac'
if (fire_time1[0] <= cluster_datetime <= fire_time1[1]) or (fire_time2[0] <= cluster_datetime <= fire_time2[1]):
cluster = 'BB'
for period_midtime in GC_data:
if period_midtime == cluster_datetime:
GC_data[period_midtime][3] = cluster
GC_6h_NPac = []
GC_6h_SPac = []
GC_6h_Cont = []
GC_6h_LRT = []
GC_6h_GBPS = []
GC_6h_BB = []
GC_6h_all_non_BB = []
for period_midtime in GC_data:
cluster = GC_data[period_midtime][3]
mean_BC_conc = np.nanmean(GC_data[period_midtime][0])
mean_BC_conc_lvl_dn = np.nanmean(GC_data[period_midtime][1])
mean_BC_conc_lvl_up = np.nanmean(GC_data[period_midtime][2])
BC_conc_ngm3_lower_limit = min(mean_BC_conc,mean_BC_conc_lvl_dn,mean_BC_conc_lvl_up)
BC_conc_ngm3_upper_limit = max(mean_BC_conc,mean_BC_conc_lvl_dn,mean_BC_conc_lvl_up)
pos_y_err = BC_conc_ngm3_upper_limit - mean_BC_conc
neg_y_err = mean_BC_conc - BC_conc_ngm3_lower_limit
mean_rel_err = ((pos_y_err+neg_y_err)/2)/mean_BC_conc
if cluster == 'BB':
GC_6h_BB.append([mean_BC_conc,mean_rel_err])
if cluster == 'Cont':
GC_6h_Cont.append([mean_BC_conc,mean_rel_err])
if cluster == 'GBPS':
GC_6h_GBPS.append([mean_BC_conc,mean_rel_err])
if cluster == 'NPac':
GC_6h_NPac.append([mean_BC_conc,mean_rel_err])
if cluster == 'SPac':
GC_6h_SPac.append([mean_BC_conc,mean_rel_err])
if cluster == 'LRT':
GC_6h_LRT.append([mean_BC_conc,mean_rel_err])
if cluster != 'BB':
GC_6h_all_non_BB.append([mean_BC_conc,mean_rel_err])
#print out percentile data and uncertainties
stats_SP2 = {
'SP2_24h_FR':[SP2_24h_FR],
'SP2_24h_BB':[SP2_24h_BB],
'SP2_6h_NPac':[SP2_6h_NPac],
'SP2_6h_SPac':[SP2_6h_SPac],
'SP2_6h_Cont':[SP2_6h_Cont],
'SP2_6h_LRT':[SP2_6h_LRT],
'SP2_6h_GBPS':[SP2_6h_GBPS],
'SP2_6h_BB':[SP2_6h_BB],
'SP2_6h_all_non_BB':[SP2_6h_all_non_BB],
}
file_list = []
print 'SP2'
for key, value in stats_SP2.iteritems():
mass_concs = [row[0] for row in value[0]]
mass_concs_rel_errs = [row[1] for row in value[0]]
#print key,'no. of samples: ', len(mass_concs)
#print key,'mass concs', np.percentile(mass_concs, 10),np.percentile(mass_concs, 50), np.percentile(mass_concs, 90), np.mean(mass_concs)
#print key,'errs',np.percentile(mass_concs_rel_errs, 50), np.mean(mass_concs_rel_errs)
stats_SP2[key].append(np.percentile(mass_concs, 50))
file_list.append([key,np.percentile(mass_concs, 10),np.percentile(mass_concs, 50), np.percentile(mass_concs, 90), np.mean(mass_concs),np.mean(mass_concs_rel_errs)])
#save stats to file
os.chdir('C:/Users/Sarah Hanna/Documents/Data/WHI long term record/GOES-Chem/')
file = open('WHI_long_term_SP2_stats_by_cluster.txt', 'w')
file.write('mass conc stats in ng/m3 - stp' +'\n')
file.write('cluster' + '\t' + '10th percentile' + '\t' + '50th percentile' + '\t' + '90th percentile' + '\t' + 'mean' + '\t' +'mean rel err' +'\n')
for row in file_list:
line = '\t'.join(str(x) for x in row)
file.write(line + '\n')
file.close()
stats_GC = {
'GC_6h_NPac':[GC_6h_NPac],
'GC_6h_SPac':[GC_6h_SPac],
'GC_6h_Cont':[GC_6h_Cont],
'GC_6h_LRT':[GC_6h_LRT],
'GC_6h_GBPS':[GC_6h_GBPS],
#'GC_6h_BB':[GC_6h_BB],
'GC_6h_all_non_BB':[GC_6h_all_non_BB],
}
print 'GC'
for key, value in stats_GC.iteritems():
mass_concs = [row[0] for row in value[0]]
mass_concs_rel_errs = [row[1] for row in value[0]]
print key,'mass concs', np.percentile(mass_concs, 10),np.percentile(mass_concs, 50), np.percentile(mass_concs, 90), np.mean(mass_concs)
#print key,'rel err', np.mean(mass_concs_rel_errs)
stats_GC[key].append(np.percentile(mass_concs, 50))
###################plotting
SP2_6h_NPac_m = [row[0] for row in SP2_6h_NPac]
SP2_6h_SPac_m = [row[0] for row in SP2_6h_SPac]
SP2_6h_Cont_m = [row[0] for row in SP2_6h_Cont]
SP2_6h_LRT_m = [row[0] for row in SP2_6h_LRT]
SP2_6h_GBPS_m = [row[0] for row in SP2_6h_GBPS]
SP2_6h_BB_m = [row[0] for row in SP2_6h_BB]
SP2_6h_all_non_BB_m = [row[0] for row in SP2_6h_all_non_BB]
GC_6h_NPac_m = [row[0] for row in GC_6h_NPac]
GC_6h_SPac_m = [row[0] for row in GC_6h_SPac]
GC_6h_Cont_m = [row[0] for row in GC_6h_Cont]
GC_6h_LRT_m = [row[0] for row in GC_6h_LRT]
GC_6h_GBPS_m = [row[0] for row in GC_6h_GBPS]
GC_6h_BB_m = [row[0] for row in GC_6h_BB]
GC_6h_all_non_BB_m = [row[0] for row in GC_6h_all_non_BB]
fig = plt.figure(figsize=(12,6))
bin_number = 20
bin_number_BB = 20
FT_UL = 300
FT_UL_BB = 625
bin_range = (0,FT_UL)
bin_range_BB = (0,FT_UL_BB)
incr = 100
ax1 = plt.subplot2grid((2,5), (0,0), colspan=1)
ax2 = plt.subplot2grid((2,5), (0,1), colspan=1, sharey=ax1)
ax3 = plt.subplot2grid((2,5), (0,2), colspan=1, sharey=ax1)
ax4 = plt.subplot2grid((2,5), (0,3), colspan=1, sharey=ax1)
ax5 = plt.subplot2grid((2,5), (0,4), colspan=1, sharey=ax1)
#ax11 = plt.subplot2grid((2,6), (0,5), colspan=1, sharey=ax1)
ax6 = plt.subplot2grid((2,5), (1,0), colspan=1)
ax7 = plt.subplot2grid((2,5), (1,1), colspan=1, sharey=ax6)
ax8 = plt.subplot2grid((2,5), (1,2), colspan=1, sharey=ax6)
ax9 = plt.subplot2grid((2,5), (1,3), colspan=1, sharey=ax6)
ax10 = plt.subplot2grid((2,5), (1,4), colspan=1, sharey=ax6)
#ax12 = plt.subplot2grid((2,6), (1,5), colspan=1, sharey=ax6)
#SP2
ax1.hist(SP2_6h_NPac_m,bins = bin_number, range = bin_range)
ax1.xaxis.set_visible(True)
ax1.yaxis.set_visible(True)
ax1.set_ylabel('frequency - Measurements')
ax1.text(0.25, 0.9,'N. Pacific', transform=ax1.transAxes)
#ax1.set_ylim(0,40)
ax1.xaxis.tick_top()
ax1.xaxis.set_label_position('top')
ax1.xaxis.set_ticks(np.arange(0, FT_UL, incr))
ax1.axvline(stats_SP2['SP2_6h_NPac'][1], color= 'black', linestyle = '--')
ax2.hist(SP2_6h_SPac_m,bins = bin_number, range = bin_range)
ax2.xaxis.set_visible(True)
ax2.yaxis.set_visible(False)
ax2.text(0.25, 0.9,'S. Pacific', transform=ax2.transAxes)
ax2.xaxis.tick_top()
ax2.xaxis.set_label_position('top')
ax2.xaxis.set_ticks(np.arange(0, FT_UL, incr))
ax2.axvline(stats_SP2['SP2_6h_SPac'][1], color= 'black', linestyle = '--')
ax3.hist(SP2_6h_GBPS_m,bins = bin_number, range = bin_range)
ax3.xaxis.set_visible(True)
ax3.yaxis.set_visible(False)
ax3.text(0.2, 0.82,'Georgia Basin/\nPuget Sound', transform=ax3.transAxes)
ax3.xaxis.tick_top()
ax3.xaxis.set_label_position('top')
ax3.xaxis.set_ticks(np.arange(0, FT_UL, incr))
ax3.axvline(stats_SP2['SP2_6h_GBPS'][1], color= 'black', linestyle = '--')
ax4.hist(SP2_6h_LRT_m,bins = bin_number, range = bin_range)
ax4.xaxis.set_visible(True)
ax4.yaxis.set_visible(False)
ax4.text(0.2, 0.9,'W. Pacific/Asia', transform=ax4.transAxes)
ax4.xaxis.tick_top()
ax4.xaxis.set_label_position('top')
ax4.xaxis.set_ticks(np.arange(0, FT_UL, incr))
ax4.axvline(stats_SP2['SP2_6h_LRT'][1], color= 'black', linestyle = '--')
ax5.hist(SP2_6h_Cont_m,bins = bin_number, range = bin_range)
ax5.xaxis.set_visible(True)
ax5.yaxis.set_visible(False)
ax5.text(0.25, 0.9,'N. Canada', transform=ax5.transAxes)
ax5.xaxis.tick_top()
ax5.xaxis.set_label_position('top')
ax5.xaxis.set_ticks(np.arange(0, FT_UL, incr))
ax5.axvline(stats_SP2['SP2_6h_Cont'][1], color= 'black', linestyle = '--')
#ax11.hist(SP2_6h_BB_m,bins = bin_number_BB, range = bin_range_BB)
#ax11.xaxis.set_visible(True)
#ax11.yaxis.set_visible(False)
#ax11.text(0.4, 0.9,'BB', transform=ax11.transAxes)
#ax11.xaxis.tick_top()
#ax11.xaxis.set_label_position('top')
#ax11.xaxis.set_ticks(np.arange(0, FT_UL_BB, 200))
#ax11.axvline(stats_SP2['SP2_6h_Cont'][1], color= 'black', linestyle = '--')
#GC
ax6.hist(GC_6h_NPac_m,bins = bin_number, range = bin_range, color = 'green')
ax6.xaxis.set_visible(True)
ax6.yaxis.set_visible(True)
ax6.set_ylabel('frequency - GEOS-Chem')
ax6.xaxis.set_ticks(np.arange(0, FT_UL, incr))
ax6.axvline(stats_GC['GC_6h_NPac'][1], color= 'black', linestyle = '--')
ax7.hist(GC_6h_SPac_m,bins = bin_number, range = bin_range, color = 'green')
ax7.xaxis.set_visible(True)
ax7.yaxis.set_visible(False)
ax7.xaxis.set_ticks(np.arange(0, FT_UL, incr))
ax7.axvline(stats_GC['GC_6h_SPac'][1], color= 'black', linestyle = '--')
ax8.hist(GC_6h_GBPS_m,bins = bin_number, range = bin_range, color = 'green')
ax8.xaxis.set_visible(True)
ax8.yaxis.set_visible(False)
ax8.set_xlabel('6h rBC mass concentration (ng/m3 - STP)')
ax8.xaxis.set_ticks(np.arange(0, FT_UL, incr))
ax8.axvline(stats_GC['GC_6h_GBPS'][1], color= 'black', linestyle = '--')
ax9.hist(GC_6h_LRT_m,bins = bin_number, range = bin_range, color = 'green')
ax9.xaxis.set_visible(True)
ax9.yaxis.set_visible(False)
ax9.xaxis.set_ticks(np.arange(0, FT_UL, incr))
ax9.axvline(stats_GC['GC_6h_LRT'][1], color= 'black', linestyle = '--')
ax10.hist(GC_6h_Cont_m,bins = bin_number, range = bin_range, color = 'green')
ax10.xaxis.set_visible(True)
ax10.yaxis.set_visible(False)
ax10.xaxis.set_ticks(np.arange(0, FT_UL, incr))
ax10.axvline(stats_GC['GC_6h_Cont'][1], color= 'black', linestyle = '--')
#ax12.hist(GC_6h_BB_m,bins = bin_number_BB, range = bin_range_BB, color = 'green')
#ax12.xaxis.set_visible(True)
#ax12.yaxis.set_visible(False)
#ax12.xaxis.set_ticks(np.arange(0, FT_UL_BB, 200))
#ax12.axvline(stats_GC['GC_6h_BB'][1], color= 'black', linestyle = '--')
plt.subplots_adjust(hspace=0.05)
plt.subplots_adjust(wspace=0.05)
os.chdir('C:/Users/Sarah Hanna/Documents/Data/WHI long term record/GOES-Chem/')
plt.savefig('histograms -clustered GEOS-Chem and measurements - 6h FT - JW_wet_scavenging.png',bbox_inches='tight')
plt.show()
###################plotting 3
fig = plt.figure(figsize=(6,8))
bin_number_all_FT = 30
UL_all_FT = 300
bin_range_all_FT = (0,UL_all_FT)
ax1 = plt.subplot2grid((2,1), (0,0), colspan=1)
ax2 = plt.subplot2grid((2,1), (1,0), colspan=1)
#SP2
ax1.hist(SP2_6h_all_non_BB_m,bins = bin_number_all_FT, range = bin_range_all_FT)
ax1.xaxis.set_visible(True)
ax1.yaxis.set_visible(True)
ax1.set_ylabel('frequency - Measurements')
#ax1.text(0.25, 0.80,'All nighttime measurements \n(not including biomass burning periods)', transform=ax1.transAxes)
#ax1.set_ylim(0,40)
ax1.xaxis.tick_top()
ax1.xaxis.set_label_position('top')
ax1.xaxis.set_ticks(np.arange(0, UL_all_FT, 50))
ax1.axvline(stats_SP2['SP2_6h_all_non_BB'][1], color= 'black', linestyle = '--')
#GC
ax2.hist(GC_6h_all_non_BB_m,bins = bin_number_all_FT, range = bin_range_all_FT, color = 'green')
ax2.xaxis.set_visible(True)
ax2.yaxis.set_visible(True)
ax2.set_ylabel('frequency - GEOS-Chem')
ax2.xaxis.set_ticks(np.arange(0, UL_all_FT, 50))
ax2.axvline(stats_GC['GC_6h_all_non_BB'][1], color= 'black', linestyle = '--')
ax2.set_xlabel('6h rBC mass concentration (ng/m3 - STP)')
#plt.figtext(0.35,0.06, '6h rBC mass concentration (ng/m3 - STP)')
plt.subplots_adjust(hspace=0.07)
plt.subplots_adjust(wspace=0.07)
plt.savefig('histograms - GEOS-Chem and measurements - all non-BB FT - 6h - JW_wet_scavenging.png',bbox_inches='tight')
plt.show()
SP2_6h_all_non_BB
| mit |
fyffyt/scikit-learn | doc/tutorial/text_analytics/solutions/exercise_01_language_train_model.py | 254 | 2253 | """Build a language detector model
The goal of this exercise is to train a linear classifier on text features
that represent sequences of up to 3 consecutive characters so as to be
recognize natural languages by using the frequencies of short character
sequences as 'fingerprints'.
"""
# Author: Olivier Grisel <[email protected]>
# License: Simplified BSD
import sys
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import Perceptron
from sklearn.pipeline import Pipeline
from sklearn.datasets import load_files
from sklearn.cross_validation import train_test_split
from sklearn import metrics
# The training data folder must be passed as first argument
languages_data_folder = sys.argv[1]
dataset = load_files(languages_data_folder)
# Split the dataset in training and test set:
docs_train, docs_test, y_train, y_test = train_test_split(
dataset.data, dataset.target, test_size=0.5)
# TASK: Build a vectorizer that splits strings into sequences of 1 to 3
# characters instead of word tokens
vectorizer = TfidfVectorizer(ngram_range=(1, 3), analyzer='char',
use_idf=False)
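# Added illustration (not part of the original exercise): with analyzer='char'
# and ngram_range=(1, 3), the analyzer built by this vectorizer maps a short
# string to overlapping character n-grams, e.g. (exact ordering may vary by
# scikit-learn version):
#
#   vectorizer.build_analyzer()("ab cd")
#   # -> ['a', 'b', ' ', 'c', 'd', 'ab', 'b ', ' c', 'cd', 'ab ', 'b c', ' cd']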
# TASK: Build a vectorizer / classifier pipeline using the previous analyzer
# the pipeline instance should be stored in a variable named clf
clf = Pipeline([
('vec', vectorizer),
('clf', Perceptron()),
])
# TASK: Fit the pipeline on the training set
clf.fit(docs_train, y_train)
# TASK: Predict the outcome on the testing set in a variable named y_predicted
y_predicted = clf.predict(docs_test)
# Print the classification report
print(metrics.classification_report(y_test, y_predicted,
target_names=dataset.target_names))
# Plot the confusion matrix
cm = metrics.confusion_matrix(y_test, y_predicted)
print(cm)
#import pylab as pl
#pl.matshow(cm, cmap=pl.cm.jet)
#pl.show()
# Predict the result on some short new sentences:
sentences = [
u'This is a language detection test.',
u'Ceci est un test de d\xe9tection de la langue.',
u'Dies ist ein Test, um die Sprache zu erkennen.',
]
predicted = clf.predict(sentences)
for s, p in zip(sentences, predicted):
print(u'The language of "%s" is "%s"' % (s, dataset.target_names[p]))
| bsd-3-clause |
wooga/airflow | airflow/providers/presto/hooks/presto.py | 1 | 4892 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import prestodb
from prestodb.exceptions import DatabaseError
from prestodb.transaction import IsolationLevel
from airflow.hooks.dbapi_hook import DbApiHook
class PrestoException(Exception):
"""
Presto exception
"""
class PrestoHook(DbApiHook):
"""
Interact with Presto through prestodb.
>>> ph = PrestoHook()
>>> sql = "SELECT count(1) AS num FROM airflow.static_babynames"
>>> ph.get_records(sql)
[[340698]]
"""
conn_name_attr = 'presto_conn_id'
default_conn_name = 'presto_default'
def get_conn(self):
"""Returns a connection object"""
db = self.get_connection(self.presto_conn_id) # pylint: disable=no-member
auth = prestodb.auth.BasicAuthentication(db.login, db.password) if db.password else None
return prestodb.dbapi.connect(
host=db.host,
port=db.port,
user=db.login,
source=db.extra_dejson.get('source', 'airflow'),
http_scheme=db.extra_dejson.get('protocol', 'http'),
catalog=db.extra_dejson.get('catalog', 'hive'),
schema=db.schema,
auth=auth,
isolation_level=self.get_isolation_level()
)
def get_isolation_level(self):
"""Returns an isolation level"""
db = self.get_connection(self.presto_conn_id) # pylint: disable=no-member
isolation_level = db.extra_dejson.get('isolation_level', 'AUTOCOMMIT').upper()
return getattr(IsolationLevel, isolation_level, IsolationLevel.AUTOCOMMIT)
@staticmethod
def _strip_sql(sql):
return sql.strip().rstrip(';')
def get_records(self, hql, parameters=None):
"""
Get a set of records from Presto
"""
try:
return super().get_records(
self._strip_sql(hql), parameters)
except DatabaseError as e:
raise PrestoException(e)
def get_first(self, hql, parameters=None):
"""
Returns only the first row, regardless of how many rows the query
returns.
"""
try:
return super().get_first(
self._strip_sql(hql), parameters)
except DatabaseError as e:
raise PrestoException(e)
def get_pandas_df(self, hql, parameters=None):
"""
Get a pandas dataframe from a sql query.
"""
import pandas
cursor = self.get_cursor()
try:
cursor.execute(self._strip_sql(hql), parameters)
data = cursor.fetchall()
except DatabaseError as e:
raise PrestoException(e)
column_descriptions = cursor.description
if data:
df = pandas.DataFrame(data)
df.columns = [c[0] for c in column_descriptions]
else:
df = pandas.DataFrame()
return df
def run(self, hql, parameters=None):
"""
Execute the statement against Presto. Can be used to create views.
"""
return super().run(self._strip_sql(hql), parameters)
def insert_rows(self, table, rows, target_fields=None, commit_every=0):
"""
A generic way to insert a set of tuples into a table.
:param table: Name of the target table
:type table: str
:param rows: The rows to insert into the table
:type rows: iterable of tuples
:param target_fields: The names of the columns to fill in the table
:type target_fields: iterable of strings
:param commit_every: The maximum number of rows to insert in one
transaction. Set to 0 to insert all rows in one transaction.
:type commit_every: int
"""
if self.get_isolation_level() == IsolationLevel.AUTOCOMMIT:
self.log.info(
                'Transactions are not enabled in presto connection. '
'Please use the isolation_level property to enable it. '
'Falling back to insert all rows in one transaction.'
)
commit_every = 0
super().insert_rows(table, rows, target_fields, commit_every)
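# Hedged usage sketch (not part of the original hook). It assumes a running
# Airflow deployment with a Presto connection configured under the default
# connection id 'presto_default'; the schema and table names below are
# placeholders only.
#
#   hook = PrestoHook()
#   df = hook.get_pandas_df("SELECT * FROM example_schema.example_table LIMIT 10")
#   hook.run("CREATE VIEW example_schema.example_view AS SELECT 1 AS col")
#   hook.insert_rows("example_schema.example_table", rows=[(1, 'a'), (2, 'b')])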
| apache-2.0 |
yassineS/COSMOS-2.0 | cosmos/web/gemon/views.py | 2 | 1089 | import getpass
from flask import Blueprint
from flask import render_template
bprint = Blueprint('gemon', __name__, template_folder='templates')
@bprint.route('/')
def home():
import numpy as np
from .ge import qstat
import pandas as pd
# df_user = qstat()
df_all = qstat('*')
if len(df_all) != 0:
df_user = df_all[df_all['JB_owner'] == getpass.getuser()]
def summarize(df):
def f():
for state, df_ in df.groupby(['state']):
yield '%s_jobs' % state, [len(df_)]
yield '%s_slots' % state, [df_.slots.astype(int).sum()]
yield 'sum(io_usage)', ["{:,}".format(int(np.nan_to_num(df.io_usage.astype(float).sum())))]
return pd.DataFrame(dict(f()))
df_user_summary = summarize(df_user)
df_all_summary = summarize(df_all)
else:
        df_user, df_user_summary, df_all_summary = None, None, None  # also define df_user to avoid a NameError below
return render_template('gemon/home.html', df_user=df_user, df_user_summary=df_user_summary,
df_all_summary=df_all_summary) | gpl-3.0 |
nighres/nighres | nighres/io/io_mesh.py | 1 | 25827 | import nibabel as nb
import numpy as np
# TODO: compare with Nilearn functions and possibly extend
def load_mesh(surf_mesh):
'''
Load a mesh into a dictionary with entries
"points", "faces" and "data"
Parameters
----------
surf_mesh:
Mesh to be loaded, can be a path to a file
(currently supported formats are freesurfer geometry formats,
gii and ASCII-coded vtk, ply or obj) or a dictionary with the
keys "points", "faces" and (optionally) "data"
Returns
----------
dict
Dictionary with a numpy array with key "points" for a Numpy array of
the x-y-z coordinates of the mesh vertices and key "faces" for a
Numpy array of the the indices (into points) of the mesh faces.
Optional "data" key is a Numpy array of values sampled on the "points".
Notes
----------
Originally created as part of Laminar Python [1]_
References
-----------
.. [1] Huntenburg et al. (2017), Laminar Python: Tools for cortical
depth-resolved analysis of high-resolution brain imaging data in
Python. DOI: 10.3897/rio.3.e12346
'''
if surf_mesh.endswith('vtk'):
points, faces, data = _read_vtk(surf_mesh)
return {'points': points, 'faces': faces, 'data': data}
elif surf_mesh.endswith('gii'):
points, faces, data = _read_gifti(surf_mesh)
return {'points': points, 'faces': faces, 'data': data}
else:
geom = load_mesh_geometry(surf_mesh)
return geom
def save_mesh(filename, surf_dict):
'''
Saves surface mesh to file
Parameters
----------
filename: str
Full path and filename under which surfaces data should be saved. The
extension determines the file format. Currently supported are
freesurfer geometry formats, gii and ASCII-coded vtk, obj, ply. Note
that only ASCII-coded vtk currently saves data, the others only save
the geometry.
surf_dict: dict
Surface mesh geometry to be saved. Dictionary with a numpy array with
key "points" for a Numpy array of the x-y-z coordinates of the mesh
        vertices and key "faces" for a Numpy array of the indices
(into points) of the mesh faces. Optional "data" key is a Numpy array
of values sampled on the "points"
Notes
----------
Originally created as part of Laminar Python [1]_
References
-----------
.. [1] Huntenburg et al. (2017), Laminar Python: Tools for cortical
depth-resolved analysis of high-resolution brain imaging data in
Python. DOI: 10.3897/rio.3.e12346
'''
if filename.endswith('vtk'):
_write_vtk(filename, surf_dict['points'], surf_dict['faces'],
surf_dict['data'])
elif filename.endswith('gii'):
_write_gifti(filename, surf_dict['points'], surf_dict['faces'],
surf_dict['data'])
else:
save_mesh_geometry(filename, surf_dict)
def load_mesh_geometry(surf_mesh):
'''
Load a mesh geometry into a dictionary with entries
"points" and "faces"
Parameters
----------
surf_mesh:
Mesh geometry to be loaded, can be a path to a file
(currently supported formats are freesurfer geometry formats,
gii and ASCII-coded vtk, ply or obj) or a dictionary with the
keys "points" and "faces"
Returns
----------
dict
Dictionary with a numpy array with key "points" for a Numpy array of
the x-y-z coordinates of the mesh vertices and key "faces" for a
        Numpy array of the indices (into points) of the mesh faces
Notes
----------
Originally created as part of Laminar Python [1]_
References
-----------
.. [1] Huntenburg et al. (2017), Laminar Python: Tools for cortical
depth-resolved analysis of high-resolution brain imaging data in
Python. DOI: 10.3897/rio.3.e12346
'''
# if input is a filename, try to load it with nibabel
if isinstance(surf_mesh, str):
if (surf_mesh.endswith('orig') or surf_mesh.endswith('pial') or
surf_mesh.endswith('white') or surf_mesh.endswith('sphere') or
surf_mesh.endswith('inflated')):
points, faces = nb.freesurfer.io.read_geometry(surf_mesh)
elif surf_mesh.endswith('gii'):
points, faces, = _read_gifti(surf_mesh)
elif surf_mesh.endswith('vtk'):
points, faces, _ = _read_vtk(surf_mesh)
elif surf_mesh.endswith('ply'):
points, faces = _read_ply(surf_mesh)
elif surf_mesh.endswith('obj'):
points, faces = _read_obj(surf_mesh)
else:
raise ValueError('Currently supported file formats are freesurfer '
'geometry formats and gii, vtk, ply, obj')
elif isinstance(surf_mesh, dict):
if ('faces' in surf_mesh and 'points' in surf_mesh):
points, faces = surf_mesh['points'], surf_mesh['faces']
else:
raise ValueError('If surf_mesh is given as a dictionary it '
'must contain items with keys "points" and '
'"faces"')
else:
raise ValueError('Input surf_mesh must be a either filename or a '
'dictionary containing items with keys "points" '
'and "faces"')
return {'points': points, 'faces': faces}
def load_mesh_data(surf_data, gii_darray=None):
'''
Loads mesh data into a Numpy array
Parameters
----------
surf_data:
Mesh data to be loaded, can be a Numpy array or a path to a file.
Currently supported formats are freesurfer data formats (mgz, curv,
sulc, thickness, annot, label), nii, gii, ASCII-coded vtk and txt
gii_darray: int, optional
Index of gii data array to load (default is to load all)
Returns
----------
np.ndarray
Numpy array containing the data
Notes
----------
Originally created as part of Laminar Python [1]_
References
-----------
.. [1] Huntenburg et al. (2017), Laminar Python: Tools for cortical
depth-resolved analysis of high-resolution brain imaging data in
Python. DOI: 10.3897/rio.3.e12346
'''
# if the input is a filename, load it
if isinstance(surf_data, str):
if (surf_data.endswith('nii') or surf_data.endswith('nii.gz') or
surf_data.endswith('mgz')):
data = np.squeeze(nb.load(surf_data).get_data())
elif (surf_data.endswith('curv') or surf_data.endswith('sulc') or
surf_data.endswith('thickness')):
data = nb.freesurfer.io.read_morph_data(surf_data)
elif surf_data.endswith('annot'):
data = nb.freesurfer.io.read_annot(surf_data)[0]
elif surf_data.endswith('label'):
data = nb.freesurfer.io.read_label(surf_data)
# check if this works with multiple indices (if dim(data)>1)
elif surf_data.endswith('gii'):
_, _, data = _read_gifti(surf_data)
elif surf_data.endswith('vtk'):
_, _, data = _read_vtk(surf_data)
elif surf_data.endswith('txt'):
data = np.loadtxt(surf_data)
else:
            raise ValueError('Format of data file not recognized. Currently '
                             'supported formats are freesurfer data formats '
                             '(mgz, sulc, curv, thickness, annot, label), '
                             'nii, gii, ASCII-coded vtk and txt')
elif isinstance(surf_data, np.ndarray):
data = np.squeeze(surf_data)
return data
def save_mesh_data(filename, surf_data):
'''
Saves surface data that is a Numpy array to file
Parameters
----------
filename: str
Full path and filename under which surfaces data should be saved. The
extension determines the file format. Currently supported are
freesurfer formats curv, thickness, sulc and ASCII-coded txt'
surf_data: np.ndarray
Surface data to be saved
Notes
----------
Originally created as part of Laminar Python [1]_
References
-----------
.. [1] Huntenburg et al. (2017), Laminar Python: Tools for cortical
depth-resolved analysis of high-resolution brain imaging data in
Python. DOI: 10.3897/rio.3.e12346
'''
if isinstance(filename, str) and isinstance(surf_data, np.ndarray):
if (filename.endswith('curv') or filename.endswith('thickness') or
filename.endswith('sulc')):
nb.freesurfer.io.write_morph_data(filename, surf_data)
print("\nSaving {0}".format(filename))
elif filename.endswith('txt'):
np.savetxt(filename, surf_data)
print("\nSaving {0}".format(filename))
else:
raise ValueError('File format not recognized. Currently supported '
'are freesurfer formats curv, sulc, thickness '
'and ASCII coded vtk and txt')
else:
raise ValueError('Filename must be a string')
def save_mesh_geometry(filename, surf_dict):
'''
Saves surface mesh geometry to file
Parameters
----------
filename: str
Full path and filename under which surfaces data should be saved. The
extension determines the file format. Currently supported are
freesurfer geometry formats, gii and ASCII-coded vtk, obj, ply'
surf_dict: dict
Surface mesh geometry to be saved. Dictionary with a numpy array with
key "points" for a Numpy array of the x-y-z coordinates of the mesh
        vertices and key "faces" for a Numpy array of the indices
(into points) of the mesh faces
Notes
----------
Originally created as part of Laminar Python [1]_
References
-----------
.. [1] Huntenburg et al. (2017), Laminar Python: Tools for cortical
depth-resolved analysis of high-resolution brain imaging data in
Python. DOI: 10.3897/rio.3.e12346
'''
if isinstance(filename, str) and isinstance(surf_dict, dict):
if (filename.endswith('orig') or filename.endswith('pial') or
filename.endswith('white') or filename.endswith('sphere') or
filename.endswith('inflated')):
nb.freesurfer.io.write_geometry(filename, surf_dict['points'],
surf_dict['faces'])
print("\nSaving {0}".format(filename))
elif filename.endswith('gii'):
_write_gifti(filename, surf_dict['points'], surf_dict['faces'])
print("\nSaving {0}".format(filename))
elif filename.endswith('vtk'):
if 'data' in surf_dict.keys():
_write_vtk(filename, surf_dict['points'], surf_dict['faces'],
surf_dict['data'])
print("\nSaving {0}".format(filename))
else:
_write_vtk(filename, surf_dict['points'], surf_dict['faces'])
print("\nSaving {0}".format(filename))
elif filename.endswith('ply'):
_write_ply(filename, surf_dict['points'], surf_dict['faces'])
print("\nSaving {0}".format(filename))
elif filename.endswith('obj'):
_write_obj(filename, surf_dict['points'], surf_dict['faces'])
print("\nSaving {0}".format(filename))
print('To view mesh in brainview, run the command:\n')
print('average_objects ' + filename + ' ' + filename)
else:
raise ValueError('Filename must be a string and surf_dict must be a '
'dictionary with keys "points" and "faces"')
def _read_gifti(file):
points = nb.gifti.read(file).get_arrays_from_intent(
nb.nifti1.intent_codes['NIFTI_INTENT_POINTSET'])[0].data
faces = nb.gifti.read(file).get_arrays_from_intent(
nb.nifti1.intent_codes['NIFTI_INTENT_TRIANGLE'])[0].data
narrays = len(nb.gifti.read(file).darrays)-2
if narrays>0:
data = np.zeros([points.shape[0], narrays])
n=0;
for darray in nb.gifti.read(file).darrays:
if darray.intent is not nb.nifti1.intent_codes['NIFTI_INTENT_POINTSET'] \
and darray.intent is not nb.nifti1.intent_codes['NIFTI_INTENT_TRIANGLE']:
data[:,n] = darray.data
n=n+1
else:
data = None
return points, faces, data
# function to read vtk files
# ideally use pyvtk, but it didn't work for our data, look into why
def _read_vtk(file):
'''
Reads ASCII coded vtk files using pandas,
returning vertices, faces and data as three numpy arrays.
'''
import pandas as pd
import csv
# read full file while dropping empty lines
try:
vtk_df = pd.read_csv(file, header=None, engine='python')
except csv.Error:
raise ValueError(
'This vtk file appears to be binary coded currently only ASCII '
'coded vtk files can be read')
vtk_df = vtk_df.dropna()
# extract number of vertices and faces
number_vertices = int(vtk_df[vtk_df[0].str.contains(
'POINTS')][0].iloc[0].split()[1])
number_faces = int(vtk_df[vtk_df[0].str.contains(
'POLYGONS')][0].iloc[0].split()[1])
# read vertices into df and array
start_vertices = (vtk_df[vtk_df[0].str.contains(
'POINTS')].index.tolist()[0]) + 1
vertex_df = pd.read_csv(file, skiprows=range(start_vertices),
nrows=number_vertices, delim_whitespace=True,
header=None, engine='python')
if np.array(vertex_df).shape[1] == 3:
vertex_array = np.array(vertex_df)
# sometimes the vtk format is weird with 9 indices per line,
# then it has to be reshaped
elif np.array(vertex_df).shape[1] == 9:
vertex_df = pd.read_csv(file, skiprows=range(start_vertices),
nrows=int(number_vertices / 3) + 1,
delim_whitespace=True, header=None,
engine='python')
vertex_array = np.array(vertex_df.iloc[0:1, 0:3])
vertex_array = np.append(vertex_array, vertex_df.iloc[0:1, 3:6],
axis=0)
vertex_array = np.append(vertex_array, vertex_df.iloc[0:1, 6:9],
axis=0)
for row in range(1, (int(number_vertices / 3) + 1)):
for col in [0, 3, 6]:
vertex_array = np.append(vertex_array, np.array(
vertex_df.iloc[row:(row + 1), col:(col + 3)]), axis=0)
# strip rows containing nans
vertex_array = vertex_array[~np.isnan(vertex_array)].reshape(
number_vertices, 3)
else:
print("vertex indices out of shape")
# read faces into df and array
start_faces = (vtk_df[vtk_df[0].str.contains(
'POLYGONS')].index.tolist()[0]) + 1
face_df = pd.read_csv(file, skiprows=range(start_faces),
nrows=number_faces, delim_whitespace=True,
header=None, engine='python')
face_array = np.array(face_df.iloc[:, 1:4])
# read data into df and array if exists
if vtk_df[vtk_df[0].str.contains('POINT_DATA')].index.tolist() != []:
start_data = (vtk_df[vtk_df[0].str.contains(
'POINT_DATA')].index.tolist()[0]) + 3
number_data = number_vertices
data_df = pd.read_csv(file, skiprows=range(start_data),
nrows=number_data, delim_whitespace=True,
header=None, engine='python')
data_array = np.array(data_df)
else:
data_array = None
return vertex_array, face_array, data_array
def _read_ply(file):
import pandas as pd
import csv
# read full file and drop empty lines
try:
ply_df = pd.read_csv(file, header=None, engine='python')
except csv.Error:
raise ValueError(
'This ply file appears to be binary coded currently only '
'ASCII coded ply files can be read')
ply_df = ply_df.dropna()
# extract number of vertices and faces, and row that marks end of header
number_vertices = int(ply_df[ply_df[0].str.contains(
'element vertex')][0].iloc[0].split()[2])
number_faces = int(ply_df[ply_df[0].str.contains(
'element face')][0].iloc[0].split()[2])
end_header = ply_df[ply_df[0].str.contains('end_header')].index.tolist()[0]
# read vertex coordinates into dict
vertex_df = pd.read_csv(file, skiprows=range(end_header + 1),
nrows=number_vertices, sep='\s*', header=None,
engine='python')
vertex_array = np.array(vertex_df)
# read face indices into dict
face_df = pd.read_csv(file,
skiprows=range(end_header + number_vertices + 1),
nrows=number_faces, sep='\s*', header=None,
engine='python')
face_array = np.array(face_df.iloc[:, 1:4])
return vertex_array, face_array
# function to read MNI obj mesh format
def _read_obj(file):
def chunks(l, n):
"""Yield n-sized chunks from l"""
for i in range(0, len(l), n):
yield l[i:i + n]
def indices(lst, element):
result = []
offset = -1
while True:
try:
offset = lst.index(element, offset + 1)
except ValueError:
return result
result.append(offset)
fp = open(file, 'r')
n_vert = []
n_poly = []
k = 0
Polys = []
# Find number of vertices and number of polygons, stored in .obj file.
# Then extract list of all vertices in polygons
for i, line in enumerate(fp):
if i == 0:
# Number of vertices
n_vert = int(line.split()[6])
XYZ = np.zeros((n_vert, 3))
elif i <= n_vert:
XYZ[i - 1] = [float(num) for num in line.split()]
elif i > 2 * n_vert + 5:
if not line.strip():
k = 1
elif k == 1:
Polys.extend(line.split())
Polys = [int(num) for num in Polys]
npPolys = np.array(Polys)
triangles = np.array(list(chunks(Polys, 3)))
return XYZ, triangles
def _write_gifti(surf_mesh, points, faces, data=None):
coord_array = nb.gifti.GiftiDataArray(data=points,
intent=nb.nifti1.intent_codes[
'NIFTI_INTENT_POINTSET'])
face_array = nb.gifti.GiftiDataArray(data=faces,
intent=nb.nifti1.intent_codes[
'NIFTI_INTENT_TRIANGLE'])
if data is not None:
data_array = nb.gifti.GiftiDataArray(data=data,
intent=nb.nifti1.intent_codes[
'NIFTI_INTENT_ESTIMATE'])
gii = nb.gifti.GiftiImage(darrays=[coord_array, face_array, data_array])
else:
gii = nb.gifti.GiftiImage(darrays=[coord_array, face_array])
nb.gifti.write(gii, surf_mesh)
def _write_obj(surf_mesh, points, faces):
# write out MNI - obj format
n_vert = len(points)
XYZ = points.tolist()
Tri = faces.tolist()
with open(surf_mesh, 'w') as s:
line1 = "P 0.3 0.3 0.4 10 1 " + str(n_vert) + "\n"
s.write(line1)
k = -1
for a in XYZ:
k += 1
cor = ' ' + ' '.join(map(str, XYZ[k]))
s.write('%s\n' % cor)
s.write('\n')
for a in XYZ:
s.write(' 0 0 0\n')
s.write('\n')
l = ' ' + str(len(Tri)) + '\n'
s.write(l)
s.write(' 0 1 1 1 1\n')
s.write('\n')
nt = len(Tri) * 3
Triangles = np.arange(3, nt + 1, 3)
        Rounded8 = np.shape(Triangles)[0] // 8  # integer division so the slicing below works under Python 3
N8 = 8 * Rounded8
Triangles8 = Triangles[0:N8]
        RowsOf8 = np.split(Triangles8, N8 // 8)
for r in RowsOf8:
L = r.tolist()
Lint = map(int, L)
Line = ' ' + ' '.join(map(str, Lint))
s.write('%s\n' % Line)
L = Triangles[N8:].tolist()
Lint = map(int, L)
Line = ' ' + ' '.join(map(str, Lint))
s.write('%s\n' % Line)
s.write('\n')
ListOfTriangles = np.array(Tri).flatten()
        Rounded8 = np.shape(ListOfTriangles)[0] // 8  # integer division, as above
N8 = 8 * Rounded8
Triangles8 = ListOfTriangles[0:N8]
ListTri8 = ListOfTriangles[0:N8]
        RowsOf8 = np.split(Triangles8, N8 // 8)
for r in RowsOf8:
L = r.tolist()
Lint = map(int, L)
Line = ' ' + ' '.join(map(str, Lint))
s.write('%s\n' % Line)
L = ListOfTriangles[N8:].tolist()
Lint = map(int, L)
Line = ' ' + ' '.join(map(str, Lint))
s.write('%s\n' % Line)
def _write_vtk(filename, vertices, faces, data=None, comment=None):
'''
Creates ASCII coded vtk file from numpy arrays using pandas.
Inputs:
-------
(mandatory)
* filename: str, path to location where vtk file should be stored
* vertices: numpy array with vertex coordinates, shape (n_vertices, 3)
* faces: numpy array with face specifications, shape (n_faces, 3)
(optional)
* data: numpy array with data points, shape (n_vertices, n_datapoints)
NOTE: n_datapoints can be =1 but cannot be skipped (n_vertices,)
* comment: str, is written into the comment section of the vtk file
Usage:
---------------------
_write_vtk('/path/to/vtk/file.vtk', v_array, f_array)
'''
import pandas as pd
# infer number of vertices and faces
number_vertices = vertices.shape[0]
number_faces = faces.shape[0]
if data is not None:
number_data = data.shape[0]
# make header and subheader dataframe
header = ['# vtk DataFile Version 3.0',
'%s' % comment,
'ASCII',
'DATASET POLYDATA',
'POINTS %i float' % number_vertices
]
header_df = pd.DataFrame(header)
sub_header = ['POLYGONS %i %i' % (number_faces, 4 * number_faces)]
sub_header_df = pd.DataFrame(sub_header)
# make dataframe from vertices
vertex_df = pd.DataFrame(vertices)
# make dataframe from faces, appending first row of 3's
# (indicating the polygons are triangles)
triangles = np.reshape(3 * (np.ones(number_faces)), (number_faces, 1))
triangles = triangles.astype(int)
faces = faces.astype(int)
faces_df = pd.DataFrame(np.concatenate((triangles, faces), axis=1))
# write dfs to csv
header_df.to_csv(filename, header=None, index=False)
with open(filename, 'a') as f:
vertex_df.to_csv(f, header=False, index=False, float_format='%.3f',
sep=' ')
with open(filename, 'a') as f:
sub_header_df.to_csv(f, header=False, index=False)
with open(filename, 'a') as f:
faces_df.to_csv(f, header=False, index=False, float_format='%.0f',
sep=' ')
# if there is data append second subheader and data
if data is not None:
if len(data.shape)>1:
datapoints = data.shape[1]
sub_header2 = ['POINT_DATA %i' % (number_data),
'SCALARS Scalars float %i' % (datapoints),
'LOOKUP_TABLE default']
else:
datapoints = 1
sub_header2 = ['POINT_DATA %i' % (number_data),
'SCALARS Scalars float',
'LOOKUP_TABLE default']
sub_header_df2 = pd.DataFrame(sub_header2)
data_df = pd.DataFrame(data)
with open(filename, 'a') as f:
sub_header_df2.to_csv(f, header=False, index=False)
with open(filename, 'a') as f:
data_df.to_csv(f, header=False, index=False, float_format='%.16f',
sep=' ')
def _write_ply(filename, vertices, faces, comment=None):
import pandas as pd
print("writing ply format")
# infer number of vertices and faces
number_vertices = vertices.shape[0]
number_faces = faces.shape[0]
# make header dataframe
header = ['ply',
'format ascii 1.0',
'comment %s' % comment,
'element vertex %i' % number_vertices,
'property float x',
'property float y',
'property float z',
'element face %i' % number_faces,
'property list uchar int vertex_indices',
'end_header'
]
header_df = pd.DataFrame(header)
# make dataframe from vertices
vertex_df = pd.DataFrame(vertices)
# make dataframe from faces, adding first row of 3s (indicating triangles)
triangles = np.reshape(3 * (np.ones(number_faces)), (number_faces, 1))
triangles = triangles.astype(int)
faces = faces.astype(int)
faces_df = pd.DataFrame(np.concatenate((triangles, faces), axis=1))
# write dfs to csv
header_df.to_csv(filename, header=None, index=False)
with open(filename, 'a') as f:
vertex_df.to_csv(f, header=False, index=False,
float_format='%.3f', sep=' ')
with open(filename, 'a') as f:
faces_df.to_csv(f, header=False, index=False,
float_format='%.0f', sep=' ')
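# Hedged usage sketch (not part of the original module): round-trips a minimal
# triangle mesh through an ASCII-coded vtk file using the functions above. The
# file location and all data values are arbitrary examples for illustration.
if __name__ == '__main__':
    import os
    import tempfile
    demo_points = np.array([[0.0, 0.0, 0.0],
                            [1.0, 0.0, 0.0],
                            [0.0, 1.0, 0.0]])
    demo_faces = np.array([[0, 1, 2]])
    demo_data = np.array([[0.1], [0.2], [0.3]])
    demo_file = os.path.join(tempfile.gettempdir(), 'io_mesh_demo.vtk')
    save_mesh(demo_file, {'points': demo_points, 'faces': demo_faces,
                          'data': demo_data})
    mesh = load_mesh(demo_file)
    print(mesh['points'].shape, mesh['faces'].shape, mesh['data'].shape)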
| apache-2.0 |
smartscheduling/scikit-learn-categorical-tree | examples/ensemble/plot_random_forest_embedding.py | 286 | 3531 | """
=========================================================
Hashing feature transformation using Totally Random Trees
=========================================================
RandomTreesEmbedding provides a way to map data to a
very high-dimensional, sparse representation, which might
be beneficial for classification.
The mapping is completely unsupervised and very efficient.
This example visualizes the partitions given by several
trees and shows how the transformation can also be used for
non-linear dimensionality reduction or non-linear classification.
Neighboring points often share the same leaf of a tree and therefore share
large parts of their hashed representation. This makes it possible to separate
two concentric circles simply based on the principal components of the
transformed data.
In high-dimensional spaces, linear classifiers often achieve
excellent accuracy. For sparse binary data, BernoulliNB
is particularly well-suited. The bottom row compares the
decision boundary obtained by BernoulliNB in the transformed
space with an ExtraTreesClassifier forest learned on the
original data.
"""
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_circles
from sklearn.ensemble import RandomTreesEmbedding, ExtraTreesClassifier
from sklearn.decomposition import TruncatedSVD
from sklearn.naive_bayes import BernoulliNB
# make a synthetic dataset
X, y = make_circles(factor=0.5, random_state=0, noise=0.05)
# use RandomTreesEmbedding to transform data
hasher = RandomTreesEmbedding(n_estimators=10, random_state=0, max_depth=3)
X_transformed = hasher.fit_transform(X)
# Visualize result using PCA
pca = TruncatedSVD(n_components=2)
X_reduced = pca.fit_transform(X_transformed)
# Learn a Naive Bayes classifier on the transformed data
nb = BernoulliNB()
nb.fit(X_transformed, y)
# Learn an ExtraTreesClassifier for comparison
trees = ExtraTreesClassifier(max_depth=3, n_estimators=10, random_state=0)
trees.fit(X, y)
# scatter plot of original and reduced data
fig = plt.figure(figsize=(9, 8))
ax = plt.subplot(221)
ax.scatter(X[:, 0], X[:, 1], c=y, s=50)
ax.set_title("Original Data (2d)")
ax.set_xticks(())
ax.set_yticks(())
ax = plt.subplot(222)
ax.scatter(X_reduced[:, 0], X_reduced[:, 1], c=y, s=50)
ax.set_title("PCA reduction (2d) of transformed data (%dd)" %
X_transformed.shape[1])
ax.set_xticks(())
ax.set_yticks(())
# Plot the decision in original space. For that, we will assign a color to each
# point in the mesh [x_min, m_max] x [y_min, y_max].
h = .01
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
# transform grid using RandomTreesEmbedding
transformed_grid = hasher.transform(np.c_[xx.ravel(), yy.ravel()])
y_grid_pred = nb.predict_proba(transformed_grid)[:, 1]
ax = plt.subplot(223)
ax.set_title("Naive Bayes on Transformed data")
ax.pcolormesh(xx, yy, y_grid_pred.reshape(xx.shape))
ax.scatter(X[:, 0], X[:, 1], c=y, s=50)
ax.set_ylim(-1.4, 1.4)
ax.set_xlim(-1.4, 1.4)
ax.set_xticks(())
ax.set_yticks(())
# transform grid using ExtraTreesClassifier
y_grid_pred = trees.predict_proba(np.c_[xx.ravel(), yy.ravel()])[:, 1]
ax = plt.subplot(224)
ax.set_title("ExtraTrees predictions")
ax.pcolormesh(xx, yy, y_grid_pred.reshape(xx.shape))
ax.scatter(X[:, 0], X[:, 1], c=y, s=50)
ax.set_ylim(-1.4, 1.4)
ax.set_xlim(-1.4, 1.4)
ax.set_xticks(())
ax.set_yticks(())
plt.tight_layout()
plt.show()
| bsd-3-clause |
xyguo/scikit-learn | sklearn/datasets/tests/test_20news.py | 280 | 3045 | """Test the 20news downloader, if the data is available."""
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import SkipTest
from sklearn import datasets
def test_20news():
try:
data = datasets.fetch_20newsgroups(
subset='all', download_if_missing=False, shuffle=False)
except IOError:
raise SkipTest("Download 20 newsgroups to run this test")
# Extract a reduced dataset
data2cats = datasets.fetch_20newsgroups(
subset='all', categories=data.target_names[-1:-3:-1], shuffle=False)
# Check that the ordering of the target_names is the same
# as the ordering in the full dataset
assert_equal(data2cats.target_names,
data.target_names[-2:])
# Assert that we have only 0 and 1 as labels
assert_equal(np.unique(data2cats.target).tolist(), [0, 1])
# Check that the number of filenames is consistent with data/target
assert_equal(len(data2cats.filenames), len(data2cats.target))
assert_equal(len(data2cats.filenames), len(data2cats.data))
# Check that the first entry of the reduced dataset corresponds to
# the first entry of the corresponding category in the full dataset
entry1 = data2cats.data[0]
category = data2cats.target_names[data2cats.target[0]]
label = data.target_names.index(category)
entry2 = data.data[np.where(data.target == label)[0][0]]
assert_equal(entry1, entry2)
def test_20news_length_consistency():
"""Checks the length consistencies within the bunch
This is a non-regression test for a bug present in 0.16.1.
"""
try:
data = datasets.fetch_20newsgroups(
subset='all', download_if_missing=False, shuffle=False)
except IOError:
raise SkipTest("Download 20 newsgroups to run this test")
# Extract the full dataset
data = datasets.fetch_20newsgroups(subset='all')
assert_equal(len(data['data']), len(data.data))
assert_equal(len(data['target']), len(data.target))
assert_equal(len(data['filenames']), len(data.filenames))
def test_20news_vectorized():
# This test is slow.
raise SkipTest("Test too slow.")
bunch = datasets.fetch_20newsgroups_vectorized(subset="train")
assert_true(sp.isspmatrix_csr(bunch.data))
assert_equal(bunch.data.shape, (11314, 107428))
assert_equal(bunch.target.shape[0], 11314)
assert_equal(bunch.data.dtype, np.float64)
bunch = datasets.fetch_20newsgroups_vectorized(subset="test")
assert_true(sp.isspmatrix_csr(bunch.data))
assert_equal(bunch.data.shape, (7532, 107428))
assert_equal(bunch.target.shape[0], 7532)
assert_equal(bunch.data.dtype, np.float64)
bunch = datasets.fetch_20newsgroups_vectorized(subset="all")
assert_true(sp.isspmatrix_csr(bunch.data))
assert_equal(bunch.data.shape, (11314 + 7532, 107428))
assert_equal(bunch.target.shape[0], 11314 + 7532)
assert_equal(bunch.data.dtype, np.float64)
| bsd-3-clause |
vincentchoqueuse/parametrix | examples/ex_polynomial_fitting_MC.py | 1 | 1215 | from parametrix.polynomial.signal_models import M_Polynomial
from parametrix.polynomial.classifiers import C_ModelOrder_Polynomial_IC
from parametrix.monte_carlo.classifiers import MC_Simulations_classification
import numpy as np
import matplotlib.pyplot as plt
""" Example: Model order selection for polynomial fitting. See [SEG04]_
p(x)=1-0.5x-5x**2-1.5x**3
.. [SEG04] Seghouane, A-K., and Maiza Bekara. "A small sample model selection criterion based on Kullback's symmetric divergence." IEEE Transactions on Signal Processing 52.12 (2004): 3314-3323."""
an=np.array([1,-0.5,-5,-1.5])
x_axis=np.linspace(-3,3,30)
model=M_Polynomial(an,x_axis,0)
model.sigma2s=635.37 #integral in page 3318
## Model Order Selection
order_vect=np.arange(10)
classifier_AIC=C_ModelOrder_Polynomial_IC(order_vect,x_axis,method="AIC")
classifier_AICc=C_ModelOrder_Polynomial_IC(order_vect,x_axis,method="AICc")
classifier_BIC=C_ModelOrder_Polynomial_IC(order_vect,x_axis,method="BIC")
## Monte Carlo Simulation
mc=MC_Simulations_classification("SNR",np.arange(-10,15,2),[classifier_AIC,classifier_AICc,classifier_BIC])
mc.trials(model,nb_trials=1000,verbose=1,plot=1)
plt.show()
| bsd-3-clause |
tturowski/gwide | gwide/scripts/geneUsage.py | 1 | 4140 | #!/usr/bin/env python
__author__ = 'Tomasz Turowski'
__copyright__ = "Copyright 2015"
__version__ = "1.0"
__credits__ = ["Tomasz Turowski"]
__email__ = "[email protected]"
__status__ = "Production"
from tRNAFromConcatv2 import *
from optparse import OptionParser
import argparse
from argparse import RawTextHelpFormatter
import select, os, re
import pandas as pd
usage = "Usage: calculate gene usage for tRNA genes"
parser = argparse.ArgumentParser(usage=usage, formatter_class=RawTextHelpFormatter)
files = parser.add_argument_group('Options for input files')
files.add_argument("-w", dest="w_file", help="Provide the path to your tab file with anticodones wages",
metavar="FILE", default=None)
files.add_argument("-e", dest="e_file", help="Provide the path to your tab file tRNA expression",
metavar="FILE", default=None)
options = parser.parse_args()
#function from Internet by Tommy Tang
def ReverseComplement(seq):
seq_dict = {'A':'U','T':'A','G':'C','C':'G', 'a':'u','t':'a','g':'c','c':'g'}
return "".join([seq_dict[base] for base in reversed(seq)])
if not options.w_file or not options.e_file:
    print "Some input file(s) are missing, use -w and -e"
exit()
#making reverse complement (DNA to RNA)
wages = pd.read_csv(options.w_file, sep='\t')
wages['wage'] = wages['wage'].convert_objects(convert_numeric=True)
for i, c in enumerate(wages['codone']):
wages.loc[i, 'anticodone'] = ReverseComplement(c)
#calculating % of codon usage
new_wages = pd.DataFrame()
for isotype in sorted(set(wages['isotype'].tolist())):
temp_df = wages[wages.isotype == isotype]
temp_df['wage_percent'] = temp_df['wage'].divide(temp_df.sum()['wage'], axis=0, level=None, fill_value=None)
temp_df['wage_percent_correction'] = 0
new_wages = new_wages.append(temp_df)
wages = new_wages
#calculating average hits and getting codone and anticodone from gene ID
expression = pd.read_csv(options.e_file, sep='\t')
expression['average'] = expression.mean(axis=1, numeric_only=True)
for i, gene_id in enumerate(expression['gene_id']):
expression.loc[i, 'anticodone'] = gene_id[3:6]
expression.loc[i, 'isotype'] = gene_id[1]
#calculating % of gene usage for each anticodone
new_expression = pd.DataFrame()
for anticodone in set(expression['anticodone'].tolist()):
temp_df = expression[expression.anticodone == anticodone]
temp_df['exp_percent_anticodone'] = temp_df['average'].divide(temp_df.sum()['average'], axis=0, level=None, fill_value=None)
new_expression = new_expression.append(temp_df)
expression = new_expression
# #calculating % of gene usage for each isotype
# new_expression2 = pd.DataFrame()
# for isotype in set(expression['isotype'].tolist()):
# temp_df = expression[expression.isotype == isotype]
# temp_df['exp_percent_isotype'] = temp_df['average'].divide(temp_df.sum()['average'], axis=0, level=None, fill_value=None)
# new_expression2 = new_expression2.append(temp_df)
# expression = new_expression2
#getting all alternative (wobble driven) codones in decoding
a = wages[['anticodone','codone', 'isotype', 'wage_percent']]
b = expression[['anticodone','isotype']]
b['gene'] = 'gene' #mark gene encoded anticodones
c = pd.ordered_merge(a,b)
d = c[c.gene != 'gene'] #leave only wobble-driven codones
d = d.sort('isotype')
#re-asigning codones to new anticodones and adding wages for some genes
wobble_driven_anticodones = list()
wobble_dict = {'A':'A','T':'G','G':'U','C':'A', 'a':'a','t':'G','g':'u','c':'a'}
for i, row in d.iterrows():
if row['isotype'] != 'Z':
anti1 = wobble_dict[row['codone'][2]]
anti23 = ReverseComplement(row['codone'])[1:3]
d.loc[i, 'recog_ant'] = anti1+anti23
wages.loc[wages.anticodone == anti1+anti23,'wage_percent_correction'] += row['wage_percent']
wages['wages_sum'] = wages['wage_percent']+wages['wage_percent_correction']
# print d
d.to_csv('wages', sep='\t')
# wages = new_wages.drop('isotype', axis=1)
# expression = new_expression.drop('isotype', axis=1)
olo = pd.ordered_merge(expression,new_wages)
# print olo
olo.to_csv('output', sep='\t')
print "Done." | apache-2.0 |
lukius/outlier-utils | outliers/smirnov_grubbs.py | 1 | 7308 | # -*- coding: utf-8 -*-
"""
Smirnov-Grubbs test for outlier detection.
"""
import numpy as np
from scipy import stats
from math import sqrt
from collections import defaultdict
try:
import pandas as pd
except ImportError:
pd = None
__all__ = ['test',
'two_sided_test',
'two_sided_test_indices',
'two_sided_test_outliers',
'min_test',
'min_test_indices',
'min_test_outliers',
'max_test',
'max_test_indices',
'max_test_outliers',
'TwoSidedGrubbsTest',
'MinValueGrubbsTest',
'MaxValueGrubbsTest',
'OutputType']
DEFAULT_ALPHA = 0.95
# Test output types
class OutputType:
DATA = 0 # Output data without outliers
OUTLIERS = 1 # Output outliers
INDICES = 2 # Output outlier indices
class GrubbsTest(object):
def __init__(self, data):
self.original_data = data
def _copy_data(self):
if isinstance(self.original_data, np.ndarray):
return self.original_data
elif pd is not None and isinstance(self.original_data, pd.Series):
return self.original_data
elif isinstance(self.original_data, list):
return np.array(self.original_data)
else:
raise TypeError('Unsupported data format')
def _delete_item(self, data, index):
if pd is not None and isinstance(data, pd.Series):
return data.drop(index)
elif isinstance(data, np.ndarray):
return np.delete(data, index)
else:
raise TypeError('Unsupported data format')
def _get_indices(self, values):
last_seen = defaultdict(lambda: 0)
data = list(self.original_data)
indices = list()
for value in values:
start = last_seen[value]
index = data.index(value, start)
indices.append(index)
last_seen[value] = index + 1
return indices
    def _get_g_test(self, data, alpha):
        """Compute the significant G-test value following these steps, where
        alpha is the requested significance level:
1. Find the upper critical value of the t-distribution with n-2
degrees of freedom and a significance level of alpha/2n
(for two-sided tests) or alpha/n (for one-sided tests).
2. Use this t value to find the score with the following formula:
((n-1) / sqrt(n)) * (sqrt(t**2 / (n-2 + t**2)))
:param numpy.array data: data set
:param float alpha: significance level
:return: G_test score
"""
n = len(data)
significance_level = self._get_t_significance_level(alpha, n)
t = stats.t.isf(significance_level, n-2)
return ((n-1) / sqrt(n)) * (sqrt(t**2 / (n-2 + t**2)))
def _test_once(self, data, alpha):
"""Perform one iteration of the Smirnov-Grubbs test.
:param numpy.array data: data set
:param float alpha: significance level
        :return: the index of the outlier if one is found; None otherwise
"""
target_index, value = self._target(data)
g = value / data.std()
g_test = self._get_g_test(data, alpha)
return target_index if g > g_test else None
def run(self, alpha=DEFAULT_ALPHA, output_type=OutputType.DATA):
"""Run the Smirnov-Grubbs test to remove outliers in the given data set.
:param float alpha: significance level
:param int output_type: test output type (from OutputType class values)
:return: depending on the value of output_type, the data set without
outliers (DATA), the outliers themselves (OUTLIERS) or the indices of
the outliers in the original data set (INDICES)
"""
data = self._copy_data()
outliers = list()
while True:
outlier_index = self._test_once(data, alpha)
if outlier_index is None:
break
outlier = data[outlier_index]
outliers.append(outlier)
data = self._delete_item(data, outlier_index)
return_value = data
if output_type == OutputType.OUTLIERS:
return_value = outliers
elif output_type == OutputType.INDICES:
return_value = self._get_indices(outliers)
return return_value
def _target(self, data):
raise NotImplementedError
    def _get_t_significance_level(self, alpha, n):
raise NotImplementedError
class TwoSidedGrubbsTest(GrubbsTest):
def _target(self, data):
"""Compute the index of the farthest value from the sample mean and its
distance.
:param numpy.array data: data set
:return int, float: the index of the element and its distance to the
mean
"""
relative_values = abs(data - data.mean())
index = relative_values.argmax()
value = relative_values[index]
return index, value
def _get_t_significance_level(self, alpha, n):
return alpha / (2*n)
class OneSidedGrubbsTest(GrubbsTest):
def _target(self, data):
"""Compute the index of the min/max value and its distance from the
sample mean.
:param numpy.array data: data set
:return int, float: the index of the min/max value and its distance to
the mean
"""
index = self._get_index(data)
value = data[index]
return index, abs(value - data.mean())
def _get_t_significance_level(self, alpha, n):
return alpha / n
class MinValueGrubbsTest(OneSidedGrubbsTest):
def _get_index(self, data):
return data.argmin()
class MaxValueGrubbsTest(OneSidedGrubbsTest):
def _get_index(self, data):
return data.argmax()
# Convenience functions to run single Grubbs tests
def _test(test_class, data, alpha, output_type):
return test_class(data).run(alpha, output_type=output_type)
def _two_sided_test(data, alpha, output_type):
return _test(TwoSidedGrubbsTest, data, alpha, output_type)
def _min_test(data, alpha, output_type):
return _test(MinValueGrubbsTest, data, alpha, output_type)
def _max_test(data, alpha, output_type):
return _test(MaxValueGrubbsTest, data, alpha, output_type)
def two_sided_test(data, alpha=DEFAULT_ALPHA):
return _two_sided_test(data, alpha, OutputType.DATA)
def two_sided_test_indices(data, alpha=DEFAULT_ALPHA):
return _two_sided_test(data, alpha, OutputType.INDICES)
def two_sided_test_outliers(data, alpha=DEFAULT_ALPHA):
return _two_sided_test(data, alpha, OutputType.OUTLIERS)
def min_test(data, alpha=DEFAULT_ALPHA):
return _min_test(data, alpha, OutputType.DATA)
def min_test_indices(data, alpha=DEFAULT_ALPHA):
return _min_test(data, alpha, OutputType.INDICES)
def min_test_outliers(data, alpha=DEFAULT_ALPHA):
return _min_test(data, alpha, OutputType.OUTLIERS)
def max_test(data, alpha=DEFAULT_ALPHA):
return _max_test(data, alpha, OutputType.DATA)
def max_test_indices(data, alpha=DEFAULT_ALPHA):
return _max_test(data, alpha, OutputType.INDICES)
def max_test_outliers(data, alpha=DEFAULT_ALPHA):
return _max_test(data, alpha, OutputType.OUTLIERS)
def test(data, alpha=DEFAULT_ALPHA):
return two_sided_test(data, alpha)
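# Hedged usage sketch (not part of the original module): exercises the
# convenience functions on a small made-up sample whose last value is an
# obvious outlier. The numbers are for demonstration only.
if __name__ == '__main__':
    demo = np.array([199.31, 199.53, 200.19, 200.82,
                     201.92, 201.95, 202.18, 245.57])
    print(test(demo))                     # data with two-sided outliers removed
    print(two_sided_test_outliers(demo))  # the detected outliers themselves
    print(max_test_indices(demo))         # indices flagged by the one-sided max test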
| mit |
mhue/scikit-learn | examples/svm/plot_svm_anova.py | 250 | 2000 | """
=================================================
SVM-Anova: SVM with univariate feature selection
=================================================
This example shows how to perform univariate feature selection before running an SVC
(support vector classifier) to improve the classification scores.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm, datasets, feature_selection, cross_validation
from sklearn.pipeline import Pipeline
###############################################################################
# Import some data to play with
digits = datasets.load_digits()
y = digits.target
# Throw away data, to be in the curse of dimension settings
y = y[:200]
X = digits.data[:200]
n_samples = len(y)
X = X.reshape((n_samples, -1))
# add 200 non-informative features
X = np.hstack((X, 2 * np.random.random((n_samples, 200))))
###############################################################################
# Create a feature-selection transform and an instance of SVM that we
# combine together to have an full-blown estimator
transform = feature_selection.SelectPercentile(feature_selection.f_classif)
clf = Pipeline([('anova', transform), ('svc', svm.SVC(C=1.0))])
###############################################################################
# Plot the cross-validation score as a function of percentile of features
score_means = list()
score_stds = list()
percentiles = (1, 3, 6, 10, 15, 20, 30, 40, 60, 80, 100)
for percentile in percentiles:
clf.set_params(anova__percentile=percentile)
# Compute cross-validation score using all CPUs
this_scores = cross_validation.cross_val_score(clf, X, y, n_jobs=1)
score_means.append(this_scores.mean())
score_stds.append(this_scores.std())
plt.errorbar(percentiles, score_means, np.array(score_stds))
plt.title(
'Performance of the SVM-Anova varying the percentile of features selected')
plt.xlabel('Percentile')
plt.ylabel('Prediction rate')
plt.axis('tight')
plt.show()
| bsd-3-clause |
ChanChiChoi/scikit-learn | examples/plot_multilabel.py | 87 | 4279 | # Authors: Vlad Niculae, Mathieu Blondel
# License: BSD 3 clause
"""
=========================
Multilabel classification
=========================
This example simulates a multi-label document classification problem. The
dataset is generated randomly based on the following process:
- pick the number of labels: n ~ Poisson(n_labels)
- n times, choose a class c: c ~ Multinomial(theta)
- pick the document length: k ~ Poisson(length)
- k times, choose a word: w ~ Multinomial(theta_c)
In the above process, rejection sampling is used to make sure that n is more
than 2, and that the document length is never zero. Likewise, we reject classes
which have already been chosen. The documents that are assigned to both
classes are plotted surrounded by two colored circles.
The classification is performed by projecting to the first two principal
components found by PCA and CCA for visualisation purposes, followed by using
the :class:`sklearn.multiclass.OneVsRestClassifier` metaclassifier using two
SVCs with linear kernels to learn a discriminative model for each class.
Note that PCA is used to perform an unsupervised dimensionality reduction,
while CCA is used to perform a supervised one.
Note: in the plot, "unlabeled samples" does not mean that we don't know the
labels (as in semi-supervised learning) but that the samples simply do *not*
have a label.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_multilabel_classification
from sklearn.multiclass import OneVsRestClassifier
from sklearn.svm import SVC
from sklearn.preprocessing import LabelBinarizer
from sklearn.decomposition import PCA
from sklearn.cross_decomposition import CCA
def plot_hyperplane(clf, min_x, max_x, linestyle, label):
# get the separating hyperplane
w = clf.coef_[0]
a = -w[0] / w[1]
xx = np.linspace(min_x - 5, max_x + 5) # make sure the line is long enough
yy = a * xx - (clf.intercept_[0]) / w[1]
plt.plot(xx, yy, linestyle, label=label)
def plot_subfigure(X, Y, subplot, title, transform):
if transform == "pca":
X = PCA(n_components=2).fit_transform(X)
elif transform == "cca":
X = CCA(n_components=2).fit(X, Y).transform(X)
else:
raise ValueError
min_x = np.min(X[:, 0])
max_x = np.max(X[:, 0])
min_y = np.min(X[:, 1])
max_y = np.max(X[:, 1])
classif = OneVsRestClassifier(SVC(kernel='linear'))
classif.fit(X, Y)
plt.subplot(2, 2, subplot)
plt.title(title)
zero_class = np.where(Y[:, 0])
one_class = np.where(Y[:, 1])
plt.scatter(X[:, 0], X[:, 1], s=40, c='gray')
plt.scatter(X[zero_class, 0], X[zero_class, 1], s=160, edgecolors='b',
facecolors='none', linewidths=2, label='Class 1')
plt.scatter(X[one_class, 0], X[one_class, 1], s=80, edgecolors='orange',
facecolors='none', linewidths=2, label='Class 2')
plot_hyperplane(classif.estimators_[0], min_x, max_x, 'k--',
'Boundary\nfor class 1')
plot_hyperplane(classif.estimators_[1], min_x, max_x, 'k-.',
'Boundary\nfor class 2')
plt.xticks(())
plt.yticks(())
plt.xlim(min_x - .5 * max_x, max_x + .5 * max_x)
plt.ylim(min_y - .5 * max_y, max_y + .5 * max_y)
if subplot == 2:
plt.xlabel('First principal component')
plt.ylabel('Second principal component')
plt.legend(loc="upper left")
plt.figure(figsize=(8, 6))
X, Y = make_multilabel_classification(n_classes=2, n_labels=1,
allow_unlabeled=True,
return_indicator=True,
random_state=1)
plot_subfigure(X, Y, 1, "With unlabeled samples + CCA", "cca")
plot_subfigure(X, Y, 2, "With unlabeled samples + PCA", "pca")
X, Y = make_multilabel_classification(n_classes=2, n_labels=1,
allow_unlabeled=False,
return_indicator=True,
random_state=1)
plot_subfigure(X, Y, 3, "Without unlabeled samples + CCA", "cca")
plot_subfigure(X, Y, 4, "Without unlabeled samples + PCA", "pca")
plt.subplots_adjust(.04, .02, .97, .94, .09, .2)
plt.show()
| bsd-3-clause |
ChristophKirst/ClearMapUnstable | ClearMap/Visualization/Plot.py | 1 | 9095 | # -*- coding: utf-8 -*-
"""
Plotting routines for overlaying labels, tilings, and sectioning of 3d data sets
Supported functionality:
* plot volumetric data as a sequence of tiles via :func:`plotTiling`
* overlay points on images via :func:`overlayPoints` and
:func:`plotOverlayPoints`
* overlay labeled images on gray scale images via :func:`overlayLabel` and
:func:`plotOverlayLabel`
"""
#:copyright: Copyright 2015 by Christoph Kirst, The Rockefeller University, New York City
#:license: GNU, see LICENSE.txt for details.
import math
import numpy
import matplotlib as mpl
import matplotlib.pyplot as plt
import ClearMap.IO as io
import ClearMap.Analysis.Voxelization as vox
def plotTiling(dataSource, tiling = "automatic", maxtiles = 20, x = all, y = all, z = all, inverse = False):
"""Plot 3d image as 2d tiles
Arguments:
        dataSource (str or array): volumetric image data
        tiling (str or tuple): tiling specification
        maxtiles (int): maximal number of tiles
        x, y, z (all or tuple): sub-range specification
        inverse (bool): invert image
Returns:
(object): figure handle
"""
image = io.readData(dataSource, x = x, y = y, z = z);
dim = image.ndim;
if dim < 2 or dim > 4:
raise StandardError('plotTiling: image dimension must be 2 to 4');
if dim == 2:
image = image.reshape(image.shape + (1,));
dim = 3;
if image.ndim == 3:
if image.shape[2] == 3: # 2d color image
ntiles = 1;
cmap = None;
image = image.reshape((image.shape[0], image.shape[1], 1, 3));
else: # 3d gray image
ntiles = image.shape[2];
cmap = plt.cm.gray;
image = image.reshape(image.shape + (1,));
else:
ntiles = image.shape[2]; # 3d color = 4d
cmap = None;
if ntiles > maxtiles:
        print "plotTiling: number of tiles %d is very large! Clipping at %d!" % (ntiles, maxtiles);
ntiles = maxtiles;
if tiling == "automatic":
nx = math.floor(math.sqrt(ntiles));
ny = int(math.ceil(ntiles / nx));
nx = int(nx);
else:
nx = int(tiling[0]);
ny = int(tiling[1]);
#print image.shape
fig, axarr = plt.subplots(nx, ny, sharex = True, sharey = True);
fig.subplots_adjust(wspace=0.05, hspace=0.05);
axarr = numpy.array(axarr);
axarr = axarr.flatten();
imin = image.min();
imax = image.max();
if inverse:
(imin, imax) = (-float(imax), -float(imin));
#print imin, imax
for i in range(0, ntiles):
a = axarr[i];
imgpl = image[:,:,i,:].copy();
imgpl = imgpl.transpose([1,0,2]);
if imgpl.shape[2] == 1:
imgpl = imgpl.reshape((imgpl.shape[0], imgpl.shape[1]));
if inverse:
imgpl = -imgpl.astype('float');
#a.imshow(imgpl, interpolation='none', cmap = cmap, vmin = imin, vmax = imax);
a.imshow(imgpl, interpolation='none', cmap = cmap, vmin = imin, vmax = imax);
#fig.canvas.manager.window.activateWindow()
#fig.canvas.manager.window.raise_()
return fig;
def overlayLabel(dataSource, labelSource, sink = None, alpha = False, labelColorMap = 'jet', x = all, y = all, z = all):
"""Overlay a gray scale image with colored labeled image
Arguments:
        dataSource (str or array): volumetric image data
labelSource (str or array): labeled image to be overlayed on the image data
sink (str or None): destination for the overlayed image
alpha (float or False): transparency
labelColorMap (str or object): color map for the labels
x, y, z (all or tuple): sub-range specification
Returns:
(array or str): figure handle
See Also:
:func:`overlayPoints`
"""
label = io.readData(labelSource, x = x, y = y, z = z);
image = io.readData(dataSource, x = x, y = y, z = z);
lmax = label.max();
if lmax <= 1:
carray = numpy.array([[1,0,0,1]]);
else:
cm = mpl.cm.get_cmap(labelColorMap);
cNorm = mpl.colors.Normalize(vmin=1, vmax = int(lmax));
carray = mpl.cm.ScalarMappable(norm=cNorm, cmap=cm);
carray = carray.to_rgba(numpy.arange(1, int(lmax + 1)));
if alpha == False:
carray = numpy.concatenate(([[0,0,0,1]], carray), axis = 0);
else:
carray = numpy.concatenate(([[1,1,1,1]], carray), axis = 0);
cm = mpl.colors.ListedColormap(carray);
carray = cm(label);
carray = carray.take([0,1,2], axis = -1);
if alpha == False:
cimage = (label == 0) * image;
cimage = numpy.repeat(cimage, 3);
cimage = cimage.reshape(image.shape + (3,));
cimage = cimage.astype(carray.dtype);
cimage += carray;
else:
cimage = numpy.repeat(image, 3);
cimage = cimage.reshape(image.shape + (3,));
cimage = cimage.astype(carray.dtype);
cimage *= carray;
return io.writeData(sink, cimage);
def plotOverlayLabel(dataSource, labelSource, alpha = False, labelColorMap = 'jet', x = all, y = all, z = all, tiling = "automatic", maxtiles = 20):
"""Plot gray scale image overlayed with labeled image
Arguments:
        dataSource (str or array): volumetric image data
labelSource (str or array): labeled image to be overlayed on the image data
alpha (float or False): transparency
labelColorMap (str or object): color map for the labels
x, y, z (all or tuple): sub-range specification
tiling (str or tuple): tiling specification
        maxtiles (int): maximal number of tiles
Returns:
(object): figure handle
See Also:
:func:`overlayLabel`
"""
ov = overlayLabel(dataSource, labelSource, alpha = alpha, labelColorMap = labelColorMap, x = x, y = y, z = z);
return plotTiling(ov, tiling = tiling, maxtiles = maxtiles);
def overlayPoints(dataSource, pointSource, sink = None, pointColor = [1,0,0], x = all, y = all, z = all):
"""Overlay points on 3D data and return as color image
Arguments:
        dataSource (str or array): volumetric image data
pointSource (str or array): point data to be overlayed on the image data
pointColor (array): RGB color for the overlayed points
x, y, z (all or tuple): sub-range specification
Returns:
(str or array): image overlayed with points
See Also:
:func:`overlayLabel`
"""
data = io.readData(dataSource, x = x, y = y, z = z);
points = io.readPoints(pointSource, x = x, y = y, z = z, shift = True);
#print data.shape
if not pointColor is None:
dmax = data.max(); dmin = data.min();
if dmin == dmax:
dmax = dmin + 1;
cimage = numpy.repeat( (data - dmin) / (dmax - dmin), 3);
cimage = cimage.reshape(data.shape + (3,));
if data.ndim == 2:
for p in points: # faster version using voxelize ?
cimage[p[0], p[1], :] = pointColor;
elif data.ndim == 3:
for p in points: # faster version using voxelize ?
cimage[p[0], p[1], p[2], :] = pointColor;
else:
            raise RuntimeError('overlayPoints: data dimension %d not supported' % data.ndim);
else:
cimage = vox.voxelize(points, data.shape, method = 'Pixel');
cimage = cimage.astype(data.dtype) * data.max();
data.shape = data.shape + (1,);
cimage.shape = cimage.shape + (1,);
cimage = numpy.concatenate((data, cimage), axis = 3);
#print cimage.shape
return io.writeData(sink, cimage);
def plotOverlayPoints(dataSource, pointSource, pointColor = [1,0,0], x = all, y = all, z = all):
"""Plot points overlayed on gray scale 3d image as tiles.
Arguments:
        dataSource (str or array): volumetric image data
pointSource (str or array): point data to be overlayed on the image data
pointColor (array): RGB color for the overlayed points
x, y, z (all or tuple): sub-range specification
Returns:
(object): figure handle
See Also:
:func:`plotTiling`
"""
cimg = overlayPoints(dataSource, pointSource, pointColor = pointColor, x = x, y = y, z = z);
return plotTiling(cimg);
def test():
"""Test Plot module"""
import numpy as np
import ClearMap.Visualization.Plot as self
reload(self)
l = np.array([[0,0,0,0,0], [0,1,1,0,0], [3,0,5,0,2], [5,0,0,0,0], [4,4,0,0,0]])
x = np.random.rand(5,5);
self.plotOverlayLabel(x,l, alpha = False);
self.plotOverlayLabel(x,l, alpha = True);
#
x = np.random.rand(50,20);
p = np.array([[10,15], [40,10]]);
self.plotOverlayPoints(x, p)
if __name__ == "__main__":
test();
| gpl-3.0 |
JoostVisser/ml-assignment2 | mglearn/plot_dbscan.py | 1 | 1675 | import numpy as np
import matplotlib.pyplot as plt
from sklearn.cluster import DBSCAN
from sklearn.datasets import make_blobs
from .plot_helpers import discrete_scatter
def plot_dbscan():
X, y = make_blobs(random_state=0, n_samples=12)
dbscan = DBSCAN()
clusters = dbscan.fit_predict(X)
clusters
fig, axes = plt.subplots(3, 4, figsize=(11, 8), subplot_kw={'xticks': (), 'yticks': ()})
# Plot clusters as red, green and blue, and outliers (-1) as white
colors = ['r', 'g', 'b']
markers = ['o', '^', 'v']
# iterate over settings of min_samples and eps
for i, min_samples in enumerate([2, 3, 5]):
for j, eps in enumerate([1, 1.5, 2, 3]):
# instantiate DBSCAN with a particular setting
dbscan = DBSCAN(min_samples=min_samples, eps=eps)
# get cluster assignments
clusters = dbscan.fit_predict(X)
print("min_samples: %d eps: %f cluster: %s" % (min_samples, eps, clusters))
if np.any(clusters == -1):
c = ['w'] + colors
m = ['o'] + markers
else:
c = colors
m = markers
discrete_scatter(X[:, 0], X[:, 1], clusters, ax=axes[i, j], c=c, s=8, markers=m)
inds = dbscan.core_sample_indices_
# vizualize core samples and clusters.
if len(inds):
discrete_scatter(X[inds, 0], X[inds, 1], clusters[inds],
ax=axes[i, j], s=15, c=colors,
markers=markers)
axes[i, j].set_title("min_samples: %d eps: %.1f" % (min_samples, eps))
fig.tight_layout()
| mit |
aerosara/thesis | notebooks_archive_10112014/thesis_functions/visualization.py | 1 | 2680 | # -*- coding: utf-8 -*-
# <nbformat>3.0</nbformat>
# <codecell>
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
def CreatePlotGrid(title, xlabel, ylabel, zlabel, aspectmode):
# plot
fig, ((axXZ, axYZ), (axXY, ax3D)) = plt.subplots(2, 2)
fig.suptitle(title, fontsize=14)
fig.set_size_inches(10, 10)
fig.subplots_adjust(hspace=0.2)
fig.subplots_adjust(wspace=0.5)
# XZ Plane
axXZ.set_title(xlabel + zlabel + ' Plane')
axXZ.xaxis.set_label_text(xlabel + ' axis')
axXZ.yaxis.set_label_text(zlabel + ' axis')
axXZ.set_aspect(aspectmode)
# YZ Plane
axYZ.set_title(ylabel + zlabel + ' Plane')
axYZ.xaxis.set_label_text(ylabel + ' axis')
axYZ.yaxis.set_label_text(zlabel + ' axis')
axYZ.set_aspect(aspectmode)
# XY Plane
axXY.set_title(xlabel + ylabel + ' Plane')
axXY.xaxis.set_label_text(xlabel + ' axis')
axXY.yaxis.set_label_text(ylabel + ' axis')
axXY.set_aspect(aspectmode)
# plot in 3D
ax3D.axis('off')
ax3D = fig.add_subplot(224, projection='3d') # "224" means "2x2 grid, 4th subplot"
ax3D.set_title('3D View in ' + xlabel + ylabel + zlabel + ' Frame')
ax3D.xaxis.set_label_text(xlabel + ' axis')
ax3D.yaxis.set_label_text(ylabel + ' axis')
ax3D.zaxis.set_label_text(zlabel + ' axis')
ax3D.set_aspect(aspectmode)
return axXZ, axYZ, axXY, ax3D
# Allowed colors:
# b: blue
# g: green
# r: red
# c: cyan
# m: magenta
# y: yellow
# k: black
# w: white
def SetPlotGridData(axXZ, axYZ, axXY, ax3D, data, points):
# add points to plots
for key in points:
axXZ.plot(points[key]['xyz'][0], points[key]['xyz'][2], 'o', markersize=5, label=key, color=points[key]['color'])
axYZ.plot(points[key]['xyz'][1], points[key]['xyz'][2], 'o', markersize=5, label=key, color=points[key]['color'])
axXY.plot(points[key]['xyz'][0], points[key]['xyz'][1], 'o', markersize=5, label=key, color=points[key]['color'])
ax3D.plot([points[key]['xyz'][0]], [points[key]['xyz'][1]], [points[key]['xyz'][2]], 'o', markersize=5, label=key, color=points[key]['color'])
# add data to plots
for key in data:
axXZ.plot(data[key]['x'], data[key]['z'], '-', label=key, color=data[key]['color'])
axYZ.plot(data[key]['y'], data[key]['z'], '-', label=key, color=data[key]['color'])
axXY.plot(data[key]['x'], data[key]['y'], '-', label=key, color=data[key]['color'])
ax3D.plot(data[key]['x'], data[key]['y'], data[key]['z'], '-', label=key, color=data[key]['color'])
#ax3D.legend(loc='center left', bbox_to_anchor=(1.2, 0.5))
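# <codecell>
# Usage sketch (not part of the original module): build an XYZ plot grid and
# feed it one demo trajectory plus a single labeled point. The 'data' and
# 'points' dictionary layouts follow what SetPlotGridData expects above; the
# trajectory values are made up for illustration only.
if __name__ == "__main__":
    import numpy as np
    axXZ, axYZ, axXY, ax3D = CreatePlotGrid('Demo trajectory', 'X', 'Y', 'Z', 'auto')
    t = np.linspace(0.0, 6.28, 200)
    data = {'orbit': {'x': np.cos(t), 'y': np.sin(t), 'z': 0.1 * t, 'color': 'b'}}
    points = {'start': {'xyz': [1.0, 0.0, 0.0], 'color': 'r'}}
    SetPlotGridData(axXZ, axYZ, axXY, ax3D, data, points)
    plt.show()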
| mit |
joachimwolff/minHashNearestNeighbors | sparse_neighbors_search/neighbors/wtaHash.py | 1 | 30894 | # Copyright 2016, 2017, 2018, 2019, 2020 Joachim Wolff
# PhD Thesis
#
# Copyright 2015, 2016 Joachim Wolff
# Master Thesis
# Tutor: Fabrizio Costa
# Winter semester 2015/2016
#
# Chair of Bioinformatics
# Department of Computer Science
# Faculty of Engineering
# Albert-Ludwigs-University Freiburg im Breisgau
__author__ = 'joachimwolff'
from scipy.sparse import csr_matrix
from .nearestNeighborsCppInterface import _NearestNeighborsCppInterface
class WtaHash():
"""Approximate unsupervised learner for implementing neighbor searches on sparse data sets. Based on a
    dimension reduction with a winner takes it all hash.
Parameters
----------
n_neighbors : int, optional (default = 5)
Number of neighbors to use by default for :meth:`k_neighbors` queries.
radius : float, optional (default = 1.0)
        Range of parameter space to use by default for :meth:`radius_neighbors`
queries.
fast : {True, False}, optional (default = False)
- True: will only use an inverse index to compute a k_neighbor query.
- False: an inverse index is used to preselect instances, and these are used to get
the original data from the data set to answer a k_neighbor query. The
original data is stored in the memory.
number_of_hash_functions : int, optional (default = '400')
Number of hash functions to use for computing the inverse index.
max_bin_size : int, optional (default = 50)
The number of maximal collisions for one hash value of one hash function. If one value of a hash function
has more collisions, this value will be ignored.
minimal_blocks_in_common : int, optional (default = 1)
        The minimal number of hash collisions two instances have to have in common to be recognised. Everything less
will be ignored.
shingle_size : int, optional (default = 4)
Reduction factor for the signature size.
E.g. number_of_hash_functions=400 and shingle_size=4 --> Size of the signature will be 100
excess_factor : int, optional (default = 5)
        Factor to return more neighbors internally than defined with n_neighbors. The factor is useful to increase the
precision of the :meth:`algorithm=exact` version of the implementation.
E.g.: n_neighbors = 5, excess_factor = 5. Internally n_neighbors*excess_factor = 25 neighbors will be returned.
Now the reduced data set for sklearn.NearestNeighbors is of size 25 and not 5.
similarity : {True, False}, optional (default = False)
If true: cosine similarity is used
If false: Euclidean distance is used
number_of_cores : int, optional (default = None)
Number of cores that should be used for openmp. If your system doesn't support openmp, this value
will have no effect. If it supports openmp and it is not defined, the maximum number of cores is used.
chunk_size : int, optional (default = None)
Number of elements one cpu core should work on. If it is set to "0" the default behaviour of openmp is used;
all cores are getting the same amount of data at once; e.g. 8-core cpu and 128 elements to process, every core will
get 16 elements at once.
prune_inverse_index : int, optional (default = -1)
        Remove every hash value with fewer occurrences than n. If -1 it is deactivated.
prune_inverse_index_after_instance: float, optional (default = -1.0)
Start all the pruning routines after x% of the data during the fitting process.
    remove_hash_function_with_less_entries_as: int, optional (default = -1)
        Remove every hash function with fewer hash values than n.
block_size : int, optional (default = 5)
        How many more hash functions should be computed. This number is relevant for the shingles.
shingle : int, optional (default = 0)
store_value_with_least_sigificant_bit : int, optional (default = 0)
gpu_hashing : int, optional (default = 1)
If the hashing of WtaHash should be computed on the GPU (1) but the prediction is computed on the CPU.
If 0 it is deactivated.
speed_optimized : {True, False}, optional (default = None)
        A parameter setting that is optimized for the best speed. Cannot be used together with the parameter 'accuracy_optimized'.
If bad results are computed, try 'accuracy_optimized' or optimize the parameters with a hyperparameter optimization.
accuracy_optimized : {True, False}, optional (default = None)
        A parameter setting that is optimized for the best accuracy. Cannot be used together with the parameter 'speed_optimized'.
        If results are computed too slowly, try 'speed_optimized' or optimize the parameters with a hyperparameter optimization.
Notes
-----
    The documentation is copied from scikit-learn and was only extended for a few cases. All examples are available there.
http://scikit-learn.org/stable/modules/generated/sklearn.neighbors.NearestNeighbors.html#sklearn.neighbors.NearestNeighbors
Sources:
Basic algorithm:
http://en.wikipedia.org/wiki/K-nearest_neighbor_algorithm
Idea behind implementation:
https://en.wikipedia.org/wiki/Locality-sensitive_hashing
Implementation is using scikit learn:
http://scikit-learn.org/dev/index.html
http://scikit-learn.org/stable/modules/generated/sklearn.neighbors.NearestNeighbors.html#sklearn.neighbors.NearestNeighbors
Algorithm based on:
Heyne, S., Costa, F., Rose, D., & Backofen, R. (2012).
GraphClust: alignment-free structural clustering of local RNA secondary structures.
Bioinformatics, 28(12), i224-i232.
http://bioinformatics.oxfordjournals.org/content/28/12/i224.full.pdf+html
Winner takes it all hash based on:
    Yagnik, Jay, et al. "The power of comparative reasoning."
    Computer Vision (ICCV), 2011 IEEE International Conference on. IEEE, 2011.
http://www.dennis-strelow.com/papers/documents/iccv11.pdf
"""
def __init__(self, n_neighbors=5, radius=1.0, fast=False, number_of_hash_functions=400, rangeK_wta=20,
max_bin_size=50, minimal_blocks_in_common=1, shingle_size=4, excess_factor=5,
similarity=False, number_of_cores=None, chunk_size=None, prune_inverse_index=-1,
prune_inverse_index_after_instance=-1.0, remove_hash_function_with_less_entries_as=-1,
block_size=5, shingle=0, store_value_with_least_sigificant_bit=0,
speed_optimized=None, accuracy_optimized=None, absolute_numbers=False): # cpu_gpu_load_balancing=0,
if speed_optimized is not None and accuracy_optimized is not None:
print("Speed optimization and accuracy optimization at the same time is not possible.")
return
if speed_optimized:
number_of_hash_functions = 186
max_bin_size = 87
minimal_blocks_in_common = 1
shingle_size = 1
excess_factor = 11
prune_inverse_index = 6
prune_inverse_index_after_instance = 0.0
remove_hash_function_with_less_entries_as = 0
block_size = 1
shingle = 0
store_value_with_least_sigificant_bit = 1
rangeK_wta = 17
elif accuracy_optimized:
number_of_hash_functions = 739
max_bin_size = 30
minimal_blocks_in_common = 1
shingle_size = 3
excess_factor = 14
prune_inverse_index = 1
prune_inverse_index_after_instance = 0.5
remove_hash_function_with_less_entries_as = 0
block_size = 1
shingle = 0
store_value_with_least_sigificant_bit = 2
rangeK_wta = 23
cpu_gpu_load_balancing = 0
self._nearestNeighborsCppInterface = _NearestNeighborsCppInterface(n_neighbors=n_neighbors, radius=radius,
fast=fast, number_of_hash_functions=number_of_hash_functions,
max_bin_size=max_bin_size, minimal_blocks_in_common=minimal_blocks_in_common,
shingle_size=shingle_size, excess_factor=excess_factor,
similarity=similarity, number_of_cores=number_of_cores, chunk_size=chunk_size, prune_inverse_index=prune_inverse_index,
prune_inverse_index_after_instance=prune_inverse_index_after_instance,
remove_hash_function_with_less_entries_as=remove_hash_function_with_less_entries_as,
hash_algorithm=1, block_size=block_size, shingle=shingle,
store_value_with_least_sigificant_bit=store_value_with_least_sigificant_bit,
cpu_gpu_load_balancing=cpu_gpu_load_balancing, gpu_hashing=0, rangeK_wta=rangeK_wta)
def __del__(self):
del self._nearestNeighborsCppInterface
def fit(self, X, y=None):
"""Fit the model using X as training data.
Parameters
----------
X : {array-like, sparse matrix}, optional
Training data. If array or matrix, shape = [n_samples, n_features]
If X is None, a "lazy fitting" is performed. If kneighbors is called, the fitting
            is done with the data given there. Also the caching of computed hash values is deactivated in
this case.
y : list, optional (default = None)
            List of classes for the given input of X. Size has to be n_samples."""
self._nearestNeighborsCppInterface.fit(X=X, y=y)
def partial_fit(self, X, y=None):
"""Extend the model by X as additional training data.
Parameters
----------
X : {array-like, sparse matrix}
Training data. Shape = [n_samples, n_features]
y : list, optional (default = None)
            List of classes for the given input of X. Size has to be n_samples."""
self._nearestNeighborsCppInterface.partial_fit(X=X, y=y)
def kneighbors(self, X=None, n_neighbors=None, return_distance=True, fast=None, similarity=None, pAbsoluteNumbers=None):
"""Finds the n_neighbors of a point X or of all points of X.
Parameters
----------
X : {array-like, sparse matrix}, optional
Data point(s) to be searched for n_neighbors. Shape = [n_samples, n_features]
If not provided, neighbors of each indexed point are returned.
In this case, the query point is not considered its own neighbor.
n_neighbors : int, optional
Number of neighbors to get (default is the value passed to the constructor).
return_distance : boolean, optional. Defaults to True.
If False, distances will not be returned
fast : {'True', 'False'}, optional (default = 'None')
- 'True': will only use an inverse index to compute a k_neighbor query.
- 'False': an inverse index is used to preselect instances, and these are used to get
the original data from the data set to answer a k_neighbor query. The
original data is stored in the memory.
If not passed, default value is what was passed to the constructor.
similarity: {True, False}, optional (default = None)
If true: cosine similarity is used
If false: Euclidean distance is used
If None: Value that was defined at the init is taken.
Returns
-------
dist : array, shape = [n_samples, distances]
Array representing the lengths to points, only present if
return_distance=True
ind : array, shape = [n_samples, neighbors]
Indices of the nearest points in the population matrix."""
return self._nearestNeighborsCppInterface.kneighbors(X=X, n_neighbors=n_neighbors,
return_distance=return_distance,
fast=fast, similarity=similarity, pAbsoluteNumbers=pAbsoluteNumbers)
def kneighbors_graph(self, X=None, n_neighbors=None, mode='connectivity', fast=None, symmetric=True, similarity=None, pAbsoluteNumbers=None):
"""Computes the (weighted) graph of k-Neighbors for points in X
Parameters
----------
X : array-like, last dimension same as that of fit data, optional
The query point or points.
If not provided, neighbors of each indexed point are returned.
In this case, the query point is not considered its own neighbor.
n_neighbors : int
Number of neighbors for each sample.
(default is value passed to the constructor).
mode : {'connectivity', 'distance'}, optional
Type of returned matrix: 'connectivity' will return the
connectivity matrix with ones and zeros, in 'distance' the
edges are Euclidean distance between points.
fast : {'True', 'False'}, optional (default = 'None')
- 'True': will only use an inverse index to compute a k_neighbor query.
- 'False': an inverse index is used to preselect instances, and these are used to get
the original data from the data set to answer a k_neighbor query. The
original data is stored in the memory.
If not passed, default value is what was passed to the constructor.
similarity: {True, False}, optional (default = None)
If true: cosine similarity is used
If false: Euclidean distance is used
If None: Value that was defined at the init is taken.
symmetric: {True, False} (default = True)
If true the returned graph is symmetric, otherwise not.
Returns
-------
A : sparse matrix in CSR format, shape = [n_samples, n_samples_fit]
n_samples_fit is the number of samples in the fitted data
A[i, j] is assigned the weight of edge that connects i to j.
"""
return self._nearestNeighborsCppInterface.kneighbors_graph(X=X, n_neighbors=n_neighbors, mode=mode,
fast=fast, symmetric=symmetric, similarity=similarity, pAbsoluteNumbers=pAbsoluteNumbers)
def radius_neighbors(self, X=None, radius=None, return_distance=None, fast=None, similarity=None, pAbsoluteNumbers=None):
"""Finds the neighbors within a given radius of a point or points.
Return the indices and distances of each point from the dataset
lying in a ball with size ``radius`` around the points of the query
array. Points lying on the boundary are included in the results.
The result points are *not* necessarily sorted by distance to their
query point.
Parameters
----------
X : array-like, (n_samples, n_features), optional
The query point or points.
If not provided, neighbors of each indexed point are returned.
In this case, the query point is not considered its own neighbor.
radius : float
Limiting distance of neighbors to return.
(default is the value passed to the constructor).
return_distance : boolean, optional. Defaults to True.
If False, distances will not be returned
fast : bool, optional (default = 'False')
- 'True': will only use an inverse index to compute a k_neighbor query.
- 'False': an inverse index is used to preselect instances, and these are used to get
the original data from the data set to answer a k_neighbor query. The
original data is stored in the memory.
If not passed, default value is what was passed to the constructor.
similarity: {True, False}, optional (default = None)
If true: cosine similarity is used
If false: Euclidean distance is used
If None: Value that was defined at the init is taken.
Returns
-------
dist : array, shape (n_samples,) of arrays
Array representing the distances to each point, only present if
return_distance=True. The distance values are computed according
to the ``metric`` constructor parameter.
ind : array, shape (n_samples,) of arrays
An array of arrays of indices of the approximate nearest points
from the population matrix that lie within a ball of size
``radius`` around the query points."""
return self._nearestNeighborsCppInterface.radius_neighbors(X=X, radius=radius,
return_distance=return_distance,
fast=fast, similarity=similarity, pAbsoluteNumbers=pAbsoluteNumbers)
def radius_neighbors_graph(self, X=None, radius=None, mode='connectivity', fast=None, symmetric=True, similarity=None, pAbsoluteNumbers=None):
"""Computes the (weighted) graph of Neighbors for points in X
        Neighborhoods are restricted to the points at a distance lower than
radius.
Parameters
----------
X : array-like, shape = [n_samples, n_features], optional
The query point or points.
If not provided, neighbors of each indexed point are returned.
In this case, the query point is not considered its own neighbor.
radius : float
Radius of neighborhoods.
(default is the value passed to the constructor).
mode : {'connectivity', 'distance'}, optional
Type of returned matrix: 'connectivity' will return the
connectivity matrix with ones and zeros, in 'distance' the
edges are Euclidean distance between points.
fast : bool, optional (default = 'False')
- 'True': will only use an inverse index to compute a k_neighbor query.
- 'False': an inverse index is used to preselect instances, and these are used to get
the original data from the data set to answer a k_neighbor query. The
original data is stored in the memory.
If not passed, default value is what was passed to the constructor.
similarity: {True, False}, optional (default = None)
If true: cosine similarity is used
If false: Euclidean distance is used
If None: Value that was defined at the init is taken.
symmetric: {True, False} (default = True)
If true the returned graph is symmetric, otherwise not.
Returns
-------
A : sparse matrix in CSR format, shape = [n_samples, n_samples]
A[i, j] is assigned the weight of edge that connects i to j."""
return self._nearestNeighborsCppInterface.radius_neighbors_graph(X=X, radius=radius, mode=mode,
fast=fast, symmetric=symmetric, similarity=similarity, pAbsoluteNumbers=pAbsoluteNumbers)
def fit_kneighbors(self, X, n_neighbors=None, return_distance=True, fast=None, similarity=None, pAbsoluteNumbers=None):
""""Fits and returns the n_neighbors of X.
Parameters
----------
X : {array-like, sparse matrix}, optional
Data point(s) to be searched for n_neighbors. Shape = [n_samples, n_features]
If not provided, neighbors of each indexed point are returned.
In this case, the query point is not considered its own neighbor.
n_neighbors : int, optional
Number of neighbors to get (default is the value passed to the constructor).
return_distance : boolean, optional. Defaults to True.
If False, distances will not be returned
fast : {'True', 'False'}, optional (default = 'None')
- 'True': will only use an inverse index to compute a k_neighbor query.
- 'False': an inverse index is used to preselect instances, and these are used to get
the original data from the data set to answer a k_neighbor query. The
original data is stored in the memory.
If not passed, default value is what was passed to the constructor.
similarity: {True, False}, optional (default = None)
If true: cosine similarity is used
If false: Euclidean distance is used
If None: Value that was defined at the init is taken.
Returns
-------
dist : array, shape = [n_samples, distances]
Array representing the lengths to points, only present if
return_distance=True
ind : array, shape = [n_samples, neighbors]
Indices of the nearest points in the population matrix."""
return self._nearestNeighborsCppInterface.fit_kneighbors(X=X, n_neighbors=n_neighbors,
return_distance=return_distance,
fast=fast, similarity=similarity, pAbsoluteNumbers=pAbsoluteNumbers)
def fit_kneighbor_graph(self, X, n_neighbors=None, mode='connectivity', fast=None, symmetric=True, similarity=None, pAbsoluteNumbers=None):
"""Fits and computes the (weighted) graph of k-Neighbors for points in X
Parameters
----------
X : array-like, last dimension same as that of fit data, optional
The query point or points.
If not provided, neighbors of each indexed point are returned.
In this case, the query point is not considered its own neighbor.
n_neighbors : int
Number of neighbors for each sample.
(default is value passed to the constructor).
mode : {'connectivity', 'distance'}, optional
Type of returned matrix: 'connectivity' will return the
connectivity matrix with ones and zeros, in 'distance' the
edges are Euclidean distance between points.
fast : {'True', 'False'}, optional (default = 'None')
- 'True': will only use an inverse index to compute a k_neighbor query.
- 'False': an inverse index is used to preselect instances, and these are used to get
the original data from the data set to answer a k_neighbor query. The
original data is stored in the memory.
If not passed, default value is what was passed to the constructor.
similarity: {True, False}, optional (default = None)
If true: cosine similarity is used
If false: Euclidean distance is used
If None: Value that was defined at the init is taken.
symmetric: {True, False} (default = True)
If true the returned graph is symmetric, otherwise not.
Returns
-------
A : sparse matrix in CSR format, shape = [n_samples, n_samples_fit]
n_samples_fit is the number of samples in the fitted data
A[i, j] is assigned the weight of edge that connects i to j.
"""
return self._nearestNeighborsCppInterface.fit_kneighbor_graph(X=X, n_neighbors=n_neighbors,
mode=mode, fast=fast,
symmetric=symmetric,
similarity=similarity, pAbsoluteNumbers=pAbsoluteNumbers)
def fit_radius_neighbors(self, X, radius=None, return_distance=None, fast=None, similarity=None, pAbsoluteNumbers=None):
"""Fits the data and finds the neighbors within a given radius of a point or points.
Return the indices and distances of each point from the dataset
lying in a ball with size ``radius`` around the points of the query
array. Points lying on the boundary are included in the results.
The result points are *not* necessarily sorted by distance to their
query point.
Parameters
----------
X : array-like, (n_samples, n_features), optional
The query point or points.
If not provided, neighbors of each indexed point are returned.
In this case, the query point is not considered its own neighbor.
radius : float
Limiting distance of neighbors to return.
(default is the value passed to the constructor).
return_distance : boolean, optional. Defaults to True.
If False, distances will not be returned
fast : bool, optional (default = 'False')
- 'True': will only use an inverse index to compute a k_neighbor query.
- 'False': an inverse index is used to preselect instances, and these are used to get
the original data from the data set to answer a k_neighbor query. The
original data is stored in the memory.
If not passed, default value is what was passed to the constructor.
similarity: {True, False}, optional (default = None)
If true: cosine similarity is used
If false: Euclidean distance is used
If None: Value that was defined at the init is taken.
Returns
-------
dist : array, shape (n_samples,) of arrays
Array representing the distances to each point, only present if
return_distance=True. The distance values are computed according
to the ``metric`` constructor parameter.
ind : array, shape (n_samples,) of arrays
An array of arrays of indices of the approximate nearest points
from the population matrix that lie within a ball of size
``radius`` around the query points."""
return self._nearestNeighborsCppInterface.fit_radius_neighbors(X=X, radius=radius,
return_distance=return_distance,
fast=fast, similarity=similarity, pAbsoluteNumbers=pAbsoluteNumbers)
def fit_radius_neighbors_graph(self, X, radius=None, mode='connectivity', fast=None, symmetric=True, similarity=None, pAbsoluteNumbers=None):
"""Fits and computes the (weighted) graph of Neighbors for points in X
        Neighborhoods are restricted to the points at a distance lower than
radius.
Parameters
----------
X : array-like, shape = [n_samples, n_features], optional
The query point or points.
If not provided, neighbors of each indexed point are returned.
In this case, the query point is not considered its own neighbor.
radius : float
Radius of neighborhoods.
(default is the value passed to the constructor).
mode : {'connectivity', 'distance'}, optional
Type of returned matrix: 'connectivity' will return the
connectivity matrix with ones and zeros, in 'distance' the
edges are Euclidean distance between points.
fast : bool, optional (default = 'False')
- 'True': will only use an inverse index to compute a k_neighbor query.
- 'False': an inverse index is used to preselect instances, and these are used to get
the original data from the data set to answer a k_neighbor query. The
original data is stored in the memory.
If not passed, default value is what was passed to the constructor.
similarity: {True, False}, optional (default = None)
If true: cosine similarity is used
If false: Euclidean distance is used
If None: Value that was defined at the init is taken.
symmetric: {True, False} (default = True)
If true the returned graph is symmetric, otherwise not.
Returns
-------
A : sparse matrix in CSR format, shape = [n_samples, n_samples]
A[i, j] is assigned the weight of edge that connects i to j."""
return self._nearestNeighborsCppInterface.fit_radius_neighbors_graph(X=X, radius=radius,
mode=mode, fast=fast,
symmetric=symmetric,
similarity=similarity, pAbsoluteNumbers=pAbsoluteNumbers)
def get_distribution_of_inverse_index(self):
"""Returns the number of created hash values per hash function,
the average size of elements per hash value per hash function,
the mean and the standard deviation."""
return self._nearestNeighborsCppInterface.get_distribution_of_inverse_index()
def _getY(self):
return self._nearestNeighborsCppInterface._getY()
def _getY_is_csr(self):
return self._nearestNeighborsCppInterface._getY_is_csr()
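# Usage sketch (not part of the original module; assumes the package and its
# C++ backend are built/installed, e.g. run via
# `python -m sparse_neighbors_search.neighbors.wtaHash`): exercises the
# fit/kneighbors interface documented above on a small random sparse matrix.
if __name__ == "__main__":
    import numpy as np
    rng = np.random.RandomState(0)
    X = csr_matrix(rng.binomial(1, 0.1, size=(100, 50)).astype(np.float64))
    wta = WtaHash(n_neighbors=3, number_of_hash_functions=100)
    wta.fit(X)
    distances, indices = wta.kneighbors(X[:5], return_distance=True)
    print(indices)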
| mit |
PrashntS/scikit-learn | benchmarks/bench_random_projections.py | 397 | 8900 | """
===========================
Random projection benchmark
===========================
Benchmarks for random projections.
"""
from __future__ import division
from __future__ import print_function
import gc
import sys
import optparse
from datetime import datetime
import collections
import numpy as np
import scipy.sparse as sp
from sklearn import clone
from sklearn.externals.six.moves import xrange
from sklearn.random_projection import (SparseRandomProjection,
GaussianRandomProjection,
johnson_lindenstrauss_min_dim)
def type_auto_or_float(val):
if val == "auto":
return "auto"
else:
return float(val)
def type_auto_or_int(val):
if val == "auto":
return "auto"
else:
return int(val)
def compute_time(t_start, delta):
mu_second = 0.0 + 10 ** 6 # number of microseconds in a second
return delta.seconds + delta.microseconds / mu_second
def bench_scikit_transformer(X, transformer):
    gc.collect()
    clf = clone(transformer)
# start time
t_start = datetime.now()
clf.fit(X)
delta = (datetime.now() - t_start)
# stop time
time_to_fit = compute_time(t_start, delta)
# start time
t_start = datetime.now()
clf.transform(X)
delta = (datetime.now() - t_start)
# stop time
time_to_transform = compute_time(t_start, delta)
return time_to_fit, time_to_transform
# Make some random data with uniformly located non zero entries with
# Gaussian distributed values
def make_sparse_random_data(n_samples, n_features, n_nonzeros,
random_state=None):
rng = np.random.RandomState(random_state)
data_coo = sp.coo_matrix(
(rng.randn(n_nonzeros),
(rng.randint(n_samples, size=n_nonzeros),
rng.randint(n_features, size=n_nonzeros))),
shape=(n_samples, n_features))
return data_coo.toarray(), data_coo.tocsr()
def print_row(clf_type, time_fit, time_transform):
print("%s | %s | %s" % (clf_type.ljust(30),
("%.4fs" % time_fit).center(12),
("%.4fs" % time_transform).center(12)))
if __name__ == "__main__":
###########################################################################
# Option parser
###########################################################################
op = optparse.OptionParser()
op.add_option("--n-times",
dest="n_times", default=5, type=int,
help="Benchmark results are average over n_times experiments")
op.add_option("--n-features",
dest="n_features", default=10 ** 4, type=int,
help="Number of features in the benchmarks")
op.add_option("--n-components",
dest="n_components", default="auto",
help="Size of the random subspace."
" ('auto' or int > 0)")
op.add_option("--ratio-nonzeros",
dest="ratio_nonzeros", default=10 ** -3, type=float,
help="Number of features in the benchmarks")
op.add_option("--n-samples",
dest="n_samples", default=500, type=int,
help="Number of samples in the benchmarks")
op.add_option("--random-seed",
dest="random_seed", default=13, type=int,
help="Seed used by the random number generators.")
op.add_option("--density",
dest="density", default=1 / 3,
help="Density used by the sparse random projection."
" ('auto' or float (0.0, 1.0]")
op.add_option("--eps",
dest="eps", default=0.5, type=float,
help="See the documentation of the underlying transformers.")
op.add_option("--transformers",
dest="selected_transformers",
default='GaussianRandomProjection,SparseRandomProjection',
type=str,
help="Comma-separated list of transformer to benchmark. "
"Default: %default. Available: "
"GaussianRandomProjection,SparseRandomProjection")
op.add_option("--dense",
dest="dense",
default=False,
action="store_true",
help="Set input space as a dense matrix.")
(opts, args) = op.parse_args()
if len(args) > 0:
op.error("this script takes no arguments.")
sys.exit(1)
opts.n_components = type_auto_or_int(opts.n_components)
opts.density = type_auto_or_float(opts.density)
selected_transformers = opts.selected_transformers.split(',')
###########################################################################
# Generate dataset
###########################################################################
n_nonzeros = int(opts.ratio_nonzeros * opts.n_features)
    print('Dataset statistics')
print("===========================")
print('n_samples \t= %s' % opts.n_samples)
print('n_features \t= %s' % opts.n_features)
if opts.n_components == "auto":
print('n_components \t= %s (auto)' %
johnson_lindenstrauss_min_dim(n_samples=opts.n_samples,
eps=opts.eps))
else:
print('n_components \t= %s' % opts.n_components)
print('n_elements \t= %s' % (opts.n_features * opts.n_samples))
print('n_nonzeros \t= %s per feature' % n_nonzeros)
print('ratio_nonzeros \t= %s' % opts.ratio_nonzeros)
print('')
###########################################################################
# Set transformer input
###########################################################################
transformers = {}
###########################################################################
# Set GaussianRandomProjection input
gaussian_matrix_params = {
"n_components": opts.n_components,
"random_state": opts.random_seed
}
transformers["GaussianRandomProjection"] = \
GaussianRandomProjection(**gaussian_matrix_params)
###########################################################################
# Set SparseRandomProjection input
sparse_matrix_params = {
"n_components": opts.n_components,
"random_state": opts.random_seed,
"density": opts.density,
"eps": opts.eps,
}
transformers["SparseRandomProjection"] = \
SparseRandomProjection(**sparse_matrix_params)
###########################################################################
# Perform benchmark
###########################################################################
time_fit = collections.defaultdict(list)
time_transform = collections.defaultdict(list)
print('Benchmarks')
print("===========================")
print("Generate dataset benchmarks... ", end="")
X_dense, X_sparse = make_sparse_random_data(opts.n_samples,
opts.n_features,
n_nonzeros,
random_state=opts.random_seed)
X = X_dense if opts.dense else X_sparse
print("done")
for name in selected_transformers:
print("Perform benchmarks for %s..." % name)
for iteration in xrange(opts.n_times):
print("\titer %s..." % iteration, end="")
            time_to_fit, time_to_transform = bench_scikit_transformer(X,
transformers[name])
time_fit[name].append(time_to_fit)
time_transform[name].append(time_to_transform)
print("done")
print("")
###########################################################################
# Print results
###########################################################################
print("Script arguments")
print("===========================")
arguments = vars(opts)
print("%s \t | %s " % ("Arguments".ljust(16),
"Value".center(12),))
print(25 * "-" + ("|" + "-" * 14) * 1)
for key, value in arguments.items():
print("%s \t | %s " % (str(key).ljust(16),
str(value).strip().center(12)))
print("")
print("Transformer performance:")
print("===========================")
print("Results are averaged over %s repetition(s)." % opts.n_times)
print("")
print("%s | %s | %s" % ("Transformer".ljust(30),
"fit".center(12),
"transform".center(12)))
print(31 * "-" + ("|" + "-" * 14) * 2)
for name in sorted(selected_transformers):
print_row(name,
np.mean(time_fit[name]),
np.mean(time_transform[name]))
print("")
print("")
| bsd-3-clause |
RomainBrault/scikit-learn | sklearn/semi_supervised/label_propagation.py | 39 | 16726 | # coding=utf8
"""
Label propagation in the context of this module refers to a set of
semi-supervised classification algorithms. At a high level, these algorithms
work by forming a fully-connected graph between all points given and solving
for the steady-state distribution of labels at each point.
These algorithms perform very well in practice. The cost of running can be very
expensive, at approximately O(N^3) where N is the number of (labeled and
unlabeled) points. The theory (why they perform so well) is motivated by
intuitions from random walk algorithms and geometric relationships in the data.
For more information see the references below.
Model Features
--------------
Label clamping:
The algorithm tries to learn distributions of labels over the dataset. In the
"Hard Clamp" mode, the true ground labels are never allowed to change. They
are clamped into position. In the "Soft Clamp" mode, they are allowed some
wiggle room, but some alpha of their original value will always be retained.
Hard clamp is the same as soft clamping with alpha set to 1.
Kernel:
A function which projects a vector into some higher dimensional space. This
implementation supports RBF and KNN kernels. Using the RBF kernel generates
a dense matrix of size O(N^2). KNN kernel will generate a sparse matrix of
size O(k*N) which will run much faster. See the documentation for SVMs for
more info on kernels.
Examples
--------
>>> from sklearn import datasets
>>> from sklearn.semi_supervised import LabelPropagation
>>> label_prop_model = LabelPropagation()
>>> iris = datasets.load_iris()
>>> random_unlabeled_points = np.where(np.random.randint(0, 2,
... size=len(iris.target)))
>>> labels = np.copy(iris.target)
>>> labels[random_unlabeled_points] = -1
>>> label_prop_model.fit(iris.data, labels)
... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
LabelPropagation(...)
Notes
-----
References:
[1] Yoshua Bengio, Olivier Delalleau, Nicolas Le Roux. In Semi-Supervised
Learning (2006), pp. 193-216
[2] Olivier Delalleau, Yoshua Bengio, Nicolas Le Roux. Efficient
Non-Parametric Function Induction in Semi-Supervised Learning. AISTAT 2005
"""
# Authors: Clay Woolam <[email protected]>
# License: BSD
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy import sparse
from ..base import BaseEstimator, ClassifierMixin
from ..externals import six
from ..metrics.pairwise import rbf_kernel
from ..neighbors.unsupervised import NearestNeighbors
from ..utils.extmath import safe_sparse_dot
from ..utils.graph import graph_laplacian
from ..utils.multiclass import check_classification_targets
from ..utils.validation import check_X_y, check_is_fitted, check_array
# Helper functions
def _not_converged(y_truth, y_prediction, tol=1e-3):
"""basic convergence check"""
return np.abs(y_truth - y_prediction).sum() > tol
class BaseLabelPropagation(six.with_metaclass(ABCMeta, BaseEstimator,
ClassifierMixin)):
"""Base class for label propagation module.
Parameters
----------
kernel : {'knn', 'rbf', callable}
String identifier for kernel function to use or the kernel function
itself. Only 'rbf' and 'knn' strings are valid inputs. The function
passed should take two inputs, each of shape [n_samples, n_features],
and return a [n_samples, n_samples] shaped weight matrix
gamma : float
Parameter for rbf kernel
alpha : float
Clamping factor
max_iter : float
Change maximum number of iterations allowed
tol : float
Convergence tolerance: threshold to consider the system at steady
state
n_neighbors : integer > 0
Parameter for knn kernel
n_jobs : int, optional (default = 1)
The number of parallel jobs to run.
If ``-1``, then the number of jobs is set to the number of CPU cores.
"""
def __init__(self, kernel='rbf', gamma=20, n_neighbors=7,
alpha=1, max_iter=30, tol=1e-3, n_jobs=1):
self.max_iter = max_iter
self.tol = tol
# kernel parameters
self.kernel = kernel
self.gamma = gamma
self.n_neighbors = n_neighbors
# clamping factor
self.alpha = alpha
self.n_jobs = n_jobs
def _get_kernel(self, X, y=None):
if self.kernel == "rbf":
if y is None:
return rbf_kernel(X, X, gamma=self.gamma)
else:
return rbf_kernel(X, y, gamma=self.gamma)
elif self.kernel == "knn":
if self.nn_fit is None:
self.nn_fit = NearestNeighbors(self.n_neighbors,
n_jobs=self.n_jobs).fit(X)
if y is None:
return self.nn_fit.kneighbors_graph(self.nn_fit._fit_X,
self.n_neighbors,
mode='connectivity')
else:
return self.nn_fit.kneighbors(y, return_distance=False)
elif callable(self.kernel):
if y is None:
return self.kernel(X, X)
else:
return self.kernel(X, y)
else:
raise ValueError("%s is not a valid kernel. Only rbf and knn"
" or an explicit function "
" are supported at this time." % self.kernel)
@abstractmethod
def _build_graph(self):
raise NotImplementedError("Graph construction must be implemented"
" to fit a label propagation model.")
def predict(self, X):
"""Performs inductive inference across the model.
Parameters
----------
X : array_like, shape = [n_samples, n_features]
Returns
-------
y : array_like, shape = [n_samples]
Predictions for input data
"""
probas = self.predict_proba(X)
return self.classes_[np.argmax(probas, axis=1)].ravel()
def predict_proba(self, X):
"""Predict probability for each possible outcome.
Compute the probability estimates for each single sample in X
and each possible outcome seen during training (categorical
distribution).
Parameters
----------
X : array_like, shape = [n_samples, n_features]
Returns
-------
probabilities : array, shape = [n_samples, n_classes]
Normalized probability distributions across
class labels
"""
check_is_fitted(self, 'X_')
X_2d = check_array(X, accept_sparse=['csc', 'csr', 'coo', 'dok',
'bsr', 'lil', 'dia'])
weight_matrices = self._get_kernel(self.X_, X_2d)
if self.kernel == 'knn':
probabilities = []
for weight_matrix in weight_matrices:
ine = np.sum(self.label_distributions_[weight_matrix], axis=0)
probabilities.append(ine)
probabilities = np.array(probabilities)
else:
weight_matrices = weight_matrices.T
probabilities = np.dot(weight_matrices, self.label_distributions_)
normalizer = np.atleast_2d(np.sum(probabilities, axis=1)).T
probabilities /= normalizer
return probabilities
def fit(self, X, y):
"""Fit a semi-supervised label propagation model based
All the input data is provided matrix X (labeled and unlabeled)
and corresponding label matrix y with a dedicated marker value for
unlabeled samples.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
A {n_samples by n_samples} size matrix will be created from this
y : array_like, shape = [n_samples]
n_labeled_samples (unlabeled points are marked as -1)
All unlabeled samples will be transductively assigned labels
Returns
-------
self : returns an instance of self.
"""
X, y = check_X_y(X, y)
self.X_ = X
check_classification_targets(y)
# actual graph construction (implementations should override this)
graph_matrix = self._build_graph()
# label construction
# construct a categorical distribution for classification only
classes = np.unique(y)
classes = (classes[classes != -1])
self.classes_ = classes
n_samples, n_classes = len(y), len(classes)
y = np.asarray(y)
unlabeled = y == -1
clamp_weights = np.ones((n_samples, 1))
clamp_weights[unlabeled, 0] = self.alpha
# initialize distributions
self.label_distributions_ = np.zeros((n_samples, n_classes))
for label in classes:
self.label_distributions_[y == label, classes == label] = 1
y_static = np.copy(self.label_distributions_)
if self.alpha > 0.:
y_static *= 1 - self.alpha
y_static[unlabeled] = 0
l_previous = np.zeros((self.X_.shape[0], n_classes))
remaining_iter = self.max_iter
if sparse.isspmatrix(graph_matrix):
graph_matrix = graph_matrix.tocsr()
while (_not_converged(self.label_distributions_, l_previous, self.tol)
and remaining_iter > 1):
l_previous = self.label_distributions_
self.label_distributions_ = safe_sparse_dot(
graph_matrix, self.label_distributions_)
# clamp
self.label_distributions_ = np.multiply(
clamp_weights, self.label_distributions_) + y_static
remaining_iter -= 1
normalizer = np.sum(self.label_distributions_, axis=1)[:, np.newaxis]
self.label_distributions_ /= normalizer
# set the transduction item
transduction = self.classes_[np.argmax(self.label_distributions_,
axis=1)]
self.transduction_ = transduction.ravel()
self.n_iter_ = self.max_iter - remaining_iter
return self
class LabelPropagation(BaseLabelPropagation):
"""Label Propagation classifier
Read more in the :ref:`User Guide <label_propagation>`.
Parameters
----------
kernel : {'knn', 'rbf', callable}
String identifier for kernel function to use or the kernel function
itself. Only 'rbf' and 'knn' strings are valid inputs. The function
passed should take two inputs, each of shape [n_samples, n_features],
and return a [n_samples, n_samples] shaped weight matrix.
gamma : float
Parameter for rbf kernel
n_neighbors : integer > 0
Parameter for knn kernel
alpha : float
Clamping factor
max_iter : float
Change maximum number of iterations allowed
tol : float
Convergence tolerance: threshold to consider the system at steady
state
Attributes
----------
X_ : array, shape = [n_samples, n_features]
Input array.
classes_ : array, shape = [n_classes]
The distinct labels used in classifying instances.
label_distributions_ : array, shape = [n_samples, n_classes]
Categorical distribution for each item.
transduction_ : array, shape = [n_samples]
Label assigned to each item via the transduction.
n_iter_ : int
Number of iterations run.
Examples
--------
>>> from sklearn import datasets
>>> from sklearn.semi_supervised import LabelPropagation
>>> label_prop_model = LabelPropagation()
>>> iris = datasets.load_iris()
>>> random_unlabeled_points = np.where(np.random.randint(0, 2,
... size=len(iris.target)))
>>> labels = np.copy(iris.target)
>>> labels[random_unlabeled_points] = -1
>>> label_prop_model.fit(iris.data, labels)
... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
LabelPropagation(...)
References
----------
Xiaojin Zhu and Zoubin Ghahramani. Learning from labeled and unlabeled data
with label propagation. Technical Report CMU-CALD-02-107, Carnegie Mellon
University, 2002 http://pages.cs.wisc.edu/~jerryzhu/pub/CMU-CALD-02-107.pdf
See Also
--------
LabelSpreading : Alternate label propagation strategy more robust to noise
"""
def _build_graph(self):
"""Matrix representing a fully connected graph between each sample
This basic implementation creates a non-stochastic affinity matrix, so
class distributions will exceed 1 (normalization may be desired).
"""
if self.kernel == 'knn':
self.nn_fit = None
affinity_matrix = self._get_kernel(self.X_)
normalizer = affinity_matrix.sum(axis=0)
if sparse.isspmatrix(affinity_matrix):
affinity_matrix.data /= np.diag(np.array(normalizer))
else:
affinity_matrix /= normalizer[:, np.newaxis]
return affinity_matrix
class LabelSpreading(BaseLabelPropagation):
"""LabelSpreading model for semi-supervised learning
This model is similar to the basic Label Propagation algorithm,
but uses affinity matrix based on the normalized graph Laplacian
and soft clamping across the labels.
Read more in the :ref:`User Guide <label_propagation>`.
Parameters
----------
kernel : {'knn', 'rbf', callable}
String identifier for kernel function to use or the kernel function
itself. Only 'rbf' and 'knn' strings are valid inputs. The function
passed should take two inputs, each of shape [n_samples, n_features],
and return a [n_samples, n_samples] shaped weight matrix
gamma : float
parameter for rbf kernel
n_neighbors : integer > 0
parameter for knn kernel
alpha : float
clamping factor
max_iter : float
maximum number of iterations allowed
tol : float
Convergence tolerance: threshold to consider the system at steady
state
n_jobs : int, optional (default = 1)
The number of parallel jobs to run.
If ``-1``, then the number of jobs is set to the number of CPU cores.
Attributes
----------
X_ : array, shape = [n_samples, n_features]
Input array.
classes_ : array, shape = [n_classes]
The distinct labels used in classifying instances.
label_distributions_ : array, shape = [n_samples, n_classes]
Categorical distribution for each item.
transduction_ : array, shape = [n_samples]
Label assigned to each item via the transduction.
n_iter_ : int
Number of iterations run.
Examples
--------
>>> from sklearn import datasets
>>> from sklearn.semi_supervised import LabelSpreading
>>> label_prop_model = LabelSpreading()
>>> iris = datasets.load_iris()
>>> random_unlabeled_points = np.where(np.random.randint(0, 2,
... size=len(iris.target)))
>>> labels = np.copy(iris.target)
>>> labels[random_unlabeled_points] = -1
>>> label_prop_model.fit(iris.data, labels)
... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
LabelSpreading(...)
References
----------
Dengyong Zhou, Olivier Bousquet, Thomas Navin Lal, Jason Weston,
Bernhard Schoelkopf. Learning with local and global consistency (2004)
http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.115.3219
See Also
--------
LabelPropagation : Unregularized graph based semi-supervised learning
"""
def __init__(self, kernel='rbf', gamma=20, n_neighbors=7, alpha=0.2,
max_iter=30, tol=1e-3, n_jobs=1):
# this one has different base parameters
super(LabelSpreading, self).__init__(kernel=kernel, gamma=gamma,
n_neighbors=n_neighbors,
alpha=alpha, max_iter=max_iter,
tol=tol,
n_jobs=n_jobs)
def _build_graph(self):
"""Graph matrix for Label Spreading computes the graph laplacian"""
# compute affinity matrix (or gram matrix)
if self.kernel == 'knn':
self.nn_fit = None
n_samples = self.X_.shape[0]
affinity_matrix = self._get_kernel(self.X_)
laplacian = graph_laplacian(affinity_matrix, normed=True)
laplacian = -laplacian
if sparse.isspmatrix(laplacian):
diag_mask = (laplacian.row == laplacian.col)
laplacian.data[diag_mask] = 0.0
else:
laplacian.flat[::n_samples + 1] = 0.0 # set diag to 0.0
return laplacian
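# Usage sketch (not part of the original scikit-learn module; mirrors the
# docstring example above): hide most iris labels with the -1 marker and let
# LabelSpreading infer them transductively.
if __name__ == "__main__":
    from sklearn import datasets
    iris = datasets.load_iris()
    rng = np.random.RandomState(42)
    labels = np.copy(iris.target)
    labels[rng.rand(len(labels)) < 0.7] = -1
    label_prop_model = LabelSpreading(kernel='knn', alpha=0.8)
    label_prop_model.fit(iris.data, labels)
    print(label_prop_model.transduction_[:10])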
| bsd-3-clause |
comepradz/pybrain | examples/rl/environments/shipsteer/shipbench_sde.py | 26 | 3454 | from __future__ import print_function
#!/usr/bin/env python
#########################################################################
# Reinforcement Learning with SPE on the ShipSteering Environment
#
# Requirements:
# pybrain (tested on rev. 1195, ship env rev. 1202)
# Synopsis:
# shipbenchm.py [<True|False> [logfile]]
# (first argument is graphics flag)
#########################################################################
__author__ = "Martin Felder, Thomas Rueckstiess"
__version__ = '$Id$'
#---
# default backend GtkAgg does not plot properly on Ubuntu 8.04
import matplotlib
matplotlib.use('TkAgg')
#---
from pybrain.rl.environments.shipsteer import ShipSteeringEnvironment
from pybrain.rl.environments.shipsteer import GoNorthwardTask
from pybrain.rl.agents import LearningAgent
from pybrain.rl.learners.directsearch.enac import ENAC
from pybrain.rl.experiments.episodic import EpisodicExperiment
from pybrain.tools.shortcuts import buildNetwork
from pybrain.tools.plotting import MultilinePlotter
from pylab import figure, ion
from scipy import mean
import sys
if len(sys.argv) > 1:
useGraphics = eval(sys.argv[1])
else:
useGraphics = False
# create task
env=ShipSteeringEnvironment()
maxsteps = 500
task = GoNorthwardTask(env=env, maxsteps = maxsteps)
# task.env.setRenderer( CartPoleRenderer())
# create controller network
#net = buildNetwork(task.outdim, 7, task.indim, bias=True, outputbias=False)
net = buildNetwork(task.outdim, task.indim, bias=False)
#net.initParams(0.0)
# create agent
learner = ENAC()
learner.gd.rprop = True
# only relevant for RP
learner.gd.deltamin = 0.0001
#agent.learner.gd.deltanull = 0.05
# only relevant for BP
learner.gd.alpha = 0.01
learner.gd.momentum = 0.9
agent = LearningAgent(net, learner)
agent.actaspg = False
# create experiment
experiment = EpisodicExperiment(task, agent)
# print weights at beginning
print(agent.module.params)
rewards = []
if useGraphics:
figure()
ion()
pl = MultilinePlotter(autoscale=1.2, xlim=[0, 50], ylim=[0, 1])
pl.setLineStyle(linewidth=2)
# queued version
# experiment._fillQueue(30)
# while True:
# experiment._stepQueueLoop()
# # rewards.append(mean(agent.history.getSumOverSequences('reward')))
# print agent.module.getParameters(),
# print mean(agent.history.getSumOverSequences('reward'))
# clf()
# plot(rewards)
# episodic version
x = 0
batch = 30 #number of samples per gradient estimate (was: 20; more here due to stochastic setting)
while x<5000:
#while True:
experiment.doEpisodes(batch)
x += batch
reward = mean(agent.history.getSumOverSequences('reward'))*task.rewardscale
if useGraphics:
pl.addData(0,x,reward)
print(agent.module.params)
print(reward)
#if reward > 3:
# pass
agent.learn()
agent.reset()
if useGraphics:
pl.update()
if len(sys.argv) > 2:
    agent.history.saveToFile(sys.argv[2], protocol=-1, arraysonly=True)
if useGraphics:
pl.show( popup = True)
#To view what the simulation is doing at the moment, set the environment with True, go to pybrain/rl/environments/ode/ and start viewer.py (python-openGL must be installed, see PyBrain documentation)
## performance:
## experiment.doEpisodes(5) * 100 without weave:
## real 2m39.683s
## user 2m33.358s
## sys 0m5.960s
## experiment.doEpisodes(5) * 100 with weave:
##real 2m41.275s
##user 2m35.310s
##sys 0m5.192s
##
| bsd-3-clause |
madjam/mxnet | example/gluon/kaggle_k_fold_cross_validation.py | 25 | 6871 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# This example provides an end-to-end pipeline for a common Kaggle competition.
# The entire pipeline includes common utilities such as k-fold cross validation
# and data pre-processing.
#
# Specifically, the example studies the `House Prices: Advanced Regression
# Techniques` challenge as a case study.
#
# The link to the problem on Kaggle:
# https://www.kaggle.com/c/house-prices-advanced-regression-techniques
import numpy as np
import pandas as pd
from mxnet import autograd
from mxnet import gluon
from mxnet import ndarray as nd
# After logging in www.kaggle.com, the training and testing data sets can be downloaded at:
# https://www.kaggle.com/c/house-prices-advanced-regression-techniques/download/train.csv
# https://www.kaggle.com/c/house-prices-advanced-regression-techniques/download/test.csv
train = pd.read_csv("train.csv")
test = pd.read_csv("test.csv")
all_X = pd.concat((train.loc[:, 'MSSubClass':'SaleCondition'],
test.loc[:, 'MSSubClass':'SaleCondition']))
# Get all the numerical features and apply standardization.
numeric_feas = all_X.dtypes[all_X.dtypes != "object"].index
all_X[numeric_feas] = all_X[numeric_feas].apply(lambda x:
(x - x.mean()) / (x.std()))
# Convert categorical feature values to numerical (including N/A).
all_X = pd.get_dummies(all_X, dummy_na=True)
# Approximate N/A feature value by the mean value of the current feature.
all_X = all_X.fillna(all_X.mean())
num_train = train.shape[0]
# Convert data formats to NDArrays to feed into gluon.
X_train = all_X[:num_train].as_matrix()
X_test = all_X[num_train:].as_matrix()
y_train = train.SalePrice.as_matrix()
X_train = nd.array(X_train)
y_train = nd.array(y_train)
y_train = y_train.reshape((num_train, 1))
X_test = nd.array(X_test)
square_loss = gluon.loss.L2Loss()
def get_rmse_log(net, X_train, y_train):
"""Gets root mse between the logarithms of the prediction and the truth."""
num_train = X_train.shape[0]
clipped_preds = nd.clip(net(X_train), 1, float('inf'))
return np.sqrt(2 * nd.sum(square_loss(
nd.log(clipped_preds), nd.log(y_train))).asscalar() / num_train)
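# Note on get_rmse_log: gluon's L2Loss computes 0.5 * (x - y) ** 2, so the
# factor of 2 above cancels the 0.5 and the returned value is the plain RMSE
# between the logarithms of the predictions and the true sale prices.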
def get_net():
"""Gets a neural network. Better results are obtained with modifications."""
net = gluon.nn.Sequential()
with net.name_scope():
net.add(gluon.nn.Dense(50, activation="relu"))
net.add(gluon.nn.Dense(1))
net.initialize()
return net
def train(net, X_train, y_train, epochs, verbose_epoch, learning_rate,
weight_decay, batch_size):
"""Trains the model."""
dataset_train = gluon.data.ArrayDataset(X_train, y_train)
data_iter_train = gluon.data.DataLoader(dataset_train, batch_size,
shuffle=True)
trainer = gluon.Trainer(net.collect_params(), 'adam',
{'learning_rate': learning_rate,
'wd': weight_decay})
net.collect_params().initialize(force_reinit=True)
for epoch in range(epochs):
for data, label in data_iter_train:
with autograd.record():
output = net(data)
loss = square_loss(output, label)
loss.backward()
trainer.step(batch_size)
avg_loss = get_rmse_log(net, X_train, y_train)
if epoch > verbose_epoch:
print("Epoch %d, train loss: %f" % (epoch, avg_loss))
return avg_loss
def k_fold_cross_valid(k, epochs, verbose_epoch, X_train, y_train,
learning_rate, weight_decay, batch_size):
"""Conducts k-fold cross validation for the model."""
assert k > 1
fold_size = X_train.shape[0] // k
train_loss_sum = 0.0
test_loss_sum = 0.0
for test_idx in range(k):
X_val_test = X_train[test_idx * fold_size: (test_idx + 1) *
fold_size, :]
y_val_test = y_train[test_idx * fold_size: (test_idx + 1) * fold_size]
val_train_defined = False
for i in range(k):
if i != test_idx:
X_cur_fold = X_train[i * fold_size: (i + 1) * fold_size, :]
y_cur_fold = y_train[i * fold_size: (i + 1) * fold_size]
if not val_train_defined:
X_val_train = X_cur_fold
y_val_train = y_cur_fold
val_train_defined = True
else:
X_val_train = nd.concat(X_val_train, X_cur_fold, dim=0)
y_val_train = nd.concat(y_val_train, y_cur_fold, dim=0)
net = get_net()
train_loss = train(net, X_val_train, y_val_train, epochs, verbose_epoch,
learning_rate, weight_decay, batch_size)
train_loss_sum += train_loss
test_loss = get_rmse_log(net, X_val_test, y_val_test)
print("Test loss: %f" % test_loss)
test_loss_sum += test_loss
return train_loss_sum / k, test_loss_sum / k
# The sets of parameters. Better results are obtained with modifications.
# These parameters can be fine-tuned with k-fold cross-validation.
k = 5
epochs = 100
verbose_epoch = 95
learning_rate = 0.3
weight_decay = 100
batch_size = 100
train_loss, test_loss = \
k_fold_cross_valid(k, epochs, verbose_epoch, X_train, y_train,
learning_rate, weight_decay, batch_size)
print("%d-fold validation: Avg train loss: %f, Avg test loss: %f" %
(k, train_loss, test_loss))
def learn(epochs, verbose_epoch, X_train, y_train, test, learning_rate,
weight_decay, batch_size):
"""Trains the model and predicts on the test data set."""
net = get_net()
_ = train(net, X_train, y_train, epochs, verbose_epoch, learning_rate,
weight_decay, batch_size)
preds = net(X_test).asnumpy()
test['SalePrice'] = pd.Series(preds.reshape(1, -1)[0])
submission = pd.concat([test['Id'], test['SalePrice']], axis=1)
submission.to_csv('submission.csv', index=False)
learn(epochs, verbose_epoch, X_train, y_train, test, learning_rate,
weight_decay, batch_size)
| apache-2.0 |
theodoregoetz/wernher | examples/ground_track.py | 1 | 1703 | import wernher
from matplotlib import pyplot
import numpy as np
π = np.pi
deg = π/180
km = 1000
body = wernher.CelestialBody(
name = 'kerbin',
gravitational_parameter = 3.5316e12,
equatorial_radius = 600*km,
rotational_speed = 2*π/21600)
orbit = wernher.Orbit(
t0 = 0,
i = 30*deg,
Ω = 0*deg,
ω = 15*deg,
pe_alt = 100*km,
ap_alt = 200*km,
M0 = -45*deg,
body = body)
# ground track consists of 200 points
npoints = 200
# start in the past by 1/4 of the orbital period
tmin = orbit.epoch - 0.25*orbit.period
# plot 1.5 periods of ground track
tmax = tmin + 1.5*orbit.period
# array of times - evenly spaced
tt = np.linspace(tmin,tmax,npoints)
# arrays of latitudes and longitudes, converted to degrees
lat = orbit.latitude_at_time(tt) / deg
lon = orbit.longitude_at_time(tt) / deg
# calculate radius (used to set the color of the track)
r = orbit.radius_at_time(tt)
# create figure and add map view, track and position marker
fig,ax = pyplot.subplots()
mview = wernher.MapView(orbit.body)
mview.zoomlevel = 1
mview.plot_basemap(ax)
# plot ground track
tk = wernher.MapView.plot_track(ax,lat,lon,r)
# place marker for the vessel location
mk = wernher.MapView.plot_marker(ax,
orbit.latitude_at_epoch/deg,
orbit.longitude_at_epoch/deg,
color='red')
# next periapsis marker
mk = wernher.MapView.plot_marker(ax,
orbit.latitude_at_periapsis()/deg,
orbit.longitude_at_periapsis()/deg,
marker='v', color='cyan')
# next apoapsis marker
mk = wernher.MapView.plot_marker(ax,
orbit.latitude_at_apoapsis()/deg,
orbit.longitude_at_apoapsis()/deg,
marker='^', color='magenta')
# show plot in new window
pyplot.show()
| gpl-3.0 |
evanbiederstedt/RRBSfun | epiphen/cll_tests/total_CLL_chr07.py | 1 | 8306 | import glob
import pandas as pd
import numpy as np
pd.set_option('display.max_columns', 50)  # display up to 50 columns when printing
import os
os.chdir("/gpfs/commons/home/biederstedte-934/evan_projects/correct_phylo_files")
cw154 = glob.glob("binary_position_RRBS_cw154*")
trito = glob.glob("binary_position_RRBS_trito_pool*")
print(len(cw154))
print(len(trito))
totalfiles = cw154 + trito
print(len(totalfiles))
df_list = []
for file in totalfiles:
df = pd.read_csv(file)
df = df.drop("Unnamed: 0", axis=1)
df["chromosome"] = df["position"].map(lambda x: str(x)[:5])
df = df[df["chromosome"] == "chr7_"]
df = df.drop("chromosome", axis=1)
df_list.append(df)
print(len(df_list))
total_matrix = pd.concat([df.set_index("position") for df in df_list], axis=1).reset_index().astype(object)
total_matrix = total_matrix.drop("index", axis=1)
len(total_matrix.columns)
total_matrix.columns = ["RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.ACAACC",
"RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.ACCGCG",
"RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.ACGTGG",
"RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.ACTCAC",
"RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.AGGATG",
"RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.ATAGCG",
"RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.ATCGAC",
"RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.CAAGAG",
"RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.CATGAC",
"RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.CCTTCG",
"RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.CGGTAG",
"RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.CTCAGC",
"RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.GACACG",
"RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.GCATTC",
"RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.GCTGCC",
"RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.GGCATC",
"RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.GTGAGG",
"RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.TAGCGG",
"RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.TATCTC",
"RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.TCTCTG",
"RRBS_cw154_Tris_protease_CTCTCTAC.ACAACC",
"RRBS_cw154_Tris_protease_CTCTCTAC.ACCGCG",
"RRBS_cw154_Tris_protease_CTCTCTAC.ACGTGG",
"RRBS_cw154_Tris_protease_CTCTCTAC.ACTCAC",
"RRBS_cw154_Tris_protease_CTCTCTAC.AGGATG",
"RRBS_cw154_Tris_protease_CTCTCTAC.ATAGCG",
"RRBS_cw154_Tris_protease_CTCTCTAC.ATCGAC",
"RRBS_cw154_Tris_protease_CTCTCTAC.CATGAC",
"RRBS_cw154_Tris_protease_CTCTCTAC.CCTTCG",
"RRBS_cw154_Tris_protease_CTCTCTAC.CGGTAG",
"RRBS_cw154_Tris_protease_CTCTCTAC.CTATTG",
"RRBS_cw154_Tris_protease_CTCTCTAC.CTCAGC",
"RRBS_cw154_Tris_protease_CTCTCTAC.GACACG",
"RRBS_cw154_Tris_protease_CTCTCTAC.GCATTC",
"RRBS_cw154_Tris_protease_CTCTCTAC.GCTGCC",
"RRBS_cw154_Tris_protease_CTCTCTAC.GGCATC",
"RRBS_cw154_Tris_protease_CTCTCTAC.GTGAGG",
"RRBS_cw154_Tris_protease_CTCTCTAC.GTTGAG",
"RRBS_cw154_Tris_protease_CTCTCTAC.TAGCGG",
"RRBS_cw154_Tris_protease_CTCTCTAC.TATCTC",
"RRBS_cw154_Tris_protease_CTCTCTAC.TCTCTG",
"RRBS_cw154_Tris_protease_GR_CAGAGAGG.ACAACC",
"RRBS_cw154_Tris_protease_GR_CAGAGAGG.ACCGCG",
"RRBS_cw154_Tris_protease_GR_CAGAGAGG.ACGTGG",
"RRBS_cw154_Tris_protease_GR_CAGAGAGG.ACTCAC",
"RRBS_cw154_Tris_protease_GR_CAGAGAGG.AGGATG",
"RRBS_cw154_Tris_protease_GR_CAGAGAGG.ATAGCG",
"RRBS_cw154_Tris_protease_GR_CAGAGAGG.ATCGAC",
"RRBS_cw154_Tris_protease_GR_CAGAGAGG.CATGAC",
"RRBS_cw154_Tris_protease_GR_CAGAGAGG.CCTTCG",
"RRBS_cw154_Tris_protease_GR_CAGAGAGG.CGGTAG",
"RRBS_cw154_Tris_protease_GR_CAGAGAGG.CTATTG",
"RRBS_cw154_Tris_protease_GR_CAGAGAGG.CTCAGC",
"RRBS_cw154_Tris_protease_GR_CAGAGAGG.GACACG",
"RBS_cw154_Tris_protease_GR_CAGAGAGG.GCATTC",
"RRBS_cw154_Tris_protease_GR_CAGAGAGG.GCTGCC",
"RRBS_cw154_Tris_protease_GR_CAGAGAGG.GGCATC",
"RRBS_cw154_Tris_protease_GR_CAGAGAGG.GTGAGG",
"RRBS_cw154_Tris_protease_GR_CAGAGAGG.GTTGAG",
"RRBS_cw154_Tris_protease_GR_CAGAGAGG.TAGCGG",
"RRBS_cw154_Tris_protease_GR_CAGAGAGG.TATCTC",
"RRBS_cw154_Tris_protease_GR_CAGAGAGG.TCTCTG",
"RRBS_trito_pool_1_TAAGGCGA.ACAACC",
"RRBS_trito_pool_1_TAAGGCGA.ACGTGG",
"RRBS_trito_pool_1_TAAGGCGA.ACTCAC",
"RRBS_trito_pool_1_TAAGGCGA.ATAGCG",
"RRBS_trito_pool_1_TAAGGCGA.ATCGAC",
"RRBS_trito_pool_1_TAAGGCGA.CAAGAG",
"RRBS_trito_pool_1_TAAGGCGA.CATGAC",
"RRBS_trito_pool_1_TAAGGCGA.CCTTCG",
"RRBS_trito_pool_1_TAAGGCGA.CGGTAG",
"RRBS_trito_pool_1_TAAGGCGA.CTATTG",
"RRBS_trito_pool_1_TAAGGCGA.GACACG",
"RRBS_trito_pool_1_TAAGGCGA.GCATTC",
"RRBS_trito_pool_1_TAAGGCGA.GCTGCC",
"RRBS_trito_pool_1_TAAGGCGA.GGCATC",
"RRBS_trito_pool_1_TAAGGCGA.GTGAGG",
"RRBS_trito_pool_1_TAAGGCGA.GTTGAG",
"RRBS_trito_pool_1_TAAGGCGA.TAGCGG",
"RRBS_trito_pool_1_TAAGGCGA.TATCTC",
"RRBS_trito_pool_1_TAAGGCGA.TCTCTG",
"RRBS_trito_pool_1_TAAGGCGA.TGACAG",
"RRBS_trito_pool_1_TAAGGCGA.TGCTGC",
"RRBS_trito_pool_2_CGTACTAG.ACAACC",
"RRBS_trito_pool_2_CGTACTAG.ACGTGG",
"RRBS_trito_pool_2_CGTACTAG.ACTCAC",
"RRBS_trito_pool_2_CGTACTAG.AGGATG",
"RRBS_trito_pool_2_CGTACTAG.ATAGCG",
"RRBS_trito_pool_2_CGTACTAG.ATCGAC",
"RRBS_trito_pool_2_CGTACTAG.CAAGAG",
"RRBS_trito_pool_2_CGTACTAG.CATGAC",
"RRBS_trito_pool_2_CGTACTAG.CCTTCG",
"RRBS_trito_pool_2_CGTACTAG.CGGTAG",
"RRBS_trito_pool_2_CGTACTAG.CTATTG",
"RRBS_trito_pool_2_CGTACTAG.GACACG",
"RRBS_trito_pool_2_CGTACTAG.GCATTC",
"RRBS_trito_pool_2_CGTACTAG.GCTGCC",
"RRBS_trito_pool_2_CGTACTAG.GGCATC",
"RRBS_trito_pool_2_CGTACTAG.GTGAGG",
"RRBS_trito_pool_2_CGTACTAG.GTTGAG",
"RRBS_trito_pool_2_CGTACTAG.TAGCGG",
"RRBS_trito_pool_2_CGTACTAG.TATCTC",
"RRBS_trito_pool_2_CGTACTAG.TCTCTG",
"RRBS_trito_pool_2_CGTACTAG.TGACAG"]
print(total_matrix.shape)
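# Encode each methylation call as a character ("0"/"1", with "?" for missing),
# collapse every sample column into one string, and prepend the sample name so
# that each output line reads "<sample> <call string>" (a PHYLIP-style record).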
total_matrix = total_matrix.applymap(lambda x: int(x) if pd.notnull(x) else str("?"))
total_matrix = total_matrix.astype(str).apply(''.join)
tott = pd.Series(total_matrix.index.astype(str).str.cat(total_matrix.astype(str),' '))
os.chdir("/gpfs/commons/home/biederstedte-934/evan_projects/CLL_tests")
tott.to_csv("total_CLL_chrom07.phy", header=None, index=None)
print(tott.shape)
| mit |
rrohan/scikit-learn | examples/linear_model/plot_sgd_loss_functions.py | 249 | 1095 | """
==========================
SGD: convex loss functions
==========================
A plot that compares the various convex loss functions supported by
:class:`sklearn.linear_model.SGDClassifier` .
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
def modified_huber_loss(y_true, y_pred):
z = y_pred * y_true
loss = -4 * z
loss[z >= -1] = (1 - z[z >= -1]) ** 2
loss[z >= 1.] = 0
return loss
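# Piecewise form of the modified Huber loss computed above, with z = y * f(x):
#   L(z) = -4z          for z < -1
#   L(z) = (1 - z)**2   for -1 <= z < 1
#   L(z) = 0            for z >= 1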
xmin, xmax = -4, 4
xx = np.linspace(xmin, xmax, 100)
plt.plot([xmin, 0, 0, xmax], [1, 1, 0, 0], 'k-',
label="Zero-one loss")
plt.plot(xx, np.where(xx < 1, 1 - xx, 0), 'g-',
label="Hinge loss")
plt.plot(xx, -np.minimum(xx, 0), 'm-',
label="Perceptron loss")
plt.plot(xx, np.log2(1 + np.exp(-xx)), 'r-',
label="Log loss")
plt.plot(xx, np.where(xx < 1, 1 - xx, 0) ** 2, 'b-',
label="Squared hinge loss")
plt.plot(xx, modified_huber_loss(xx, 1), 'y--',
label="Modified Huber loss")
plt.ylim((0, 8))
plt.legend(loc="upper right")
plt.xlabel(r"Decision function $f(x)$")
plt.ylabel("$L(y, f(x))$")
plt.show()
| bsd-3-clause |
treycausey/scikit-learn | sklearn/cluster/tests/test_hierarchical.py | 1 | 12293 | """
Several basic tests for hierarchical clustering procedures
"""
# Authors: Vincent Michel, 2010, Gael Varoquaux 2012
# License: BSD 3 clause
from tempfile import mkdtemp
import warnings
import numpy as np
from scipy import sparse
from scipy.cluster import hierarchy
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.cluster import Ward, WardAgglomeration, ward_tree
from sklearn.cluster import AgglomerativeClustering, FeatureAgglomeration
from sklearn.cluster.hierarchical import (_hc_cut, _TREE_BUILDERS,
linkage_tree)
from sklearn.feature_extraction.image import grid_to_graph
from sklearn.metrics.pairwise import PAIRED_DISTANCES, cosine_distances,\
manhattan_distances
from sklearn.metrics.cluster import normalized_mutual_info_score
from sklearn.cluster._hierarchical import average_merge, max_merge
from sklearn.utils.fast_dict import IntFloatDict
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_warns
def test_linkage_misc():
# Misc tests on linkage
X = np.ones((5, 5))
assert_raises(ValueError,
AgglomerativeClustering(linkage='foobar').fit,
X)
assert_raises(ValueError, linkage_tree, X, linkage='foobar')
assert_raises(ValueError, linkage_tree, X, connectivity=np.ones((4, 4)))
# Smoke test FeatureAgglomeration
FeatureAgglomeration().fit(X)
with warnings.catch_warnings(record=True) as warning_list:
warnings.simplefilter("always", DeprecationWarning)
# Use the copy argument, to raise a warning
Ward(copy=True).fit(X)
# We should be getting 2 warnings: one for using Ward that is
# deprecated, one for using the copy argument
assert_equal(len(warning_list), 2)
    # test hierarchical clustering on a precomputed distance matrix
dis = cosine_distances(X)
res = linkage_tree(dis, affinity="precomputed")
assert_array_equal(res[0], linkage_tree(X, affinity="cosine")[0])
    # test hierarchical clustering with a callable affinity (manhattan distances)
res = linkage_tree(X, affinity=manhattan_distances)
assert_array_equal(res[0], linkage_tree(X, affinity="manhattan")[0])
def test_structured_linkage_tree():
"""
Check that we obtain the correct solution for structured linkage trees.
"""
rnd = np.random.RandomState(0)
mask = np.ones([10, 10], dtype=np.bool)
# Avoiding a mask with only 'True' entries
mask[4:7, 4:7] = 0
X = rnd.randn(50, 100)
connectivity = grid_to_graph(*mask.shape)
for tree_builder in _TREE_BUILDERS.values():
children, n_components, n_leaves, parent = \
tree_builder(X.T, connectivity)
n_nodes = 2 * X.shape[1] - 1
assert_true(len(children) + n_leaves == n_nodes)
# Check that ward_tree raises a ValueError with a connectivity matrix
# of the wrong shape
assert_raises(ValueError,
tree_builder, X.T, np.ones((4, 4)))
# Check that fitting with no samples raises an error
assert_raises(ValueError,
tree_builder, X.T[:0], connectivity)
def test_unstructured_linkage_tree():
"""
Check that we obtain the correct solution for unstructured linkage trees.
"""
rnd = np.random.RandomState(0)
X = rnd.randn(50, 100)
for this_X in (X, X[0]):
        # With a specified number of clusters, just for the sake of
        # raising a warning and testing the warning code
with warnings.catch_warnings(record=True) as warning_list:
warnings.simplefilter("ignore", DeprecationWarning)
children, n_nodes, n_leaves, parent = assert_warns(UserWarning,
ward_tree,
this_X.T,
n_clusters=10)
n_nodes = 2 * X.shape[1] - 1
assert_equal(len(children) + n_leaves, n_nodes)
for tree_builder in _TREE_BUILDERS.values():
for this_X in (X, X[0]):
with warnings.catch_warnings(record=True) as warning_list:
warnings.simplefilter("always", UserWarning)
warnings.simplefilter("ignore", DeprecationWarning)
                # With a specified number of clusters, just for the sake of
                # raising a warning and testing the warning code
children, n_nodes, n_leaves, parent = tree_builder(
this_X.T, n_clusters=10)
assert_equal(len(warning_list), 1)
n_nodes = 2 * X.shape[1] - 1
assert_equal(len(children) + n_leaves, n_nodes)
def test_height_linkage_tree():
"""
Check that the height of the results of linkage tree is sorted.
"""
rnd = np.random.RandomState(0)
mask = np.ones([10, 10], dtype=np.bool)
X = rnd.randn(50, 100)
connectivity = grid_to_graph(*mask.shape)
for linkage_func in _TREE_BUILDERS.values():
children, n_nodes, n_leaves, parent = linkage_func(X.T, connectivity)
n_nodes = 2 * X.shape[1] - 1
assert_true(len(children) + n_leaves == n_nodes)
def test_agglomerative_clustering():
"""
Check that we obtain the correct number of clusters with
agglomerative clustering.
"""
rnd = np.random.RandomState(0)
mask = np.ones([10, 10], dtype=np.bool)
n_samples = 100
X = rnd.randn(n_samples, 50)
connectivity = grid_to_graph(*mask.shape)
for linkage in ("ward", "complete", "average"):
clustering = AgglomerativeClustering(n_clusters=10,
connectivity=connectivity,
linkage=linkage)
clustering.fit(X)
# test caching
clustering = AgglomerativeClustering(
n_clusters=10, connectivity=connectivity,
memory=mkdtemp(),
linkage=linkage)
clustering.fit(X)
labels = clustering.labels_
assert_true(np.size(np.unique(labels)) == 10)
# Turn caching off now
clustering = AgglomerativeClustering(n_clusters=10,
connectivity=connectivity, linkage=linkage)
# Check that we obtain the same solution with early-stopping of the
# tree building
clustering.compute_full_tree = False
clustering.fit(X)
np.testing.assert_array_equal(clustering.labels_, labels)
clustering.connectivity = None
clustering.fit(X)
assert_true(np.size(np.unique(clustering.labels_)) == 10)
# Check that we raise a TypeError on dense matrices
clustering = AgglomerativeClustering(
n_clusters=10,
connectivity=sparse.lil_matrix(
connectivity.todense()[:10, :10]),
linkage=linkage)
assert_raises(ValueError, clustering.fit, X)
# Test that using ward with another metric than euclidean raises an
# exception
clustering = AgglomerativeClustering(
n_clusters=10,
connectivity=connectivity.todense(),
affinity="manhattan",
linkage="ward")
assert_raises(ValueError, clustering.fit, X)
# Test using another metric than euclidean works with linkage complete
for affinity in PAIRED_DISTANCES.keys():
# Compare our (structured) implementation to scipy
clustering = AgglomerativeClustering(
n_clusters=10,
connectivity=np.ones((n_samples, n_samples)),
affinity=affinity,
linkage="complete")
clustering.fit(X)
clustering2 = AgglomerativeClustering(
n_clusters=10,
connectivity=None,
affinity=affinity,
linkage="complete")
clustering2.fit(X)
assert_almost_equal(normalized_mutual_info_score(
clustering2.labels_,
clustering.labels_), 1)
def test_ward_agglomeration():
"""
Check that we obtain the correct solution in a simplistic case
"""
rnd = np.random.RandomState(0)
mask = np.ones([10, 10], dtype=np.bool)
X = rnd.randn(50, 100)
connectivity = grid_to_graph(*mask.shape)
assert_warns(DeprecationWarning, WardAgglomeration)
ward = WardAgglomeration(n_clusters=5, connectivity=connectivity)
ward.fit(X)
agglo = FeatureAgglomeration(n_clusters=5, connectivity=connectivity)
agglo.fit(X)
assert_array_equal(agglo.labels_, ward.labels_)
assert_true(np.size(np.unique(agglo.labels_)) == 5)
X_red = agglo.transform(X)
assert_true(X_red.shape[1] == 5)
X_full = agglo.inverse_transform(X_red)
assert_true(np.unique(X_full[0]).size == 5)
assert_array_almost_equal(agglo.transform(X_full), X_red)
# Check that fitting with no samples raises a ValueError
assert_raises(ValueError, agglo.fit, X[:0])
def assess_same_labelling(cut1, cut2):
"""Util for comparison with scipy"""
co_clust = []
for cut in [cut1, cut2]:
n = len(cut)
k = cut.max() + 1
ecut = np.zeros((n, k))
ecut[np.arange(n), cut] = 1
co_clust.append(np.dot(ecut, ecut.T))
assert_true((co_clust[0] == co_clust[1]).all())
def test_scikit_vs_scipy():
"""Test scikit linkage with full connectivity (i.e. unstructured) vs scipy
"""
n, p, k = 10, 5, 3
rnd = np.random.RandomState(0)
# Not using a lil_matrix here, just to check that non sparse
# matrices are well handled
connectivity = np.ones((n, n))
for linkage in _TREE_BUILDERS.keys():
for i in range(5):
X = .1 * rnd.normal(size=(n, p))
X -= 4 * np.arange(n)[:, np.newaxis]
X -= X.mean(axis=1)[:, np.newaxis]
out = hierarchy.linkage(X, method=linkage)
children_ = out[:, :2].astype(np.int)
children, _, n_leaves, _ = _TREE_BUILDERS[linkage](X, connectivity)
cut = _hc_cut(k, children, n_leaves)
cut_ = _hc_cut(k, children_, n_leaves)
assess_same_labelling(cut, cut_)
# Test error management in _hc_cut
assert_raises(ValueError, _hc_cut, n_leaves + 1, children, n_leaves)
def test_connectivity_popagation():
"""
Check that connectivity in the ward tree is propagated correctly during
merging.
"""
from sklearn.neighbors import kneighbors_graph
X = np.array([(.014, .120), (.014, .099), (.014, .097),
(.017, .153), (.017, .153), (.018, .153),
(.018, .153), (.018, .153), (.018, .153),
(.018, .153), (.018, .153), (.018, .153),
(.018, .152), (.018, .149), (.018, .144),
])
connectivity = kneighbors_graph(X, 10)
ward = Ward(n_clusters=4, connectivity=connectivity)
# If changes are not propagated correctly, fit crashes with an
# IndexError
ward.fit(X)
def test_connectivity_fixing_non_lil():
"""
    Check non-regression of a bug when a connectivity matrix that does not
    support item assignment is provided with more than one connected component.
"""
# create dummy data
x = np.array([[0, 0], [1, 1]])
# create a mask with several components to force connectivity fixing
m = np.array([[True, False], [False, True]])
c = grid_to_graph(n_x=2, n_y=2, mask=m)
w = Ward(connectivity=c)
assert_warns(UserWarning, w.fit, x)
def test_int_float_dict():
rng = np.random.RandomState(0)
keys = np.unique(rng.randint(100, size=10).astype(np.intp))
values = rng.rand(len(keys))
d = IntFloatDict(keys, values)
for key, value in zip(keys, values):
assert d[key] == value
other_keys = np.arange(50).astype(np.intp)[::2]
other_values = 0.5 * np.ones(50)[::2]
other = IntFloatDict(other_keys, other_values)
# Complete smoke test
max_merge(d, other, mask=np.ones(100, dtype=np.intp), n_a=1, n_b=1)
average_merge(d, other, mask=np.ones(100, dtype=np.intp), n_a=1, n_b=1)
if __name__ == '__main__':
import nose
nose.run(argv=['', __file__])
| bsd-3-clause |
louisLouL/pair_trading | capstone_env/lib/python3.6/site-packages/matplotlib/backends/qt_editor/figureoptions.py | 2 | 9182 | # -*- coding: utf-8 -*-
#
# Copyright © 2009 Pierre Raybaut
# Licensed under the terms of the MIT License
# see the mpl licenses directory for a copy of the license
"""Module that provides a GUI-based editor for matplotlib's figure options"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import os.path as osp
import re
import matplotlib
from matplotlib import cm, markers, colors as mcolors
import matplotlib.backends.qt_editor.formlayout as formlayout
from matplotlib.backends.qt_compat import QtGui
def get_icon(name):
basedir = osp.join(matplotlib.rcParams['datapath'], 'images')
return QtGui.QIcon(osp.join(basedir, name))
LINESTYLES = {'-': 'Solid',
'--': 'Dashed',
'-.': 'DashDot',
':': 'Dotted',
'None': 'None',
}
DRAWSTYLES = {
'default': 'Default',
'steps-pre': 'Steps (Pre)', 'steps': 'Steps (Pre)',
'steps-mid': 'Steps (Mid)',
'steps-post': 'Steps (Post)'}
MARKERS = markers.MarkerStyle.markers
def figure_edit(axes, parent=None):
"""Edit matplotlib figure options"""
sep = (None, None) # separator
# Get / General
# Cast to builtin floats as they have nicer reprs.
xmin, xmax = map(float, axes.get_xlim())
ymin, ymax = map(float, axes.get_ylim())
general = [('Title', axes.get_title()),
sep,
(None, "<b>X-Axis</b>"),
('Left', xmin), ('Right', xmax),
('Label', axes.get_xlabel()),
('Scale', [axes.get_xscale(), 'linear', 'log', 'logit']),
sep,
(None, "<b>Y-Axis</b>"),
('Bottom', ymin), ('Top', ymax),
('Label', axes.get_ylabel()),
('Scale', [axes.get_yscale(), 'linear', 'log', 'logit']),
sep,
('(Re-)Generate automatic legend', False),
]
# Save the unit data
xconverter = axes.xaxis.converter
yconverter = axes.yaxis.converter
xunits = axes.xaxis.get_units()
yunits = axes.yaxis.get_units()
# Sorting for default labels (_lineXXX, _imageXXX).
def cmp_key(label):
match = re.match(r"(_line|_image)(\d+)", label)
if match:
return match.group(1), int(match.group(2))
else:
return label, 0
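    # e.g. "_line2" sorts before "_line10" because the numeric suffix is
    # compared as an integer rather than as a string.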
# Get / Curves
linedict = {}
for line in axes.get_lines():
label = line.get_label()
if label == '_nolegend_':
continue
linedict[label] = line
curves = []
def prepare_data(d, init):
"""Prepare entry for FormLayout.
`d` is a mapping of shorthands to style names (a single style may
have multiple shorthands, in particular the shorthands `None`,
`"None"`, `"none"` and `""` are synonyms); `init` is one shorthand
of the initial style.
        This function returns a list suitable for initializing a
FormLayout combobox, namely `[initial_name, (shorthand,
style_name), (shorthand, style_name), ...]`.
"""
# Drop duplicate shorthands from dict (by overwriting them during
# the dict comprehension).
name2short = {name: short for short, name in d.items()}
# Convert back to {shorthand: name}.
short2name = {short: name for name, short in name2short.items()}
# Find the kept shorthand for the style specified by init.
canonical_init = name2short[d[init]]
# Sort by representation and prepend the initial value.
return ([canonical_init] +
sorted(short2name.items(),
key=lambda short_and_name: short_and_name[1]))
curvelabels = sorted(linedict, key=cmp_key)
for label in curvelabels:
line = linedict[label]
color = mcolors.to_hex(
mcolors.to_rgba(line.get_color(), line.get_alpha()),
keep_alpha=True)
ec = mcolors.to_hex(
mcolors.to_rgba(line.get_markeredgecolor(), line.get_alpha()),
keep_alpha=True)
fc = mcolors.to_hex(
mcolors.to_rgba(line.get_markerfacecolor(), line.get_alpha()),
keep_alpha=True)
curvedata = [
('Label', label),
sep,
(None, '<b>Line</b>'),
('Line style', prepare_data(LINESTYLES, line.get_linestyle())),
('Draw style', prepare_data(DRAWSTYLES, line.get_drawstyle())),
('Width', line.get_linewidth()),
('Color (RGBA)', color),
sep,
(None, '<b>Marker</b>'),
('Style', prepare_data(MARKERS, line.get_marker())),
('Size', line.get_markersize()),
('Face color (RGBA)', fc),
('Edge color (RGBA)', ec)]
curves.append([curvedata, label, ""])
# Is there a curve displayed?
has_curve = bool(curves)
# Get / Images
imagedict = {}
for image in axes.get_images():
label = image.get_label()
if label == '_nolegend_':
continue
imagedict[label] = image
imagelabels = sorted(imagedict, key=cmp_key)
images = []
cmaps = [(cmap, name) for name, cmap in sorted(cm.cmap_d.items())]
for label in imagelabels:
image = imagedict[label]
cmap = image.get_cmap()
if cmap not in cm.cmap_d.values():
cmaps = [(cmap, cmap.name)] + cmaps
low, high = image.get_clim()
imagedata = [
('Label', label),
('Colormap', [cmap.name] + cmaps),
('Min. value', low),
('Max. value', high),
('Interpolation',
[image.get_interpolation()]
+ [(name, name) for name in sorted(image.iterpnames)])]
images.append([imagedata, label, ""])
# Is there an image displayed?
has_image = bool(images)
datalist = [(general, "Axes", "")]
if curves:
datalist.append((curves, "Curves", ""))
if images:
datalist.append((images, "Images", ""))
def apply_callback(data):
"""This function will be called to apply changes"""
orig_xlim = axes.get_xlim()
orig_ylim = axes.get_ylim()
general = data.pop(0)
curves = data.pop(0) if has_curve else []
images = data.pop(0) if has_image else []
if data:
raise ValueError("Unexpected field")
# Set / General
(title, xmin, xmax, xlabel, xscale, ymin, ymax, ylabel, yscale,
generate_legend) = general
if axes.get_xscale() != xscale:
axes.set_xscale(xscale)
if axes.get_yscale() != yscale:
axes.set_yscale(yscale)
axes.set_title(title)
axes.set_xlim(xmin, xmax)
axes.set_xlabel(xlabel)
axes.set_ylim(ymin, ymax)
axes.set_ylabel(ylabel)
# Restore the unit data
axes.xaxis.converter = xconverter
axes.yaxis.converter = yconverter
axes.xaxis.set_units(xunits)
axes.yaxis.set_units(yunits)
axes.xaxis._update_axisinfo()
axes.yaxis._update_axisinfo()
# Set / Curves
for index, curve in enumerate(curves):
line = linedict[curvelabels[index]]
(label, linestyle, drawstyle, linewidth, color, marker, markersize,
markerfacecolor, markeredgecolor) = curve
line.set_label(label)
line.set_linestyle(linestyle)
line.set_drawstyle(drawstyle)
line.set_linewidth(linewidth)
rgba = mcolors.to_rgba(color)
line.set_alpha(None)
line.set_color(rgba)
            if marker != 'none':
line.set_marker(marker)
line.set_markersize(markersize)
line.set_markerfacecolor(markerfacecolor)
line.set_markeredgecolor(markeredgecolor)
# Set / Images
for index, image_settings in enumerate(images):
image = imagedict[imagelabels[index]]
label, cmap, low, high, interpolation = image_settings
image.set_label(label)
image.set_cmap(cm.get_cmap(cmap))
image.set_clim(*sorted([low, high]))
image.set_interpolation(interpolation)
# re-generate legend, if checkbox is checked
if generate_legend:
draggable = None
ncol = 1
if axes.legend_ is not None:
old_legend = axes.get_legend()
draggable = old_legend._draggable is not None
ncol = old_legend._ncol
new_legend = axes.legend(ncol=ncol)
if new_legend:
new_legend.draggable(draggable)
# Redraw
figure = axes.get_figure()
figure.canvas.draw()
if not (axes.get_xlim() == orig_xlim and axes.get_ylim() == orig_ylim):
figure.canvas.toolbar.push_current()
data = formlayout.fedit(datalist, title="Figure options", parent=parent,
icon=get_icon('qt4_editor_options.svg'),
apply=apply_callback)
if data is not None:
apply_callback(data)
| mit |
ngoix/OCRF | examples/neural_networks/plot_rbm_logistic_classification.py | 99 | 4608 | """
==============================================================
Restricted Boltzmann Machine features for digit classification
==============================================================
For greyscale image data where pixel values can be interpreted as degrees of
blackness on a white background, like handwritten digit recognition, the
Bernoulli Restricted Boltzmann machine model (:class:`BernoulliRBM
<sklearn.neural_network.BernoulliRBM>`) can perform effective non-linear
feature extraction.
In order to learn good latent representations from a small dataset, we
artificially generate more labeled data by perturbing the training data with
linear shifts of 1 pixel in each direction.
This example shows how to build a classification pipeline with a BernoulliRBM
feature extractor and a :class:`LogisticRegression
<sklearn.linear_model.LogisticRegression>` classifier. The hyperparameters
of the entire model (learning rate, hidden layer size, regularization)
were optimized by grid search, but the search is not reproduced here because
of runtime constraints.
Logistic regression on raw pixel values is presented for comparison. The
example shows that the features extracted by the BernoulliRBM help improve the
classification accuracy.
"""
from __future__ import print_function
print(__doc__)
# Authors: Yann N. Dauphin, Vlad Niculae, Gabriel Synnaeve
# License: BSD
import numpy as np
import matplotlib.pyplot as plt
from scipy.ndimage import convolve
from sklearn import linear_model, datasets, metrics
from sklearn.model_selection import train_test_split
from sklearn.neural_network import BernoulliRBM
from sklearn.pipeline import Pipeline
###############################################################################
# Setting up
def nudge_dataset(X, Y):
"""
This produces a dataset 5 times bigger than the original one,
by moving the 8x8 images in X around by 1px to left, right, down, up
"""
direction_vectors = [
[[0, 1, 0],
[0, 0, 0],
[0, 0, 0]],
[[0, 0, 0],
[1, 0, 0],
[0, 0, 0]],
[[0, 0, 0],
[0, 0, 1],
[0, 0, 0]],
[[0, 0, 0],
[0, 0, 0],
[0, 1, 0]]]
shift = lambda x, w: convolve(x.reshape((8, 8)), mode='constant',
weights=w).ravel()
X = np.concatenate([X] +
[np.apply_along_axis(shift, 1, X, vector)
for vector in direction_vectors])
Y = np.concatenate([Y for _ in range(5)], axis=0)
return X, Y
# Load Data
digits = datasets.load_digits()
X = np.asarray(digits.data, 'float32')
X, Y = nudge_dataset(X, digits.target)
X = (X - np.min(X, 0)) / (np.max(X, 0) + 0.0001) # 0-1 scaling
X_train, X_test, Y_train, Y_test = train_test_split(X, Y,
test_size=0.2,
random_state=0)
# Models we will use
logistic = linear_model.LogisticRegression()
rbm = BernoulliRBM(random_state=0, verbose=True)
classifier = Pipeline(steps=[('rbm', rbm), ('logistic', logistic)])
###############################################################################
# Training
# Hyper-parameters. These were set by cross-validation,
# using a GridSearchCV. Here we are not performing cross-validation to
# save time.
rbm.learning_rate = 0.06
rbm.n_iter = 20
# More components tend to give better prediction performance, but larger
# fitting time
rbm.n_components = 100
logistic.C = 6000.0
# Training RBM-Logistic Pipeline
classifier.fit(X_train, Y_train)
# Training Logistic regression
logistic_classifier = linear_model.LogisticRegression(C=100.0)
logistic_classifier.fit(X_train, Y_train)
###############################################################################
# Evaluation
print()
print("Logistic regression using RBM features:\n%s\n" % (
metrics.classification_report(
Y_test,
classifier.predict(X_test))))
print("Logistic regression using raw pixel features:\n%s\n" % (
metrics.classification_report(
Y_test,
logistic_classifier.predict(X_test))))
###############################################################################
# Plotting
plt.figure(figsize=(4.2, 4))
for i, comp in enumerate(rbm.components_):
plt.subplot(10, 10, i + 1)
plt.imshow(comp.reshape((8, 8)), cmap=plt.cm.gray_r,
interpolation='nearest')
plt.xticks(())
plt.yticks(())
plt.suptitle('100 components extracted by RBM', fontsize=16)
plt.subplots_adjust(0.08, 0.02, 0.92, 0.85, 0.08, 0.23)
plt.show()
| bsd-3-clause |
anorfleet/turntable | test/lib/python2.7/site-packages/numpy/lib/recfunctions.py | 41 | 35014 | """
Collection of utilities to manipulate structured arrays.
Most of these functions were initially implemented by John Hunter for
matplotlib. They have been rewritten and extended for convenience.
"""
from __future__ import division, absolute_import, print_function
import sys
import itertools
import numpy as np
import numpy.ma as ma
from numpy import ndarray, recarray
from numpy.ma import MaskedArray
from numpy.ma.mrecords import MaskedRecords
from numpy.lib._iotools import _is_string_like
from numpy.compat import basestring
if sys.version_info[0] < 3:
from future_builtins import zip
_check_fill_value = np.ma.core._check_fill_value
__all__ = [
'append_fields', 'drop_fields', 'find_duplicates',
'get_fieldstructure', 'join_by', 'merge_arrays',
'rec_append_fields', 'rec_drop_fields', 'rec_join',
'recursive_fill_fields', 'rename_fields', 'stack_arrays',
]
def recursive_fill_fields(input, output):
"""
Fills fields from output with fields from input,
with support for nested structures.
Parameters
----------
input : ndarray
Input array.
output : ndarray
Output array.
Notes
-----
* `output` should be at least the same size as `input`
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> a = np.array([(1, 10.), (2, 20.)], dtype=[('A', int), ('B', float)])
>>> b = np.zeros((3,), dtype=a.dtype)
>>> rfn.recursive_fill_fields(a, b)
array([(1, 10.0), (2, 20.0), (0, 0.0)],
dtype=[('A', '<i4'), ('B', '<f8')])
"""
newdtype = output.dtype
for field in newdtype.names:
try:
current = input[field]
except ValueError:
continue
if current.dtype.names:
recursive_fill_fields(current, output[field])
else:
output[field][:len(current)] = current
return output
def get_names(adtype):
"""
Returns the field names of the input datatype as a tuple.
Parameters
----------
adtype : dtype
Input datatype
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> rfn.get_names(np.empty((1,), dtype=int)) is None
True
>>> rfn.get_names(np.empty((1,), dtype=[('A',int), ('B', float)]))
('A', 'B')
>>> adtype = np.dtype([('a', int), ('b', [('ba', int), ('bb', int)])])
>>> rfn.get_names(adtype)
('a', ('b', ('ba', 'bb')))
"""
listnames = []
names = adtype.names
for name in names:
current = adtype[name]
if current.names:
listnames.append((name, tuple(get_names(current))))
else:
listnames.append(name)
return tuple(listnames) or None
def get_names_flat(adtype):
"""
    Returns the field names of the input datatype as a tuple. Nested structures
    are flattened beforehand.
Parameters
----------
adtype : dtype
Input datatype
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> rfn.get_names_flat(np.empty((1,), dtype=int)) is None
True
>>> rfn.get_names_flat(np.empty((1,), dtype=[('A',int), ('B', float)]))
('A', 'B')
>>> adtype = np.dtype([('a', int), ('b', [('ba', int), ('bb', int)])])
>>> rfn.get_names_flat(adtype)
('a', 'b', 'ba', 'bb')
"""
listnames = []
names = adtype.names
for name in names:
listnames.append(name)
current = adtype[name]
if current.names:
listnames.extend(get_names_flat(current))
return tuple(listnames) or None
def flatten_descr(ndtype):
"""
Flatten a structured data-type description.
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> ndtype = np.dtype([('a', '<i4'), ('b', [('ba', '<f8'), ('bb', '<i4')])])
>>> rfn.flatten_descr(ndtype)
(('a', dtype('int32')), ('ba', dtype('float64')), ('bb', dtype('int32')))
"""
names = ndtype.names
if names is None:
return ndtype.descr
else:
descr = []
for field in names:
(typ, _) = ndtype.fields[field]
if typ.names:
descr.extend(flatten_descr(typ))
else:
descr.append((field, typ))
return tuple(descr)
def zip_descr(seqarrays, flatten=False):
"""
Combine the dtype description of a series of arrays.
Parameters
----------
seqarrays : sequence of arrays
Sequence of arrays
flatten : {boolean}, optional
Whether to collapse nested descriptions.
"""
newdtype = []
if flatten:
for a in seqarrays:
newdtype.extend(flatten_descr(a.dtype))
else:
for a in seqarrays:
current = a.dtype
names = current.names or ()
if len(names) > 1:
newdtype.append(('', current.descr))
else:
newdtype.extend(current.descr)
return np.dtype(newdtype).descr
def get_fieldstructure(adtype, lastname=None, parents=None,):
"""
Returns a dictionary with fields indexing lists of their parent fields.
This function is used to simplify access to fields nested in other fields.
Parameters
----------
adtype : np.dtype
Input datatype
lastname : optional
Last processed field name (used internally during recursion).
parents : dictionary
        Dictionary of parent fields (used internally during recursion).
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> ndtype = np.dtype([('A', int),
... ('B', [('BA', int),
... ('BB', [('BBA', int), ('BBB', int)])])])
>>> rfn.get_fieldstructure(ndtype)
... # XXX: possible regression, order of BBA and BBB is swapped
{'A': [], 'B': [], 'BA': ['B'], 'BB': ['B'], 'BBA': ['B', 'BB'], 'BBB': ['B', 'BB']}
"""
if parents is None:
parents = {}
names = adtype.names
for name in names:
current = adtype[name]
if current.names:
if lastname:
parents[name] = [lastname, ]
else:
parents[name] = []
parents.update(get_fieldstructure(current, name, parents))
else:
lastparent = [_ for _ in (parents.get(lastname, []) or [])]
if lastparent:
lastparent.append(lastname)
elif lastname:
lastparent = [lastname, ]
parents[name] = lastparent or []
return parents or None
def _izip_fields_flat(iterable):
"""
Returns an iterator of concatenated fields from a sequence of arrays,
collapsing any nested structure.
"""
for element in iterable:
if isinstance(element, np.void):
for f in _izip_fields_flat(tuple(element)):
yield f
else:
yield element
def _izip_fields(iterable):
"""
Returns an iterator of concatenated fields from a sequence of arrays.
"""
for element in iterable:
if (hasattr(element, '__iter__') and
not isinstance(element, basestring)):
for f in _izip_fields(element):
yield f
elif isinstance(element, np.void) and len(tuple(element)) == 1:
for f in _izip_fields(element):
yield f
else:
yield element
def izip_records(seqarrays, fill_value=None, flatten=True):
"""
Returns an iterator of concatenated items from a sequence of arrays.
Parameters
----------
seqarray : sequence of arrays
Sequence of arrays.
fill_value : {None, integer}
Value used to pad shorter iterables.
    flatten : {True, False}, optional
        Whether to collapse nested fields while iterating.
"""
# OK, that's a complete ripoff from Python2.6 itertools.izip_longest
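    # Each input iterator is chained with a one-shot sentinel() and an endless
    # stream of fill values. counter is the bound pop method of a list holding
    # len(seqarrays) - 1 fill values, so once every input is exhausted the
    # final pop raises IndexError, which terminates the loop below.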
def sentinel(counter=([fill_value] * (len(seqarrays) - 1)).pop):
"Yields the fill_value or raises IndexError"
yield counter()
#
fillers = itertools.repeat(fill_value)
iters = [itertools.chain(it, sentinel(), fillers) for it in seqarrays]
# Should we flatten the items, or just use a nested approach
if flatten:
zipfunc = _izip_fields_flat
else:
zipfunc = _izip_fields
#
try:
for tup in zip(*iters):
yield tuple(zipfunc(tup))
except IndexError:
pass
def _fix_output(output, usemask=True, asrecarray=False):
"""
Private function: return a recarray, a ndarray, a MaskedArray
or a MaskedRecords depending on the input parameters
"""
if not isinstance(output, MaskedArray):
usemask = False
if usemask:
if asrecarray:
output = output.view(MaskedRecords)
else:
output = ma.filled(output)
if asrecarray:
output = output.view(recarray)
return output
def _fix_defaults(output, defaults=None):
"""
Update the fill_value and masked data of `output`
from the default given in a dictionary defaults.
"""
names = output.dtype.names
(data, mask, fill_value) = (output.data, output.mask, output.fill_value)
for (k, v) in (defaults or {}).items():
if k in names:
fill_value[k] = v
data[k][mask[k]] = v
return output
def merge_arrays(seqarrays, fill_value=-1, flatten=False,
usemask=False, asrecarray=False):
"""
Merge arrays field by field.
Parameters
----------
seqarrays : sequence of ndarrays
Sequence of arrays
fill_value : {float}, optional
Filling value used to pad missing data on the shorter arrays.
flatten : {False, True}, optional
Whether to collapse nested fields.
usemask : {False, True}, optional
Whether to return a masked array or not.
asrecarray : {False, True}, optional
Whether to return a recarray (MaskedRecords) or not.
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> rfn.merge_arrays((np.array([1, 2]), np.array([10., 20., 30.])))
masked_array(data = [(1, 10.0) (2, 20.0) (--, 30.0)],
mask = [(False, False) (False, False) (True, False)],
fill_value = (999999, 1e+20),
dtype = [('f0', '<i4'), ('f1', '<f8')])
>>> rfn.merge_arrays((np.array([1, 2]), np.array([10., 20., 30.])),
... usemask=False)
array([(1, 10.0), (2, 20.0), (-1, 30.0)],
dtype=[('f0', '<i4'), ('f1', '<f8')])
>>> rfn.merge_arrays((np.array([1, 2]).view([('a', int)]),
... np.array([10., 20., 30.])),
... usemask=False, asrecarray=True)
rec.array([(1, 10.0), (2, 20.0), (-1, 30.0)],
dtype=[('a', '<i4'), ('f1', '<f8')])
Notes
-----
* Without a mask, the missing value will be filled with something,
* depending on what its corresponding type:
-1 for integers
-1.0 for floating point numbers
'-' for characters
'-1' for strings
True for boolean values
* XXX: I just obtained these values empirically
"""
# Only one item in the input sequence ?
if (len(seqarrays) == 1):
seqarrays = np.asanyarray(seqarrays[0])
# Do we have a single ndarray as input ?
if isinstance(seqarrays, (ndarray, np.void)):
seqdtype = seqarrays.dtype
if (not flatten) or \
(zip_descr((seqarrays,), flatten=True) == seqdtype.descr):
# Minimal processing needed: just make sure everythng's a-ok
seqarrays = seqarrays.ravel()
# Make sure we have named fields
if not seqdtype.names:
seqdtype = [('', seqdtype)]
# Find what type of array we must return
if usemask:
if asrecarray:
seqtype = MaskedRecords
else:
seqtype = MaskedArray
elif asrecarray:
seqtype = recarray
else:
seqtype = ndarray
return seqarrays.view(dtype=seqdtype, type=seqtype)
else:
seqarrays = (seqarrays,)
else:
# Make sure we have arrays in the input sequence
seqarrays = [np.asanyarray(_m) for _m in seqarrays]
# Find the sizes of the inputs and their maximum
sizes = tuple(a.size for a in seqarrays)
maxlength = max(sizes)
# Get the dtype of the output (flattening if needed)
newdtype = zip_descr(seqarrays, flatten=flatten)
# Initialize the sequences for data and mask
seqdata = []
seqmask = []
# If we expect some kind of MaskedArray, make a special loop.
if usemask:
for (a, n) in zip(seqarrays, sizes):
nbmissing = (maxlength - n)
# Get the data and mask
data = a.ravel().__array__()
mask = ma.getmaskarray(a).ravel()
# Get the filling value (if needed)
if nbmissing:
fval = _check_fill_value(fill_value, a.dtype)
if isinstance(fval, (ndarray, np.void)):
if len(fval.dtype) == 1:
fval = fval.item()[0]
fmsk = True
else:
fval = np.array(fval, dtype=a.dtype, ndmin=1)
fmsk = np.ones((1,), dtype=mask.dtype)
else:
fval = None
fmsk = True
# Store an iterator padding the input to the expected length
seqdata.append(itertools.chain(data, [fval] * nbmissing))
seqmask.append(itertools.chain(mask, [fmsk] * nbmissing))
# Create an iterator for the data
data = tuple(izip_records(seqdata, flatten=flatten))
output = ma.array(np.fromiter(data, dtype=newdtype, count=maxlength),
mask=list(izip_records(seqmask, flatten=flatten)))
if asrecarray:
output = output.view(MaskedRecords)
else:
# Same as before, without the mask we don't need...
for (a, n) in zip(seqarrays, sizes):
nbmissing = (maxlength - n)
data = a.ravel().__array__()
if nbmissing:
fval = _check_fill_value(fill_value, a.dtype)
if isinstance(fval, (ndarray, np.void)):
if len(fval.dtype) == 1:
fval = fval.item()[0]
else:
fval = np.array(fval, dtype=a.dtype, ndmin=1)
else:
fval = None
seqdata.append(itertools.chain(data, [fval] * nbmissing))
output = np.fromiter(tuple(izip_records(seqdata, flatten=flatten)),
dtype=newdtype, count=maxlength)
if asrecarray:
output = output.view(recarray)
# And we're done...
return output
def drop_fields(base, drop_names, usemask=True, asrecarray=False):
"""
Return a new array with fields in `drop_names` dropped.
Nested fields are supported.
Parameters
----------
base : array
Input array
drop_names : string or sequence
String or sequence of strings corresponding to the names of the
fields to drop.
usemask : {False, True}, optional
Whether to return a masked array or not.
    asrecarray : {False, True}, optional
Whether to return a recarray or a mrecarray (`asrecarray=True`) or
a plain ndarray or masked array with flexible dtype. The default
is False.
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> a = np.array([(1, (2, 3.0)), (4, (5, 6.0))],
... dtype=[('a', int), ('b', [('ba', float), ('bb', int)])])
>>> rfn.drop_fields(a, 'a')
array([((2.0, 3),), ((5.0, 6),)],
dtype=[('b', [('ba', '<f8'), ('bb', '<i4')])])
>>> rfn.drop_fields(a, 'ba')
array([(1, (3,)), (4, (6,))],
dtype=[('a', '<i4'), ('b', [('bb', '<i4')])])
>>> rfn.drop_fields(a, ['ba', 'bb'])
array([(1,), (4,)],
dtype=[('a', '<i4')])
"""
if _is_string_like(drop_names):
drop_names = [drop_names, ]
else:
drop_names = set(drop_names)
def _drop_descr(ndtype, drop_names):
names = ndtype.names
newdtype = []
for name in names:
current = ndtype[name]
if name in drop_names:
continue
if current.names:
descr = _drop_descr(current, drop_names)
if descr:
newdtype.append((name, descr))
else:
newdtype.append((name, current))
return newdtype
newdtype = _drop_descr(base.dtype, drop_names)
if not newdtype:
return None
output = np.empty(base.shape, dtype=newdtype)
output = recursive_fill_fields(base, output)
return _fix_output(output, usemask=usemask, asrecarray=asrecarray)
def rec_drop_fields(base, drop_names):
"""
Returns a new numpy.recarray with fields in `drop_names` dropped.
"""
return drop_fields(base, drop_names, usemask=False, asrecarray=True)
def rename_fields(base, namemapper):
"""
Rename the fields from a flexible-datatype ndarray or recarray.
Nested fields are supported.
Parameters
----------
base : ndarray
Input array whose fields must be modified.
namemapper : dictionary
Dictionary mapping old field names to their new version.
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> a = np.array([(1, (2, [3.0, 30.])), (4, (5, [6.0, 60.]))],
... dtype=[('a', int),('b', [('ba', float), ('bb', (float, 2))])])
>>> rfn.rename_fields(a, {'a':'A', 'bb':'BB'})
array([(1, (2.0, [3.0, 30.0])), (4, (5.0, [6.0, 60.0]))],
dtype=[('A', '<i4'), ('b', [('ba', '<f8'), ('BB', '<f8', 2)])])
"""
def _recursive_rename_fields(ndtype, namemapper):
newdtype = []
for name in ndtype.names:
newname = namemapper.get(name, name)
current = ndtype[name]
if current.names:
newdtype.append(
(newname, _recursive_rename_fields(current, namemapper))
)
else:
newdtype.append((newname, current))
return newdtype
newdtype = _recursive_rename_fields(base.dtype, namemapper)
return base.view(newdtype)
def append_fields(base, names, data, dtypes=None,
fill_value=-1, usemask=True, asrecarray=False):
"""
Add new fields to an existing array.
The names of the fields are given with the `names` arguments,
the corresponding values with the `data` arguments.
If a single field is appended, `names`, `data` and `dtypes` do not have
to be lists but just values.
Parameters
----------
base : array
Input array to extend.
names : string, sequence
String or sequence of strings corresponding to the names
of the new fields.
data : array or sequence of arrays
Array or sequence of arrays storing the fields to add to the base.
dtypes : sequence of datatypes, optional
Datatype or sequence of datatypes.
If None, the datatypes are estimated from the `data`.
fill_value : {float}, optional
Filling value used to pad missing data on the shorter arrays.
usemask : {False, True}, optional
Whether to return a masked array or not.
asrecarray : {False, True}, optional
Whether to return a recarray (MaskedRecords) or not.
"""
# Check the names
if isinstance(names, (tuple, list)):
if len(names) != len(data):
msg = "The number of arrays does not match the number of names"
raise ValueError(msg)
elif isinstance(names, basestring):
names = [names, ]
data = [data, ]
#
if dtypes is None:
data = [np.array(a, copy=False, subok=True) for a in data]
data = [a.view([(name, a.dtype)]) for (name, a) in zip(names, data)]
else:
if not isinstance(dtypes, (tuple, list)):
dtypes = [dtypes, ]
if len(data) != len(dtypes):
if len(dtypes) == 1:
dtypes = dtypes * len(data)
else:
msg = "The dtypes argument must be None, a dtype, or a list."
raise ValueError(msg)
data = [np.array(a, copy=False, subok=True, dtype=d).view([(n, d)])
for (a, n, d) in zip(data, names, dtypes)]
#
base = merge_arrays(base, usemask=usemask, fill_value=fill_value)
if len(data) > 1:
data = merge_arrays(data, flatten=True, usemask=usemask,
fill_value=fill_value)
else:
data = data.pop()
#
output = ma.masked_all(max(len(base), len(data)),
dtype=base.dtype.descr + data.dtype.descr)
output = recursive_fill_fields(base, output)
output = recursive_fill_fields(data, output)
#
return _fix_output(output, usemask=usemask, asrecarray=asrecarray)
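# A minimal illustrative sketch of append_fields (assuming
# ``from numpy.lib import recfunctions as rfn``; the array is hypothetical):
#     >>> a = np.array([(1,), (2,), (3,)], dtype=[('a', int)])
#     >>> rfn.append_fields(a, 'b', [10., 20., 30.], usemask=False).dtype.names
#     ('a', 'b')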
def rec_append_fields(base, names, data, dtypes=None):
"""
Add new fields to an existing array.
The names of the fields are given with the `names` arguments,
the corresponding values with the `data` arguments.
If a single field is appended, `names`, `data` and `dtypes` do not have
to be lists but just values.
Parameters
----------
base : array
Input array to extend.
names : string, sequence
String or sequence of strings corresponding to the names
of the new fields.
data : array or sequence of arrays
Array or sequence of arrays storing the fields to add to the base.
dtypes : sequence of datatypes, optional
Datatype or sequence of datatypes.
If None, the datatypes are estimated from the `data`.
See Also
--------
append_fields
Returns
-------
appended_array : np.recarray
"""
return append_fields(base, names, data=data, dtypes=dtypes,
asrecarray=True, usemask=False)
def stack_arrays(arrays, defaults=None, usemask=True, asrecarray=False,
autoconvert=False):
"""
    Superposes arrays field by field
Parameters
----------
    arrays : array or sequence
Sequence of input arrays.
defaults : dictionary, optional
Dictionary mapping field names to the corresponding default values.
usemask : {True, False}, optional
        Whether to return a MaskedArray (or MaskedRecords if
        `asrecarray==True`) or a ndarray.
asrecarray : {False, True}, optional
Whether to return a recarray (or MaskedRecords if `usemask==True`)
or just a flexible-type ndarray.
autoconvert : {False, True}, optional
Whether automatically cast the type of the field to the maximum.
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> x = np.array([1, 2,])
>>> rfn.stack_arrays(x) is x
True
>>> z = np.array([('A', 1), ('B', 2)], dtype=[('A', '|S3'), ('B', float)])
>>> zz = np.array([('a', 10., 100.), ('b', 20., 200.), ('c', 30., 300.)],
... dtype=[('A', '|S3'), ('B', float), ('C', float)])
>>> test = rfn.stack_arrays((z,zz))
>>> test
masked_array(data = [('A', 1.0, --) ('B', 2.0, --) ('a', 10.0, 100.0) ('b', 20.0, 200.0)
('c', 30.0, 300.0)],
mask = [(False, False, True) (False, False, True) (False, False, False)
(False, False, False) (False, False, False)],
fill_value = ('N/A', 1e+20, 1e+20),
dtype = [('A', '|S3'), ('B', '<f8'), ('C', '<f8')])
"""
if isinstance(arrays, ndarray):
return arrays
elif len(arrays) == 1:
return arrays[0]
seqarrays = [np.asanyarray(a).ravel() for a in arrays]
nrecords = [len(a) for a in seqarrays]
ndtype = [a.dtype for a in seqarrays]
fldnames = [d.names for d in ndtype]
#
dtype_l = ndtype[0]
newdescr = dtype_l.descr
names = [_[0] for _ in newdescr]
for dtype_n in ndtype[1:]:
for descr in dtype_n.descr:
name = descr[0] or ''
if name not in names:
newdescr.append(descr)
names.append(name)
else:
nameidx = names.index(name)
current_descr = newdescr[nameidx]
if autoconvert:
if np.dtype(descr[1]) > np.dtype(current_descr[-1]):
current_descr = list(current_descr)
current_descr[-1] = descr[1]
newdescr[nameidx] = tuple(current_descr)
elif descr[1] != current_descr[-1]:
raise TypeError("Incompatible type '%s' <> '%s'" %
(dict(newdescr)[name], descr[1]))
# Only one field: use concatenate
if len(newdescr) == 1:
output = ma.concatenate(seqarrays)
else:
#
output = ma.masked_all((np.sum(nrecords),), newdescr)
offset = np.cumsum(np.r_[0, nrecords])
seen = []
for (a, n, i, j) in zip(seqarrays, fldnames, offset[:-1], offset[1:]):
names = a.dtype.names
if names is None:
output['f%i' % len(seen)][i:j] = a
else:
for name in n:
output[name][i:j] = a[name]
if name not in seen:
seen.append(name)
#
return _fix_output(_fix_defaults(output, defaults),
usemask=usemask, asrecarray=asrecarray)
def find_duplicates(a, key=None, ignoremask=True, return_index=False):
"""
Find the duplicates in a structured array along a given key
Parameters
----------
a : array-like
Input array
key : {string, None}, optional
Name of the fields along which to check the duplicates.
If None, the search is performed by records
ignoremask : {True, False}, optional
Whether masked data should be discarded or considered as duplicates.
return_index : {False, True}, optional
Whether to return the indices of the duplicated values.
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> ndtype = [('a', int)]
>>> a = np.ma.array([1, 1, 1, 2, 2, 3, 3],
... mask=[0, 0, 1, 0, 0, 0, 1]).view(ndtype)
>>> rfn.find_duplicates(a, ignoremask=True, return_index=True)
... # XXX: judging by the output, the ignoremask flag has no effect
"""
a = np.asanyarray(a).ravel()
# Get a dictionary of fields
fields = get_fieldstructure(a.dtype)
# Get the sorting data (by selecting the corresponding field)
base = a
if key:
for f in fields[key]:
base = base[f]
base = base[key]
# Get the sorting indices and the sorted data
sortidx = base.argsort()
sortedbase = base[sortidx]
sorteddata = sortedbase.filled()
# Compare the sorting data
flag = (sorteddata[:-1] == sorteddata[1:])
# If masked data must be ignored, set the flag to false where needed
if ignoremask:
sortedmask = sortedbase.recordmask
flag[sortedmask[1:]] = False
flag = np.concatenate(([False], flag))
# We need to take the point on the left as well (else we're missing it)
flag[:-1] = flag[:-1] + flag[1:]
duplicates = a[sortidx][flag]
if return_index:
return (duplicates, sortidx[flag])
else:
return duplicates
def join_by(key, r1, r2, jointype='inner', r1postfix='1', r2postfix='2',
defaults=None, usemask=True, asrecarray=False):
"""
Join arrays `r1` and `r2` on key `key`.
    The key should be either a string or a sequence of strings corresponding
to the fields used to join the array. An exception is raised if the
`key` field cannot be found in the two input arrays. Neither `r1` nor
`r2` should have any duplicates along `key`: the presence of duplicates
will make the output quite unreliable. Note that duplicates are not
looked for by the algorithm.
Parameters
----------
key : {string, sequence}
A string or a sequence of strings corresponding to the fields used
for comparison.
r1, r2 : arrays
Structured arrays.
jointype : {'inner', 'outer', 'leftouter'}, optional
If 'inner', returns the elements common to both r1 and r2.
        If 'outer', returns the common elements as well as the elements of
        r1 not in r2 and the elements of r2 not in r1.
If 'leftouter', returns the common elements and the elements of r1
not in r2.
r1postfix : string, optional
String appended to the names of the fields of r1 that are present
in r2 but absent of the key.
r2postfix : string, optional
String appended to the names of the fields of r2 that are present
in r1 but absent of the key.
defaults : {dictionary}, optional
Dictionary mapping field names to the corresponding default values.
usemask : {True, False}, optional
        Whether to return a MaskedArray (or MaskedRecords if
        `asrecarray==True`) or a ndarray.
asrecarray : {False, True}, optional
Whether to return a recarray (or MaskedRecords if `usemask==True`)
or just a flexible-type ndarray.
Notes
-----
* The output is sorted along the key.
* A temporary array is formed by dropping the fields not in the key for
the two arrays and concatenating the result. This array is then
sorted, and the common entries selected. The output is constructed by
filling the fields with the selected entries. Matching is not
preserved if there are some duplicates...
"""
# Check jointype
if jointype not in ('inner', 'outer', 'leftouter'):
raise ValueError(
"The 'jointype' argument should be in 'inner', "
"'outer' or 'leftouter' (got '%s' instead)" % jointype
)
# If we have a single key, put it in a tuple
if isinstance(key, basestring):
key = (key,)
# Check the keys
for name in key:
if name not in r1.dtype.names:
raise ValueError('r1 does not have key field %s' % name)
if name not in r2.dtype.names:
raise ValueError('r2 does not have key field %s' % name)
# Make sure we work with ravelled arrays
r1 = r1.ravel()
r2 = r2.ravel()
# Fixme: nb2 below is never used. Commenting out for pyflakes.
# (nb1, nb2) = (len(r1), len(r2))
nb1 = len(r1)
(r1names, r2names) = (r1.dtype.names, r2.dtype.names)
# Check the names for collision
if (set.intersection(set(r1names), set(r2names)).difference(key) and
not (r1postfix or r2postfix)):
msg = "r1 and r2 contain common names, r1postfix and r2postfix "
msg += "can't be empty"
raise ValueError(msg)
# Make temporary arrays of just the keys
r1k = drop_fields(r1, [n for n in r1names if n not in key])
r2k = drop_fields(r2, [n for n in r2names if n not in key])
# Concatenate the two arrays for comparison
aux = ma.concatenate((r1k, r2k))
idx_sort = aux.argsort(order=key)
aux = aux[idx_sort]
#
# Get the common keys
flag_in = ma.concatenate(([False], aux[1:] == aux[:-1]))
flag_in[:-1] = flag_in[1:] + flag_in[:-1]
idx_in = idx_sort[flag_in]
idx_1 = idx_in[(idx_in < nb1)]
idx_2 = idx_in[(idx_in >= nb1)] - nb1
(r1cmn, r2cmn) = (len(idx_1), len(idx_2))
if jointype == 'inner':
(r1spc, r2spc) = (0, 0)
elif jointype == 'outer':
idx_out = idx_sort[~flag_in]
idx_1 = np.concatenate((idx_1, idx_out[(idx_out < nb1)]))
idx_2 = np.concatenate((idx_2, idx_out[(idx_out >= nb1)] - nb1))
(r1spc, r2spc) = (len(idx_1) - r1cmn, len(idx_2) - r2cmn)
elif jointype == 'leftouter':
idx_out = idx_sort[~flag_in]
idx_1 = np.concatenate((idx_1, idx_out[(idx_out < nb1)]))
(r1spc, r2spc) = (len(idx_1) - r1cmn, 0)
# Select the entries from each input
(s1, s2) = (r1[idx_1], r2[idx_2])
#
# Build the new description of the output array .......
# Start with the key fields
ndtype = [list(_) for _ in r1k.dtype.descr]
# Add the other fields
ndtype.extend(list(_) for _ in r1.dtype.descr if _[0] not in key)
# Find the new list of names (it may be different from r1names)
names = list(_[0] for _ in ndtype)
for desc in r2.dtype.descr:
desc = list(desc)
name = desc[0]
# Have we seen the current name already ?
if name in names:
nameidx = ndtype.index(desc)
current = ndtype[nameidx]
# The current field is part of the key: take the largest dtype
if name in key:
current[-1] = max(desc[1], current[-1])
# The current field is not part of the key: add the suffixes
else:
current[0] += r1postfix
desc[0] += r2postfix
ndtype.insert(nameidx + 1, desc)
#... we haven't: just add the description to the current list
else:
names.append(desc[0])
ndtype.append(desc)
# Revert the elements to tuples
ndtype = [tuple(_) for _ in ndtype]
# Find the largest number of common fields:
# r1cmn and r2cmn should be equal, but...
cmn = max(r1cmn, r2cmn)
# Construct an empty array
output = ma.masked_all((cmn + r1spc + r2spc,), dtype=ndtype)
names = output.dtype.names
for f in r1names:
selected = s1[f]
if f not in names or (f in r2names and not r2postfix and f not in key):
f += r1postfix
current = output[f]
current[:r1cmn] = selected[:r1cmn]
if jointype in ('outer', 'leftouter'):
current[cmn:cmn + r1spc] = selected[r1cmn:]
for f in r2names:
selected = s2[f]
if f not in names or (f in r1names and not r1postfix and f not in key):
f += r2postfix
current = output[f]
current[:r2cmn] = selected[:r2cmn]
if (jointype == 'outer') and r2spc:
current[-r2spc:] = selected[r2cmn:]
# Sort and finalize the output
output.sort(order=key)
kwargs = dict(usemask=usemask, asrecarray=asrecarray)
return _fix_output(_fix_defaults(output, defaults), **kwargs)
def rec_join(key, r1, r2, jointype='inner', r1postfix='1', r2postfix='2',
defaults=None):
"""
Join arrays `r1` and `r2` on keys.
Alternative to join_by, that always returns a np.recarray.
See Also
--------
join_by : equivalent function
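Examples
--------
Illustrative sketch only (array contents are assumptions):

>>> a = np.array([(1, 10.), (2, 20.)], dtype=[('key', int), ('x', float)])
>>> b = np.array([(2, 200.), (3, 300.)], dtype=[('key', int), ('y', float)])
>>> rec = rec_join('key', a, b, jointype='inner')  # np.recarray, key 2 only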
"""
kwargs = dict(jointype=jointype, r1postfix=r1postfix, r2postfix=r2postfix,
defaults=defaults, usemask=False, asrecarray=True)
return join_by(key, r1, r2, **kwargs)
| mit |
ilyes14/scikit-learn | sklearn/mixture/tests/test_gmm.py | 200 | 17427 | import unittest
import copy
import sys
from nose.tools import assert_true
import numpy as np
from numpy.testing import (assert_array_equal, assert_array_almost_equal,
assert_raises)
from scipy import stats
from sklearn import mixture
from sklearn.datasets.samples_generator import make_spd_matrix
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raise_message
from sklearn.metrics.cluster import adjusted_rand_score
from sklearn.externals.six.moves import cStringIO as StringIO
rng = np.random.RandomState(0)
def test_sample_gaussian():
# Test sample generation from mixture.sample_gaussian where covariance
# is diagonal, spherical and full
n_features, n_samples = 2, 300
axis = 1
mu = rng.randint(10) * rng.rand(n_features)
cv = (rng.rand(n_features) + 1.0) ** 2
samples = mixture.sample_gaussian(
mu, cv, covariance_type='diag', n_samples=n_samples)
assert_true(np.allclose(samples.mean(axis), mu, atol=1.3))
assert_true(np.allclose(samples.var(axis), cv, atol=1.5))
# the same for spherical covariances
cv = (rng.rand() + 1.0) ** 2
samples = mixture.sample_gaussian(
mu, cv, covariance_type='spherical', n_samples=n_samples)
assert_true(np.allclose(samples.mean(axis), mu, atol=1.5))
assert_true(np.allclose(
samples.var(axis), np.repeat(cv, n_features), atol=1.5))
# and for full covariances
A = rng.randn(n_features, n_features)
cv = np.dot(A.T, A) + np.eye(n_features)
samples = mixture.sample_gaussian(
mu, cv, covariance_type='full', n_samples=n_samples)
assert_true(np.allclose(samples.mean(axis), mu, atol=1.3))
assert_true(np.allclose(np.cov(samples), cv, atol=2.5))
# Numerical stability check: in SciPy 0.12.0 at least, eigh may return
# tiny negative values in its second return value.
from sklearn.mixture import sample_gaussian
x = sample_gaussian([0, 0], [[4, 3], [1, .1]],
covariance_type='full', random_state=42)
print(x)
assert_true(np.isfinite(x).all())
def _naive_lmvnpdf_diag(X, mu, cv):
# slow and naive implementation of lmvnpdf
ref = np.empty((len(X), len(mu)))
stds = np.sqrt(cv)
for i, (m, std) in enumerate(zip(mu, stds)):
ref[:, i] = np.log(stats.norm.pdf(X, m, std)).sum(axis=1)
return ref
def test_lmvnpdf_diag():
# test a slow and naive implementation of lmvnpdf and
# compare it to the vectorized version (mixture.lmvnpdf) to test
# for correctness
n_features, n_components, n_samples = 2, 3, 10
mu = rng.randint(10) * rng.rand(n_components, n_features)
cv = (rng.rand(n_components, n_features) + 1.0) ** 2
X = rng.randint(10) * rng.rand(n_samples, n_features)
ref = _naive_lmvnpdf_diag(X, mu, cv)
lpr = mixture.log_multivariate_normal_density(X, mu, cv, 'diag')
assert_array_almost_equal(lpr, ref)
def test_lmvnpdf_spherical():
n_features, n_components, n_samples = 2, 3, 10
mu = rng.randint(10) * rng.rand(n_components, n_features)
spherecv = rng.rand(n_components, 1) ** 2 + 1
X = rng.randint(10) * rng.rand(n_samples, n_features)
cv = np.tile(spherecv, (n_features, 1))
reference = _naive_lmvnpdf_diag(X, mu, cv)
lpr = mixture.log_multivariate_normal_density(X, mu, spherecv,
'spherical')
assert_array_almost_equal(lpr, reference)
def test_lmvnpdf_full():
n_features, n_components, n_samples = 2, 3, 10
mu = rng.randint(10) * rng.rand(n_components, n_features)
cv = (rng.rand(n_components, n_features) + 1.0) ** 2
X = rng.randint(10) * rng.rand(n_samples, n_features)
fullcv = np.array([np.diag(x) for x in cv])
reference = _naive_lmvnpdf_diag(X, mu, cv)
lpr = mixture.log_multivariate_normal_density(X, mu, fullcv, 'full')
assert_array_almost_equal(lpr, reference)
def test_lvmpdf_full_cv_non_positive_definite():
n_features, n_samples = 2, 10
rng = np.random.RandomState(0)
X = rng.randint(10) * rng.rand(n_samples, n_features)
mu = np.mean(X, 0)
cv = np.array([[[-1, 0], [0, 1]]])
expected_message = "'covars' must be symmetric, positive-definite"
assert_raise_message(ValueError, expected_message,
mixture.log_multivariate_normal_density,
X, mu, cv, 'full')
def test_GMM_attributes():
n_components, n_features = 10, 4
covariance_type = 'diag'
g = mixture.GMM(n_components, covariance_type, random_state=rng)
weights = rng.rand(n_components)
weights = weights / weights.sum()
means = rng.randint(-20, 20, (n_components, n_features))
assert_true(g.n_components == n_components)
assert_true(g.covariance_type == covariance_type)
g.weights_ = weights
assert_array_almost_equal(g.weights_, weights)
g.means_ = means
assert_array_almost_equal(g.means_, means)
covars = (0.1 + 2 * rng.rand(n_components, n_features)) ** 2
g.covars_ = covars
assert_array_almost_equal(g.covars_, covars)
assert_raises(ValueError, g._set_covars, [])
assert_raises(ValueError, g._set_covars,
np.zeros((n_components - 2, n_features)))
assert_raises(ValueError, mixture.GMM, n_components=20,
covariance_type='badcovariance_type')
class GMMTester():
do_test_eval = True
def _setUp(self):
self.n_components = 10
self.n_features = 4
self.weights = rng.rand(self.n_components)
self.weights = self.weights / self.weights.sum()
self.means = rng.randint(-20, 20, (self.n_components, self.n_features))
self.threshold = -0.5
self.I = np.eye(self.n_features)
self.covars = {
'spherical': (0.1 + 2 * rng.rand(self.n_components,
self.n_features)) ** 2,
'tied': (make_spd_matrix(self.n_features, random_state=0)
+ 5 * self.I),
'diag': (0.1 + 2 * rng.rand(self.n_components,
self.n_features)) ** 2,
'full': np.array([make_spd_matrix(self.n_features, random_state=0)
+ 5 * self.I for x in range(self.n_components)])}
def test_eval(self):
if not self.do_test_eval:
return # DPGMM does not support setting the means and
# covariances before fitting. There is no way of fixing this
# due to the variational parameters being more expressive than
# covariance matrices
g = self.model(n_components=self.n_components,
covariance_type=self.covariance_type, random_state=rng)
# Make sure the means are far apart so responsibilities.argmax()
# picks the actual component used to generate the observations.
g.means_ = 20 * self.means
g.covars_ = self.covars[self.covariance_type]
g.weights_ = self.weights
gaussidx = np.repeat(np.arange(self.n_components), 5)
n_samples = len(gaussidx)
X = rng.randn(n_samples, self.n_features) + g.means_[gaussidx]
ll, responsibilities = g.score_samples(X)
self.assertEqual(len(ll), n_samples)
self.assertEqual(responsibilities.shape,
(n_samples, self.n_components))
assert_array_almost_equal(responsibilities.sum(axis=1),
np.ones(n_samples))
assert_array_equal(responsibilities.argmax(axis=1), gaussidx)
def test_sample(self, n=100):
g = self.model(n_components=self.n_components,
covariance_type=self.covariance_type, random_state=rng)
# Make sure the means are far apart so responsibilities.argmax()
# picks the actual component used to generate the observations.
g.means_ = 20 * self.means
g.covars_ = np.maximum(self.covars[self.covariance_type], 0.1)
g.weights_ = self.weights
samples = g.sample(n)
self.assertEqual(samples.shape, (n, self.n_features))
def test_train(self, params='wmc'):
g = mixture.GMM(n_components=self.n_components,
covariance_type=self.covariance_type)
g.weights_ = self.weights
g.means_ = self.means
g.covars_ = 20 * self.covars[self.covariance_type]
# Create a training set by sampling from the predefined distribution.
X = g.sample(n_samples=100)
g = self.model(n_components=self.n_components,
covariance_type=self.covariance_type,
random_state=rng, min_covar=1e-1,
n_iter=1, init_params=params)
g.fit(X)
# Do one training iteration at a time so we can keep track of
# the log likelihood to make sure that it increases after each
# iteration.
trainll = []
for _ in range(5):
g.params = params
g.init_params = ''
g.fit(X)
trainll.append(self.score(g, X))
g.n_iter = 10
g.init_params = ''
g.params = params
g.fit(X) # finish fitting
# Note that the log likelihood will sometimes decrease by a
# very small amount after it has more or less converged due to
# the addition of min_covar to the covariance (to prevent
# underflow). This is why the threshold is set to -0.5
# instead of 0.
delta_min = np.diff(trainll).min()
self.assertTrue(
delta_min > self.threshold,
"The min nll increase is %f which is lower than the admissible"
" threshold of %f, for model %s. The likelihoods are %s."
% (delta_min, self.threshold, self.covariance_type, trainll))
def test_train_degenerate(self, params='wmc'):
# Train on degenerate data with 0 in some dimensions
# Create a training set by sampling from the predefined distribution.
X = rng.randn(100, self.n_features)
X.T[1:] = 0
g = self.model(n_components=2, covariance_type=self.covariance_type,
random_state=rng, min_covar=1e-3, n_iter=5,
init_params=params)
g.fit(X)
trainll = g.score(X)
self.assertTrue(np.sum(np.abs(trainll / 100 / X.shape[1])) < 5)
def test_train_1d(self, params='wmc'):
# Train on 1-D data
# Create a training set by sampling from the predefined distribution.
X = rng.randn(100, 1)
# X.T[1:] = 0
g = self.model(n_components=2, covariance_type=self.covariance_type,
random_state=rng, min_covar=1e-7, n_iter=5,
init_params=params)
g.fit(X)
trainll = g.score(X)
if isinstance(g, mixture.DPGMM):
self.assertTrue(np.sum(np.abs(trainll / 100)) < 5)
else:
self.assertTrue(np.sum(np.abs(trainll / 100)) < 2)
def score(self, g, X):
return g.score(X).sum()
class TestGMMWithSphericalCovars(unittest.TestCase, GMMTester):
covariance_type = 'spherical'
model = mixture.GMM
setUp = GMMTester._setUp
class TestGMMWithDiagonalCovars(unittest.TestCase, GMMTester):
covariance_type = 'diag'
model = mixture.GMM
setUp = GMMTester._setUp
class TestGMMWithTiedCovars(unittest.TestCase, GMMTester):
covariance_type = 'tied'
model = mixture.GMM
setUp = GMMTester._setUp
class TestGMMWithFullCovars(unittest.TestCase, GMMTester):
covariance_type = 'full'
model = mixture.GMM
setUp = GMMTester._setUp
def test_multiple_init():
# Test that multiple inits do not perform much worse than a single one
X = rng.randn(30, 5)
X[:10] += 2
g = mixture.GMM(n_components=2, covariance_type='spherical',
random_state=rng, min_covar=1e-7, n_iter=5)
train1 = g.fit(X).score(X).sum()
g.n_init = 5
train2 = g.fit(X).score(X).sum()
assert_true(train2 >= train1 - 1.e-2)
def test_n_parameters():
# Test that the right number of parameters is estimated
n_samples, n_dim, n_components = 7, 5, 2
X = rng.randn(n_samples, n_dim)
n_params = {'spherical': 13, 'diag': 21, 'tied': 26, 'full': 41}
for cv_type in ['full', 'tied', 'diag', 'spherical']:
g = mixture.GMM(n_components=n_components, covariance_type=cv_type,
random_state=rng, min_covar=1e-7, n_iter=1)
g.fit(X)
assert_true(g._n_parameters() == n_params[cv_type])
def test_1d_1component():
# Test all of the covariance_types return the same BIC score for
# 1-dimensional, 1 component fits.
n_samples, n_dim, n_components = 100, 1, 1
X = rng.randn(n_samples, n_dim)
g_full = mixture.GMM(n_components=n_components, covariance_type='full',
random_state=rng, min_covar=1e-7, n_iter=1)
g_full.fit(X)
g_full_bic = g_full.bic(X)
for cv_type in ['tied', 'diag', 'spherical']:
g = mixture.GMM(n_components=n_components, covariance_type=cv_type,
random_state=rng, min_covar=1e-7, n_iter=1)
g.fit(X)
assert_array_almost_equal(g.bic(X), g_full_bic)
def assert_fit_predict_correct(model, X):
model2 = copy.deepcopy(model)
predictions_1 = model.fit(X).predict(X)
predictions_2 = model2.fit_predict(X)
assert adjusted_rand_score(predictions_1, predictions_2) == 1.0
def test_fit_predict():
"""
test that gmm.fit_predict is equivalent to gmm.fit + gmm.predict
"""
lrng = np.random.RandomState(101)
n_samples, n_dim, n_comps = 100, 2, 2
mu = np.array([[8, 8]])
component_0 = lrng.randn(n_samples, n_dim)
component_1 = lrng.randn(n_samples, n_dim) + mu
X = np.vstack((component_0, component_1))
for m_constructor in (mixture.GMM, mixture.VBGMM, mixture.DPGMM):
model = m_constructor(n_components=n_comps, covariance_type='full',
min_covar=1e-7, n_iter=5,
random_state=np.random.RandomState(0))
assert_fit_predict_correct(model, X)
model = mixture.GMM(n_components=n_comps, n_iter=0)
z = model.fit_predict(X)
assert np.all(z == 0), "Quick Initialization Failed!"
def test_aic():
# Test the aic and bic criteria
n_samples, n_dim, n_components = 50, 3, 2
X = rng.randn(n_samples, n_dim)
SGH = 0.5 * (X.var() + np.log(2 * np.pi)) # standard gaussian entropy
for cv_type in ['full', 'tied', 'diag', 'spherical']:
g = mixture.GMM(n_components=n_components, covariance_type=cv_type,
random_state=rng, min_covar=1e-7)
g.fit(X)
aic = 2 * n_samples * SGH * n_dim + 2 * g._n_parameters()
bic = (2 * n_samples * SGH * n_dim +
np.log(n_samples) * g._n_parameters())
bound = n_dim * 3. / np.sqrt(n_samples)
assert_true(np.abs(g.aic(X) - aic) / n_samples < bound)
assert_true(np.abs(g.bic(X) - bic) / n_samples < bound)
def check_positive_definite_covars(covariance_type):
r"""Test that covariance matrices do not become non positive definite
Due to the accumulation of round-off errors, the computation of the
covariance matrices during the learning phase could lead to non-positive
definite covariance matrices. Namely the use of the formula:
.. math:: C = (\sum_i w_i x_i x_i^T) - \mu \mu^T
instead of:
.. math:: C = \sum_i w_i (x_i - \mu)(x_i - \mu)^T
while mathematically equivalent, was observed to raise a ``LinAlgError``
exception when computing a ``GMM`` with full covariance matrices and fixed mean.
This function ensures that some later optimization will not introduce the
problem again.
"""
rng = np.random.RandomState(1)
# we build a dataset with two 2d components. The components are unbalanced
# (respective weights 0.9 and 0.1)
X = rng.randn(100, 2)
X[-10:] += (3, 3) # Shift the 10 last points
gmm = mixture.GMM(2, params="wc", covariance_type=covariance_type,
min_covar=1e-3)
# This is a non-regression test for issue #2640. The following call used
# to trigger:
# numpy.linalg.linalg.LinAlgError: 2-th leading minor not positive definite
gmm.fit(X)
if covariance_type == "diag" or covariance_type == "spherical":
assert_greater(gmm.covars_.min(), 0)
else:
if covariance_type == "tied":
covs = [gmm.covars_]
else:
covs = gmm.covars_
for c in covs:
assert_greater(np.linalg.det(c), 0)
def test_positive_definite_covars():
# Check positive definiteness for all covariance types
for covariance_type in ["full", "tied", "diag", "spherical"]:
yield check_positive_definite_covars, covariance_type
def test_verbose_first_level():
# Create sample data
X = rng.randn(30, 5)
X[:10] += 2
g = mixture.GMM(n_components=2, n_init=2, verbose=1)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
g.fit(X)
finally:
sys.stdout = old_stdout
def test_verbose_second_level():
# Create sample data
X = rng.randn(30, 5)
X[:10] += 2
g = mixture.GMM(n_components=2, n_init=2, verbose=2)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
g.fit(X)
finally:
sys.stdout = old_stdout
| bsd-3-clause |
gcarq/freqtrade | freqtrade/optimize/default_hyperopt_loss.py | 1 | 1967 | """
ShortTradeDurHyperOptLoss
This module defines the default HyperoptLoss class which is being used for
Hyperoptimization.
"""
from math import exp
from pandas import DataFrame
from freqtrade.optimize.hyperopt import IHyperOptLoss
# Set TARGET_TRADES to suit your number of concurrent trades so it is realistic
# relative to the number of days
TARGET_TRADES = 600
# This is assumed to be expected avg profit * expected trade count.
# For example, for 0.35% avg per trade (or 0.0035 as ratio) and 1100 trades,
# expected max profit = 3.85
# Check that the reported Σ% values do not exceed this!
# Note, this is ratio. 3.85 stated above means 385Σ%.
EXPECTED_MAX_PROFIT = 3.0
# Max average trade duration in minutes.
# If eval ends with higher value, we consider it a failed eval.
MAX_ACCEPTED_TRADE_DURATION = 300
class ShortTradeDurHyperOptLoss(IHyperOptLoss):
"""
Defines the default loss function for hyperopt
"""
@staticmethod
def hyperopt_loss_function(results: DataFrame, trade_count: int,
*args, **kwargs) -> float:
"""
Objective function, returns smaller number for better results
This is the default algorithm.
Weights are distributed as follows:
* 0.4 to trade duration
* 0.25 to avoiding trade losses
* 1.0 to total profit, compared to the expected value (`EXPECTED_MAX_PROFIT`) defined above
"""
total_profit = results['profit_percent'].sum()
trade_duration = results['trade_duration'].mean()
trade_loss = 1 - 0.25 * exp(-(trade_count - TARGET_TRADES) ** 2 / 10 ** 5.8)
profit_loss = max(0, 1 - total_profit / EXPECTED_MAX_PROFIT)
duration_loss = 0.4 * min(trade_duration / MAX_ACCEPTED_TRADE_DURATION, 1)
result = trade_loss + profit_loss + duration_loss
return result
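# Illustrative numbers (assumed, not part of freqtrade): with trade_count equal
# to TARGET_TRADES (600), a total profit ratio of 3.0 (== EXPECTED_MAX_PROFIT)
# and a mean trade duration of 150 minutes, the terms above evaluate to
#   trade_loss    = 1 - 0.25 * exp(0)       = 0.75
#   profit_loss   = max(0, 1 - 3.0 / 3.0)   = 0.0
#   duration_loss = 0.4 * min(150 / 300, 1) = 0.2
# giving a loss of 0.95; deviating from TARGET_TRADES, longer durations or
# lower profit would all increase the returned value.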
# Create an alias for this to allow the legacy method to work as well.
DefaultHyperOptLoss = ShortTradeDurHyperOptLoss
| gpl-3.0 |
maheshakya/scikit-learn | examples/cluster/plot_mean_shift.py | 351 | 1793 | """
=============================================
A demo of the mean-shift clustering algorithm
=============================================
Reference:
Dorin Comaniciu and Peter Meer, "Mean Shift: A robust approach toward
feature space analysis". IEEE Transactions on Pattern Analysis and
Machine Intelligence. 2002. pp. 603-619.
"""
print(__doc__)
import numpy as np
from sklearn.cluster import MeanShift, estimate_bandwidth
from sklearn.datasets.samples_generator import make_blobs
###############################################################################
# Generate sample data
centers = [[1, 1], [-1, -1], [1, -1]]
X, _ = make_blobs(n_samples=10000, centers=centers, cluster_std=0.6)
###############################################################################
# Compute clustering with MeanShift
# The following bandwidth can be automatically detected using estimate_bandwidth
bandwidth = estimate_bandwidth(X, quantile=0.2, n_samples=500)
ms = MeanShift(bandwidth=bandwidth, bin_seeding=True)
ms.fit(X)
labels = ms.labels_
cluster_centers = ms.cluster_centers_
labels_unique = np.unique(labels)
n_clusters_ = len(labels_unique)
print("number of estimated clusters : %d" % n_clusters_)
###############################################################################
# Plot result
import matplotlib.pyplot as plt
from itertools import cycle
plt.figure(1)
plt.clf()
colors = cycle('bgrcmykbgrcmykbgrcmykbgrcmyk')
for k, col in zip(range(n_clusters_), colors):
my_members = labels == k
cluster_center = cluster_centers[k]
plt.plot(X[my_members, 0], X[my_members, 1], col + '.')
plt.plot(cluster_center[0], cluster_center[1], 'o', markerfacecolor=col,
markeredgecolor='k', markersize=14)
plt.title('Estimated number of clusters: %d' % n_clusters_)
plt.show()
| bsd-3-clause |
nivm/learningchess | chess/complexmove/draw_moves_dist.py | 1 | 5247 | import sys
import re
from collections import defaultdict
import itertools
from math import sqrt
import matplotlib.pyplot as plt
pieces_hash = {"r": "rook", "k": "king", "q": "queen", "b" : "bishop", \
"n" : "knight", "p" : "pawn"}
def normalize_area_vec (piece_area):
normalization_factor = 1000.0 / sum(piece_area)
piece_area = [normalization_factor * p for p in piece_area]
return piece_area
def draw_charts(chess_pieces_move_dict):
for piece, piece_moves in chess_pieces_move_dict.iteritems():
fig = plt.figure()
ax = fig.add_subplot(111)
ax.set_title(pieces_hash[piece].upper() + " move histogram")
piece_x = []
piece_y = []
piece_area = []
for locations, count in piece_moves.iteritems():
piece_x.append(locations[1])
piece_y.append(locations[0])
piece_area.append(count)
piece_area = normalize_area_vec(piece_area)
plt.scatter(piece_x, piece_y, s=piece_area, alpha=0.5)
# Adding the piece it self
plt.scatter([0], [0], s=200, alpha=0.6, color="red")
ax.set_xticks(range(min(piece_x)-1,max(piece_x)+2))
ax.set_yticks(range(min(piece_y)-1,max(piece_y)+2))
plt.grid(b=True, which='both', color='0.65',linestyle='-')
plt.show()
def draw_overall_chart (chess_pieces_move_dict):
fig = plt.figure()
ax = fig.add_subplot(111)
piece_color_hash = {"p": "LightSlateGray", "k": "Navy", "q":"ForestGreen",\
"r": "OrangeRed", "n": "LimeGreen","b": "red"}
legend,legend_text = [], []
for i, (piece, piece_moves) in enumerate(chess_pieces_move_dict.iteritems()):
#ax.set_title(pieces_hash[piece].upper() + " move histogram")
piece_x = []
piece_y = []
piece_area = []
#legend_text.append(pieces_hash[piece].upper())
legend_text.append(pieces_hash[piece])
for locations, count in piece_moves.iteritems():
piece_x.append(locations[0])
piece_y.append(locations[1])
piece_area.append(count)
piece_area = normalize_area_vec(piece_area)
plt.scatter(piece_x, piece_y, alpha=0.75, \
color=piece_color_hash[piece], s=piece_area)
#print type(x), x.get_sizes()
ax.set_xticks(range(min(piece_x)-1,max(piece_x)+2))
ax.set_yticks(range(min(piece_y)-1,max(piece_y)+2))
#, fontsize=8
plt.legend(legend_text, scatterpoints=1,loc='lower center',ncol=6, prop={'size':10})
plt.scatter([0], [0], s=180, alpha=0.9, color="black")
plt.grid(b=True, which='both', color='0.65',linestyle='-')
plt.show()
def surrounds_histograms(surrounds_data):
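# (descriptive note, added) builds, for every position i of the ";"-joined
# surround string, a count of which board element occupies that position, and
# keeps only positions that always hold a single element; index 4 -- assumed
# here to be the centre of a 3x3 surround, i.e. the moving piece itself -- is
# dropped.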
histogram = defaultdict(lambda: defaultdict(int))
for surround, count in surrounds_data.iteritems():
elements = surround.split(";")
for i in xrange(0, len(elements)):
histogram[i][elements[i]]+=count
#histogram = {k : dict(v) for k,v in histogram.iteritems() if k!=4}
histogram = {k : dict(v) for k,v in histogram.iteritems() if len(v)==1 and k!=4}
return histogram
def rotate_surround(surround):
'''
Rotate the surround string so that white and black
surrounds are treated the same
'''
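# Example (illustrative): a 2x2 surround "a;b;c;d", read row by row as
# [["a", "b"], ["c", "d"]], comes back as "d;c;b;a" -- i.e. a 180-degree
# rotation of the grid.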
surround = surround.split(";")
n = int(sqrt(len(surround)))
surround = [surround[i:i+n] for i in xrange(0, n*n,n)]
surround = [reversed(z) for z in reversed(surround)]
surround = list(itertools.chain(*surround))
surround = ";".join(surround)
return surround
def group_by_histograms(histograms):
histogram_groups = defaultdict(lambda: defaultdict(list))
for piece, diffs in histograms.iteritems():
for (x,y), histogram in diffs.iteritems():
count = list(itertools.chain(*[k.values() for k in histogram.values() \
if len(k)==1]))[0]
if count < 10:
continue
keys = tuple(sorted(histogram.keys()))
values = tuple([histogram[k].keys()[0] for k in keys])
histogram_groups[piece][keys].append(((x,y),count, values))
return histogram_groups
def main():
'''
Draw a histogram of the relative movements of the pieces.
Input (tab-separated): piece x y [surround]
'''
chess_pieces_move_dict = {}
#
for line in sys.stdin:
fields = re.split("\t", line.strip())
surround = None
if len(fields)==3:
piece, x,y = fields
elif len(fields)==4:
piece, x,y, surround = fields
x = int(x)
y = int(y)
# Adjust black and white behaviour
if piece.lower() != piece:
x = -x
y = -y
if surround:
surround = rotate_surround(surround)
if piece.lower() not in chess_pieces_move_dict:
if surround:
chess_pieces_move_dict[piece.lower()] = \
defaultdict(lambda : defaultdict(int))
else:
chess_pieces_move_dict[piece.lower()] = defaultdict(int)
if surround:
chess_pieces_move_dict[piece.lower()][(x,y)][surround]+= 1
else:
chess_pieces_move_dict[piece.lower()][(x,y)]+= 1
histograms = defaultdict(lambda: defaultdict(dict))
for piece in chess_pieces_move_dict:
for x,y in chess_pieces_move_dict[piece]:
histogram = surrounds_histograms(chess_pieces_move_dict[piece][(x,y)])
if histogram:
histograms[piece][(x,y)] = histogram
histogram_groups = group_by_histograms(histograms)
for piece, piece_groups in histogram_groups.iteritems():
for group, moves in piece_groups.iteritems():
print piece,'\t',group,'\t',moves
print '\n\n\n'
for piece, diffs in histograms.iteritems():
for (x,y), histogram in diffs.iteritems():
print piece,'\t',(x,y),'\t',histogram
#draw_charts(chess_pieces_move_dict)
#draw_overall_chart(chess_pieces_move_dict)
if __name__ == '__main__':
main() | apache-2.0 |
holdenk/spark | python/pyspark/worker.py | 5 | 28191 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Worker that receives input from Piped RDD.
"""
import os
import sys
import time
from inspect import currentframe, getframeinfo, getfullargspec
import importlib
# 'resource' is a Unix specific module.
has_resource_module = True
try:
import resource
except ImportError:
has_resource_module = False
import traceback
import warnings
from pyspark.accumulators import _accumulatorRegistry
from pyspark.broadcast import Broadcast, _broadcastRegistry
from pyspark.java_gateway import local_connect_and_auth
from pyspark.taskcontext import BarrierTaskContext, TaskContext
from pyspark.files import SparkFiles
from pyspark.resource import ResourceInformation
from pyspark.rdd import PythonEvalType
from pyspark.serializers import write_with_length, write_int, read_long, read_bool, \
write_long, read_int, SpecialLengths, UTF8Deserializer, PickleSerializer, \
BatchedSerializer
from pyspark.sql.pandas.serializers import ArrowStreamPandasUDFSerializer, CogroupUDFSerializer
from pyspark.sql.pandas.types import to_arrow_type
from pyspark.sql.types import StructType
from pyspark.util import fail_on_stopiteration, try_simplify_traceback
from pyspark import shuffle
pickleSer = PickleSerializer()
utf8_deserializer = UTF8Deserializer()
def report_times(outfile, boot, init, finish):
write_int(SpecialLengths.TIMING_DATA, outfile)
write_long(int(1000 * boot), outfile)
write_long(int(1000 * init), outfile)
write_long(int(1000 * finish), outfile)
def add_path(path):
# worker can be used, so do not add path multiple times
if path not in sys.path:
# overwrite system packages
sys.path.insert(1, path)
def read_command(serializer, file):
command = serializer._read_with_length(file)
if isinstance(command, Broadcast):
command = serializer.loads(command.value)
return command
def chain(f, g):
"""chain two functions together """
return lambda *a: g(f(*a))
def wrap_udf(f, return_type):
if return_type.needConversion():
toInternal = return_type.toInternal
return lambda *a: toInternal(f(*a))
else:
return lambda *a: f(*a)
def wrap_scalar_pandas_udf(f, return_type):
arrow_return_type = to_arrow_type(return_type)
def verify_result_type(result):
if not hasattr(result, "__len__"):
pd_type = "Pandas.DataFrame" if type(return_type) == StructType else "Pandas.Series"
raise TypeError("Return type of the user-defined function should be "
"{}, but is {}".format(pd_type, type(result)))
return result
def verify_result_length(result, length):
if len(result) != length:
raise RuntimeError("Result vector from pandas_udf was not the required length: "
"expected %d, got %d" % (length, len(result)))
return result
return lambda *a: (verify_result_length(
verify_result_type(f(*a)), len(a[0])), arrow_return_type)
def wrap_pandas_iter_udf(f, return_type):
arrow_return_type = to_arrow_type(return_type)
def verify_result_type(result):
if not hasattr(result, "__len__"):
pd_type = "Pandas.DataFrame" if type(return_type) == StructType else "Pandas.Series"
raise TypeError("Return type of the user-defined function should be "
"{}, but is {}".format(pd_type, type(result)))
return result
return lambda *iterator: map(lambda res: (res, arrow_return_type),
map(verify_result_type, f(*iterator)))
def wrap_cogrouped_map_pandas_udf(f, return_type, argspec):
def wrapped(left_key_series, left_value_series, right_key_series, right_value_series):
import pandas as pd
left_df = pd.concat(left_value_series, axis=1)
right_df = pd.concat(right_value_series, axis=1)
if len(argspec.args) == 2:
result = f(left_df, right_df)
elif len(argspec.args) == 3:
key_series = left_key_series if not left_df.empty else right_key_series
key = tuple(s[0] for s in key_series)
result = f(key, left_df, right_df)
if not isinstance(result, pd.DataFrame):
raise TypeError("Return type of the user-defined function should be "
"pandas.DataFrame, but is {}".format(type(result)))
if not len(result.columns) == len(return_type):
raise RuntimeError(
"Number of columns of the returned pandas.DataFrame "
"doesn't match specified schema. "
"Expected: {} Actual: {}".format(len(return_type), len(result.columns)))
return result
return lambda kl, vl, kr, vr: [(wrapped(kl, vl, kr, vr), to_arrow_type(return_type))]
def wrap_grouped_map_pandas_udf(f, return_type, argspec):
def wrapped(key_series, value_series):
import pandas as pd
if len(argspec.args) == 1:
result = f(pd.concat(value_series, axis=1))
elif len(argspec.args) == 2:
key = tuple(s[0] for s in key_series)
result = f(key, pd.concat(value_series, axis=1))
if not isinstance(result, pd.DataFrame):
raise TypeError("Return type of the user-defined function should be "
"pandas.DataFrame, but is {}".format(type(result)))
if not len(result.columns) == len(return_type):
raise RuntimeError(
"Number of columns of the returned pandas.DataFrame "
"doesn't match specified schema. "
"Expected: {} Actual: {}".format(len(return_type), len(result.columns)))
return result
return lambda k, v: [(wrapped(k, v), to_arrow_type(return_type))]
def wrap_grouped_agg_pandas_udf(f, return_type):
arrow_return_type = to_arrow_type(return_type)
def wrapped(*series):
import pandas as pd
result = f(*series)
return pd.Series([result])
return lambda *a: (wrapped(*a), arrow_return_type)
def wrap_window_agg_pandas_udf(f, return_type, runner_conf, udf_index):
window_bound_types_str = runner_conf.get('pandas_window_bound_types')
window_bound_type = [t.strip().lower() for t in window_bound_types_str.split(',')][udf_index]
if window_bound_type == 'bounded':
return wrap_bounded_window_agg_pandas_udf(f, return_type)
elif window_bound_type == 'unbounded':
return wrap_unbounded_window_agg_pandas_udf(f, return_type)
else:
raise RuntimeError("Invalid window bound type: {} ".format(window_bound_type))
def wrap_unbounded_window_agg_pandas_udf(f, return_type):
# This is similar to grouped_agg_pandas_udf, the only difference
# is that window_agg_pandas_udf needs to repeat the return value
# to match window length, where grouped_agg_pandas_udf just returns
# the scalar value.
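# For example (illustrative): if f(*series) returns 3.5 for a 4-row window,
# the wrapped udf yields pd.Series([3.5, 3.5, 3.5, 3.5]) so that the result
# lines up with every row of the window.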
arrow_return_type = to_arrow_type(return_type)
def wrapped(*series):
import pandas as pd
result = f(*series)
return pd.Series([result]).repeat(len(series[0]))
return lambda *a: (wrapped(*a), arrow_return_type)
def wrap_bounded_window_agg_pandas_udf(f, return_type):
arrow_return_type = to_arrow_type(return_type)
def wrapped(begin_index, end_index, *series):
import pandas as pd
result = []
# Index operation is faster on np.ndarray,
# So we turn the index series into np array
# here for performance
begin_array = begin_index.values
end_array = end_index.values
for i in range(len(begin_array)):
# Note: Create a slice from a series for each window is
# actually pretty expensive. However, there
# is no easy way to reduce cost here.
# Note: s.iloc[i : j] is about 30% faster than s[i: j], with
# the caveat that the created slices shares the same
# memory with s. Therefore, user are not allowed to
# change the value of input series inside the window
# function. It is rare that user needs to modify the
# input series in the window function, and therefore,
# it is a reasonable restriction.
# Note: Calling reset_index on the slices will increase the cost
# of creating slices by about 100%. Therefore, for performance
# reasons we don't do it here.
series_slices = [s.iloc[begin_array[i]: end_array[i]] for s in series]
result.append(f(*series_slices))
return pd.Series(result)
return lambda *a: (wrapped(*a), arrow_return_type)
def read_single_udf(pickleSer, infile, eval_type, runner_conf, udf_index):
num_arg = read_int(infile)
arg_offsets = [read_int(infile) for i in range(num_arg)]
chained_func = None
for i in range(read_int(infile)):
f, return_type = read_command(pickleSer, infile)
if chained_func is None:
chained_func = f
else:
chained_func = chain(chained_func, f)
if eval_type == PythonEvalType.SQL_SCALAR_PANDAS_ITER_UDF:
func = chained_func
else:
# make sure StopIteration's raised in the user code are not ignored
# when they are processed in a for loop, raise them as RuntimeError's instead
func = fail_on_stopiteration(chained_func)
# the last returnType will be the return type of UDF
if eval_type == PythonEvalType.SQL_SCALAR_PANDAS_UDF:
return arg_offsets, wrap_scalar_pandas_udf(func, return_type)
elif eval_type == PythonEvalType.SQL_SCALAR_PANDAS_ITER_UDF:
return arg_offsets, wrap_pandas_iter_udf(func, return_type)
elif eval_type == PythonEvalType.SQL_MAP_PANDAS_ITER_UDF:
return arg_offsets, wrap_pandas_iter_udf(func, return_type)
elif eval_type == PythonEvalType.SQL_GROUPED_MAP_PANDAS_UDF:
argspec = getfullargspec(chained_func) # signature was lost when wrapping it
return arg_offsets, wrap_grouped_map_pandas_udf(func, return_type, argspec)
elif eval_type == PythonEvalType.SQL_COGROUPED_MAP_PANDAS_UDF:
argspec = getfullargspec(chained_func) # signature was lost when wrapping it
return arg_offsets, wrap_cogrouped_map_pandas_udf(func, return_type, argspec)
elif eval_type == PythonEvalType.SQL_GROUPED_AGG_PANDAS_UDF:
return arg_offsets, wrap_grouped_agg_pandas_udf(func, return_type)
elif eval_type == PythonEvalType.SQL_WINDOW_AGG_PANDAS_UDF:
return arg_offsets, wrap_window_agg_pandas_udf(func, return_type, runner_conf, udf_index)
elif eval_type == PythonEvalType.SQL_BATCHED_UDF:
return arg_offsets, wrap_udf(func, return_type)
else:
raise ValueError("Unknown eval type: {}".format(eval_type))
def read_udfs(pickleSer, infile, eval_type):
runner_conf = {}
if eval_type in (PythonEvalType.SQL_SCALAR_PANDAS_UDF,
PythonEvalType.SQL_COGROUPED_MAP_PANDAS_UDF,
PythonEvalType.SQL_SCALAR_PANDAS_ITER_UDF,
PythonEvalType.SQL_MAP_PANDAS_ITER_UDF,
PythonEvalType.SQL_GROUPED_MAP_PANDAS_UDF,
PythonEvalType.SQL_GROUPED_AGG_PANDAS_UDF,
PythonEvalType.SQL_WINDOW_AGG_PANDAS_UDF):
# Load conf used for pandas_udf evaluation
num_conf = read_int(infile)
for i in range(num_conf):
k = utf8_deserializer.loads(infile)
v = utf8_deserializer.loads(infile)
runner_conf[k] = v
# NOTE: if timezone is set here, that implies respectSessionTimeZone is True
timezone = runner_conf.get("spark.sql.session.timeZone", None)
safecheck = runner_conf.get("spark.sql.execution.pandas.convertToArrowArraySafely",
"false").lower() == 'true'
# Used by SQL_GROUPED_MAP_PANDAS_UDF and SQL_SCALAR_PANDAS_UDF when returning StructType
assign_cols_by_name = runner_conf.get(
"spark.sql.legacy.execution.pandas.groupedMap.assignColumnsByName", "true")\
.lower() == "true"
if eval_type == PythonEvalType.SQL_COGROUPED_MAP_PANDAS_UDF:
ser = CogroupUDFSerializer(timezone, safecheck, assign_cols_by_name)
else:
# Scalar Pandas UDF handles struct type arguments as pandas DataFrames instead of
# pandas Series. See SPARK-27240.
df_for_struct = (eval_type == PythonEvalType.SQL_SCALAR_PANDAS_UDF or
eval_type == PythonEvalType.SQL_SCALAR_PANDAS_ITER_UDF or
eval_type == PythonEvalType.SQL_MAP_PANDAS_ITER_UDF)
ser = ArrowStreamPandasUDFSerializer(timezone, safecheck, assign_cols_by_name,
df_for_struct)
else:
ser = BatchedSerializer(PickleSerializer(), 100)
num_udfs = read_int(infile)
is_scalar_iter = eval_type == PythonEvalType.SQL_SCALAR_PANDAS_ITER_UDF
is_map_iter = eval_type == PythonEvalType.SQL_MAP_PANDAS_ITER_UDF
if is_scalar_iter or is_map_iter:
if is_scalar_iter:
assert num_udfs == 1, "One SCALAR_ITER UDF expected here."
if is_map_iter:
assert num_udfs == 1, "One MAP_ITER UDF expected here."
arg_offsets, udf = read_single_udf(
pickleSer, infile, eval_type, runner_conf, udf_index=0)
def func(_, iterator):
num_input_rows = 0
def map_batch(batch):
nonlocal num_input_rows
udf_args = [batch[offset] for offset in arg_offsets]
num_input_rows += len(udf_args[0])
if len(udf_args) == 1:
return udf_args[0]
else:
return tuple(udf_args)
iterator = map(map_batch, iterator)
result_iter = udf(iterator)
num_output_rows = 0
for result_batch, result_type in result_iter:
num_output_rows += len(result_batch)
# This assert is for Scalar Iterator UDF to fail fast.
# The length of the entire input can only be explicitly known
# by consuming the input iterator in user side. Therefore,
# it's very unlikely the output length is higher than
# input length.
assert is_map_iter or num_output_rows <= num_input_rows, \
"Pandas SCALAR_ITER UDF outputted more rows than input rows."
yield (result_batch, result_type)
if is_scalar_iter:
try:
next(iterator)
except StopIteration:
pass
else:
raise RuntimeError("pandas iterator UDF should exhaust the input "
"iterator.")
if num_output_rows != num_input_rows:
raise RuntimeError(
"The length of output in Scalar iterator pandas UDF should be "
"the same with the input's; however, the length of output was %d and the "
"length of input was %d." % (num_output_rows, num_input_rows))
# profiling is not supported for UDF
return func, None, ser, ser
def extract_key_value_indexes(grouped_arg_offsets):
"""
Helper function to extract the key and value indexes from arg_offsets for the grouped and
cogrouped pandas udfs. See BasePandasGroupExec.resolveArgOffsets for equivalent scala code.
Parameters
----------
grouped_arg_offsets: list
List containing the key and value indexes of columns of the
DataFrames to be passed to the udf. It consists of n repeating groups where n is the
number of DataFrames. Each group has the following format:
group[0]: length of group
group[1]: length of key indexes
group[2 .. group[1] + 1]: key attributes
group[group[1] + 2 .. group[0]]: value attributes
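For example (illustrative values only), a single DataFrame whose first two
columns are grouping keys and whose next two columns are data values would be
encoded as ``[5, 2, 0, 1, 2, 3]``, which this helper parses into
``[[[0, 1], [2, 3]]]`` -- one ``[key_offsets, value_offsets]`` pair per
DataFrame.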
"""
parsed = []
idx = 0
while idx < len(grouped_arg_offsets):
offsets_len = grouped_arg_offsets[idx]
idx += 1
offsets = grouped_arg_offsets[idx: idx + offsets_len]
split_index = offsets[0] + 1
offset_keys = offsets[1: split_index]
offset_values = offsets[split_index:]
parsed.append([offset_keys, offset_values])
idx += offsets_len
return parsed
if eval_type == PythonEvalType.SQL_GROUPED_MAP_PANDAS_UDF:
# We assume there is only one UDF here because grouped map doesn't
# support combining multiple UDFs.
assert num_udfs == 1
# See FlatMapGroupsInPandasExec for how arg_offsets are used to
# distinguish between grouping attributes and data attributes
arg_offsets, f = read_single_udf(pickleSer, infile, eval_type, runner_conf, udf_index=0)
parsed_offsets = extract_key_value_indexes(arg_offsets)
# Create function like this:
# mapper a: f([a[0]], [a[0], a[1]])
def mapper(a):
keys = [a[o] for o in parsed_offsets[0][0]]
vals = [a[o] for o in parsed_offsets[0][1]]
return f(keys, vals)
elif eval_type == PythonEvalType.SQL_COGROUPED_MAP_PANDAS_UDF:
# We assume there is only one UDF here because cogrouped map doesn't
# support combining multiple UDFs.
assert num_udfs == 1
arg_offsets, f = read_single_udf(pickleSer, infile, eval_type, runner_conf, udf_index=0)
parsed_offsets = extract_key_value_indexes(arg_offsets)
def mapper(a):
df1_keys = [a[0][o] for o in parsed_offsets[0][0]]
df1_vals = [a[0][o] for o in parsed_offsets[0][1]]
df2_keys = [a[1][o] for o in parsed_offsets[1][0]]
df2_vals = [a[1][o] for o in parsed_offsets[1][1]]
return f(df1_keys, df1_vals, df2_keys, df2_vals)
else:
udfs = []
for i in range(num_udfs):
udfs.append(read_single_udf(pickleSer, infile, eval_type, runner_conf, udf_index=i))
def mapper(a):
result = tuple(f(*[a[o] for o in arg_offsets]) for (arg_offsets, f) in udfs)
# In the special case of a single UDF this will return a single result rather
# than a tuple of results; this is the format that the JVM side expects.
if len(result) == 1:
return result[0]
else:
return result
func = lambda _, it: map(mapper, it)
# profiling is not supported for UDF
return func, None, ser, ser
def main(infile, outfile):
try:
boot_time = time.time()
split_index = read_int(infile)
if split_index == -1: # for unit tests
sys.exit(-1)
version = utf8_deserializer.loads(infile)
if version != "%d.%d" % sys.version_info[:2]:
raise Exception(("Python in worker has different version %s than that in " +
"driver %s, PySpark cannot run with different minor versions. " +
"Please check environment variables PYSPARK_PYTHON and " +
"PYSPARK_DRIVER_PYTHON are correctly set.") %
("%d.%d" % sys.version_info[:2], version))
# read inputs only for a barrier task
isBarrier = read_bool(infile)
boundPort = read_int(infile)
secret = UTF8Deserializer().loads(infile)
# set up memory limits
memory_limit_mb = int(os.environ.get('PYSPARK_EXECUTOR_MEMORY_MB', "-1"))
if memory_limit_mb > 0 and has_resource_module:
total_memory = resource.RLIMIT_AS
try:
(soft_limit, hard_limit) = resource.getrlimit(total_memory)
msg = "Current mem limits: {0} of max {1}\n".format(soft_limit, hard_limit)
print(msg, file=sys.stderr)
# convert to bytes
new_limit = memory_limit_mb * 1024 * 1024
if soft_limit == resource.RLIM_INFINITY or new_limit < soft_limit:
msg = "Setting mem limits to {0} of max {1}\n".format(new_limit, new_limit)
print(msg, file=sys.stderr)
resource.setrlimit(total_memory, (new_limit, new_limit))
except (resource.error, OSError, ValueError) as e:
# not all systems support resource limits, so warn instead of failing
lineno = getframeinfo(
currentframe()).lineno + 1 if currentframe() is not None else 0
print(warnings.formatwarning(
"Failed to set memory limit: {0}".format(e),
ResourceWarning,
__file__,
lineno
), file=sys.stderr)
# initialize global state
taskContext = None
if isBarrier:
taskContext = BarrierTaskContext._getOrCreate()
BarrierTaskContext._initialize(boundPort, secret)
# Set the task context instance here, so we can get it by TaskContext.get for
# both TaskContext and BarrierTaskContext
TaskContext._setTaskContext(taskContext)
else:
taskContext = TaskContext._getOrCreate()
# read inputs for TaskContext info
taskContext._stageId = read_int(infile)
taskContext._partitionId = read_int(infile)
taskContext._attemptNumber = read_int(infile)
taskContext._taskAttemptId = read_long(infile)
taskContext._resources = {}
for r in range(read_int(infile)):
key = utf8_deserializer.loads(infile)
name = utf8_deserializer.loads(infile)
addresses = []
for a in range(read_int(infile)):
addresses.append(utf8_deserializer.loads(infile))
taskContext._resources[key] = ResourceInformation(name, addresses)
taskContext._localProperties = dict()
for i in range(read_int(infile)):
k = utf8_deserializer.loads(infile)
v = utf8_deserializer.loads(infile)
taskContext._localProperties[k] = v
shuffle.MemoryBytesSpilled = 0
shuffle.DiskBytesSpilled = 0
_accumulatorRegistry.clear()
# fetch name of workdir
spark_files_dir = utf8_deserializer.loads(infile)
SparkFiles._root_directory = spark_files_dir
SparkFiles._is_running_on_worker = True
# fetch names of includes (*.zip and *.egg files) and construct PYTHONPATH
add_path(spark_files_dir) # *.py files that were added will be copied here
num_python_includes = read_int(infile)
for _ in range(num_python_includes):
filename = utf8_deserializer.loads(infile)
add_path(os.path.join(spark_files_dir, filename))
importlib.invalidate_caches()
# fetch names and values of broadcast variables
needs_broadcast_decryption_server = read_bool(infile)
num_broadcast_variables = read_int(infile)
if needs_broadcast_decryption_server:
# read the decrypted data from a server in the jvm
port = read_int(infile)
auth_secret = utf8_deserializer.loads(infile)
(broadcast_sock_file, _) = local_connect_and_auth(port, auth_secret)
for _ in range(num_broadcast_variables):
bid = read_long(infile)
if bid >= 0:
if needs_broadcast_decryption_server:
read_bid = read_long(broadcast_sock_file)
assert(read_bid == bid)
_broadcastRegistry[bid] = \
Broadcast(sock_file=broadcast_sock_file)
else:
path = utf8_deserializer.loads(infile)
_broadcastRegistry[bid] = Broadcast(path=path)
else:
bid = - bid - 1
_broadcastRegistry.pop(bid)
if needs_broadcast_decryption_server:
broadcast_sock_file.write(b'1')
broadcast_sock_file.close()
_accumulatorRegistry.clear()
eval_type = read_int(infile)
if eval_type == PythonEvalType.NON_UDF:
func, profiler, deserializer, serializer = read_command(pickleSer, infile)
else:
func, profiler, deserializer, serializer = read_udfs(pickleSer, infile, eval_type)
init_time = time.time()
def process():
iterator = deserializer.load_stream(infile)
out_iter = func(split_index, iterator)
try:
serializer.dump_stream(out_iter, outfile)
finally:
if hasattr(out_iter, 'close'):
out_iter.close()
if profiler:
profiler.profile(process)
else:
process()
# Reset task context to None. This is a guard code to avoid residual context when worker
# reuse.
TaskContext._setTaskContext(None)
BarrierTaskContext._setTaskContext(None)
except BaseException as e:
try:
exc_info = None
if os.environ.get("SPARK_SIMPLIFIED_TRACEBACK", False):
tb = try_simplify_traceback(sys.exc_info()[-1])
if tb is not None:
e.__cause__ = None
exc_info = "".join(traceback.format_exception(type(e), e, tb))
if exc_info is None:
exc_info = traceback.format_exc()
write_int(SpecialLengths.PYTHON_EXCEPTION_THROWN, outfile)
write_with_length(exc_info.encode("utf-8"), outfile)
except IOError:
# JVM close the socket
pass
except BaseException:
# Write the error to stderr if it happened while serializing
print("PySpark worker failed with exception:", file=sys.stderr)
print(traceback.format_exc(), file=sys.stderr)
sys.exit(-1)
finish_time = time.time()
report_times(outfile, boot_time, init_time, finish_time)
write_long(shuffle.MemoryBytesSpilled, outfile)
write_long(shuffle.DiskBytesSpilled, outfile)
# Mark the beginning of the accumulators section of the output
write_int(SpecialLengths.END_OF_DATA_SECTION, outfile)
write_int(len(_accumulatorRegistry), outfile)
for (aid, accum) in _accumulatorRegistry.items():
pickleSer._write_with_length((aid, accum._value), outfile)
# check end of stream
if read_int(infile) == SpecialLengths.END_OF_STREAM:
write_int(SpecialLengths.END_OF_STREAM, outfile)
else:
# write a different value to tell JVM to not reuse this worker
write_int(SpecialLengths.END_OF_DATA_SECTION, outfile)
sys.exit(-1)
if __name__ == '__main__':
# Read information about how to connect back to the JVM from the environment.
java_port = int(os.environ["PYTHON_WORKER_FACTORY_PORT"])
auth_secret = os.environ["PYTHON_WORKER_FACTORY_SECRET"]
(sock_file, _) = local_connect_and_auth(java_port, auth_secret)
main(sock_file, sock_file)
| apache-2.0 |
larsmans/scikit-learn | sklearn/metrics/tests/test_score_objects.py | 9 | 11108 | import pickle
import numpy as np
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regexp
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_not_equal
from sklearn.metrics import (f1_score, r2_score, roc_auc_score, fbeta_score,
log_loss)
from sklearn.metrics.cluster import adjusted_rand_score
from sklearn.metrics.scorer import check_scoring
from sklearn.metrics import make_scorer, SCORERS
from sklearn.svm import LinearSVC
from sklearn.cluster import KMeans
from sklearn.dummy import DummyRegressor
from sklearn.linear_model import Ridge, LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.datasets import make_blobs
from sklearn.datasets import make_classification
from sklearn.datasets import make_multilabel_classification
from sklearn.datasets import load_diabetes
from sklearn.cross_validation import train_test_split, cross_val_score
from sklearn.grid_search import GridSearchCV
from sklearn.multiclass import OneVsRestClassifier
REGRESSION_SCORERS = ['r2', 'mean_absolute_error', 'mean_squared_error',
'median_absolute_error']
CLF_SCORERS = ['accuracy', 'f1', 'roc_auc', 'average_precision', 'precision',
'recall', 'log_loss',
'adjusted_rand_score' # not really, but works
]
class EstimatorWithoutFit(object):
"""Dummy estimator to test check_scoring"""
pass
class EstimatorWithFit(object):
"""Dummy estimator to test check_scoring"""
def fit(self, X, y):
return self
class EstimatorWithFitAndScore(object):
"""Dummy estimator to test check_scoring"""
def fit(self, X, y):
return self
def score(self, X, y):
return 1.0
class EstimatorWithFitAndPredict(object):
"""Dummy estimator to test check_scoring"""
def fit(self, X, y):
self.y = y
return self
def predict(self, X):
return self.y
def test_check_scoring():
"""Test all branches of check_scoring"""
estimator = EstimatorWithoutFit()
pattern = (r"estimator should a be an estimator implementing 'fit' method,"
r" .* was passed")
assert_raises_regexp(TypeError, pattern, check_scoring, estimator)
estimator = EstimatorWithFitAndScore()
estimator.fit([[1]], [1])
scorer = check_scoring(estimator)
assert_almost_equal(scorer(estimator, [[1]], [1]), 1.0)
estimator = EstimatorWithFitAndPredict()
estimator.fit([[1]], [1])
pattern = (r"If no scoring is specified, the estimator passed should have"
r" a 'score' method\. The estimator .* does not\.")
assert_raises_regexp(TypeError, pattern, check_scoring, estimator)
scorer = check_scoring(estimator, "accuracy")
assert_almost_equal(scorer(estimator, [[1]], [1]), 1.0)
estimator = EstimatorWithFit()
pattern = (r"The estimator passed should have a 'score'"
r" or a 'predict' method\. The estimator .* does not\.")
assert_raises_regexp(TypeError, pattern, check_scoring, estimator,
"accuracy")
estimator = EstimatorWithFit()
scorer = check_scoring(estimator, allow_none=True)
assert_true(scorer is None)
def test_make_scorer():
"""Sanity check on the make_scorer factory function."""
f = lambda *args: 0
assert_raises(ValueError, make_scorer, f, needs_threshold=True,
needs_proba=True)
def test_classification_scores():
"""Test classification scorers."""
X, y = make_blobs(random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
clf = LinearSVC(random_state=0)
clf.fit(X_train, y_train)
score1 = SCORERS['f1'](clf, X_test, y_test)
score2 = f1_score(y_test, clf.predict(X_test))
assert_almost_equal(score1, score2)
# test fbeta score that takes an argument
scorer = make_scorer(fbeta_score, beta=2)
score1 = scorer(clf, X_test, y_test)
score2 = fbeta_score(y_test, clf.predict(X_test), beta=2)
assert_almost_equal(score1, score2)
# test that custom scorer can be pickled
unpickled_scorer = pickle.loads(pickle.dumps(scorer))
score3 = unpickled_scorer(clf, X_test, y_test)
assert_almost_equal(score1, score3)
# smoke test the repr:
repr(fbeta_score)
def test_regression_scorers():
"""Test regression scorers."""
diabetes = load_diabetes()
X, y = diabetes.data, diabetes.target
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
clf = Ridge()
clf.fit(X_train, y_train)
score1 = SCORERS['r2'](clf, X_test, y_test)
score2 = r2_score(y_test, clf.predict(X_test))
assert_almost_equal(score1, score2)
def test_thresholded_scorers():
"""Test scorers that take thresholds."""
X, y = make_blobs(random_state=0, centers=2)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
clf = LogisticRegression(random_state=0)
clf.fit(X_train, y_train)
score1 = SCORERS['roc_auc'](clf, X_test, y_test)
score2 = roc_auc_score(y_test, clf.decision_function(X_test))
score3 = roc_auc_score(y_test, clf.predict_proba(X_test)[:, 1])
assert_almost_equal(score1, score2)
assert_almost_equal(score1, score3)
logscore = SCORERS['log_loss'](clf, X_test, y_test)
logloss = log_loss(y_test, clf.predict_proba(X_test))
assert_almost_equal(-logscore, logloss)
# same for an estimator without decision_function
clf = DecisionTreeClassifier()
clf.fit(X_train, y_train)
score1 = SCORERS['roc_auc'](clf, X_test, y_test)
score2 = roc_auc_score(y_test, clf.predict_proba(X_test)[:, 1])
assert_almost_equal(score1, score2)
# Test that an exception is raised on more than two classes
X, y = make_blobs(random_state=0, centers=3)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
clf.fit(X_train, y_train)
assert_raises(ValueError, SCORERS['roc_auc'], clf, X_test, y_test)
def test_thresholded_scorers_multilabel_indicator_data():
"""Test that the scorer work with multilabel-indicator format
for multilabel and multi-output multi-class classifier
"""
X, y = make_multilabel_classification(return_indicator=True,
allow_unlabeled=False,
random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
# Multi-output multi-class predict_proba
clf = DecisionTreeClassifier()
clf.fit(X_train, y_train)
y_proba = clf.predict_proba(X_test)
score1 = SCORERS['roc_auc'](clf, X_test, y_test)
score2 = roc_auc_score(y_test, np.vstack(p[:, -1] for p in y_proba).T)
assert_almost_equal(score1, score2)
# Multi-output multi-class decision_function
# TODO Is there any yet?
clf = DecisionTreeClassifier()
clf.fit(X_train, y_train)
clf._predict_proba = clf.predict_proba
clf.predict_proba = None
clf.decision_function = lambda X: [p[:, 1] for p in clf._predict_proba(X)]
y_proba = clf.decision_function(X_test)
score1 = SCORERS['roc_auc'](clf, X_test, y_test)
score2 = roc_auc_score(y_test, np.vstack(p for p in y_proba).T)
assert_almost_equal(score1, score2)
# Multilabel predict_proba
clf = OneVsRestClassifier(DecisionTreeClassifier())
clf.fit(X_train, y_train)
score1 = SCORERS['roc_auc'](clf, X_test, y_test)
score2 = roc_auc_score(y_test, clf.predict_proba(X_test))
assert_almost_equal(score1, score2)
# Multilabel decision function
clf = OneVsRestClassifier(LinearSVC(random_state=0))
clf.fit(X_train, y_train)
score1 = SCORERS['roc_auc'](clf, X_test, y_test)
score2 = roc_auc_score(y_test, clf.decision_function(X_test))
assert_almost_equal(score1, score2)
def test_unsupervised_scorers():
"""Test clustering scorers against gold standard labeling."""
# We don't have any real unsupervised Scorers yet.
X, y = make_blobs(random_state=0, centers=2)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
km = KMeans(n_clusters=3)
km.fit(X_train)
score1 = SCORERS['adjusted_rand_score'](km, X_test, y_test)
score2 = adjusted_rand_score(y_test, km.predict(X_test))
assert_almost_equal(score1, score2)
@ignore_warnings
def test_raises_on_score_list():
"""Test that when a list of scores is returned, we raise proper errors."""
X, y = make_blobs(random_state=0)
f1_scorer_no_average = make_scorer(f1_score, average=None)
clf = DecisionTreeClassifier()
assert_raises(ValueError, cross_val_score, clf, X, y,
scoring=f1_scorer_no_average)
grid_search = GridSearchCV(clf, scoring=f1_scorer_no_average,
param_grid={'max_depth': [1, 2]})
assert_raises(ValueError, grid_search.fit, X, y)
def test_scorer_sample_weight():
"""Test that scorers support sample_weight or raise sensible errors"""
# Unlike the metrics invariance test, in the scorer case it's harder
# to ensure that, on the classifier output, weighted and unweighted
# scores really should be unequal.
X, y = make_classification(random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
sample_weight = np.ones_like(y_test)
sample_weight[:10] = 0
# get sensible estimators for each metric
sensible_regr = DummyRegressor(strategy='median')
sensible_regr.fit(X_train, y_train)
sensible_clf = DecisionTreeClassifier()
sensible_clf.fit(X_train, y_train)
estimator = dict([(name, sensible_regr)
for name in REGRESSION_SCORERS] +
[(name, sensible_clf)
for name in CLF_SCORERS])
for name, scorer in SCORERS.items():
try:
weighted = scorer(estimator[name], X_test, y_test,
sample_weight=sample_weight)
ignored = scorer(estimator[name], X_test[10:], y_test[10:])
unweighted = scorer(estimator[name], X_test, y_test)
assert_not_equal(weighted, unweighted,
msg="scorer {0} behaves identically when "
"called with sample weights: {1} vs "
"{2}".format(name, weighted, unweighted))
assert_almost_equal(weighted, ignored,
err_msg="scorer {0} behaves differently when "
"ignoring samples and setting sample_weight to"
" 0: {1} vs {2}".format(name, weighted,
ignored))
except TypeError as e:
assert_true("sample_weight" in str(e),
"scorer {0} raises unhelpful exception when called "
"with sample weights: {1}".format(name, str(e)))
| bsd-3-clause |
drphilmarshall/scriptutils | python/plotting.py | 1 | 1613 | ############################
# @file plotting.py
# @author Douglas Applegate
# @date 10/12/07
#
# Provides convenient wrappers around common plotting tasks
# using numpy and pylab
############################
__cvs_id__ = "$Id: plotting.py,v 1.1 2008-01-17 19:12:38 dapple Exp $"
############################
import matplotlib.pylab as pylab
import numpy
############################
def doFormating(**formating):
if 'title' in formating:
pylab.title(formating['title'])
if 'xlabel' in formating:
pylab.xlabel(formating['xlabel'])
if 'ylabel' in formating:
pylab.ylabel(formating['ylabel'])
############################
def histogram(a, bins=10, range=None, log = False, normed = False,
filename = None,
**formating):
    hist, bins = numpy.histogram(a, bins, range, density=normed)
    width = bins[1] - bins[0]
    pylab.bar(bins[:-1], hist, width=width, log=log)
doFormating(**formating)
pylab.show()
if filename is not None:
pylab.savefig(filename)
pylab.clf()
#############################
def histogram2d(x, y, bins=10, range=None, normed=False, weights=None,
log = False,
filename = None,
**formating):
hist, x, y = numpy.histogram2d(x, y, bins, range, normed, weights)
if log is True:
hist = numpy.log(hist)
X, Y = pylab.meshgrid(x,y)
pylab.pcolor(X, Y,hist.transpose())
pylab.colorbar()
doFormating(**formating)
pylab.show()
if filename is not None:
pylab.savefig(filename)
pylab.clf()
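#############################
# A minimal usage sketch of the wrappers above (not part of the original
# module; the random data and output file names are illustrative assumptions).
def _example_usage():
    """Demonstrate histogram() and histogram2d() on random data."""
    data = numpy.random.normal(size=1000)
    histogram(data, bins=20, title='Gaussian sample', xlabel='x',
              ylabel='count', filename='hist1d.png')
    x = numpy.random.normal(size=1000)
    y = x + numpy.random.normal(scale=0.5, size=1000)
    histogram2d(x, y, bins=30, title='Correlated sample', xlabel='x',
                ylabel='y', filename='hist2d.png')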
| mit |
iastro-pt/ObservationTools | rv.py | 2 | 13320 | """Radial Velocity Orbit.
Goals
-----
To calculate when the radial velocity is different by a certain value.
Plot radial velocity phase curves, indicating obtained measurement locations.
"""
import argparse
import logging
import sys
from datetime import datetime
from typing import Dict, List, Any, Union
import astropy.units as u
import matplotlib.pyplot as plt
import numpy as np
from astropy.constants import c
from mpl_toolkits.axes_grid1 import host_subplot
from utils.parse import parse_paramfile
from utils.rv_utils import RV, JulianDate, prepare_mass_params, generate_companion_label
from utils.rv_utils import strtimes2jd, join_times, check_core_parameters
c_km_s = c.to(u.kilometer / u.second) # Speed of light in km/s
def parse_args(args):
# type: List[str] -> argparse.Namespace
"""RV Argparse parser."""
parser = argparse.ArgumentParser(description='Radial velocity plotting')
parser.add_argument('params', help='RV parameters filename', type=str)
parser.add_argument('-d', '--date', default=None,
help='Reference date in format YYYY-MM-DD [HH:MM:SS]. Default=None uses time of now.')
# parser.add_argument('-r', '--rv_diff', help='RV difference threshold to find')
parser.add_argument('-o', '--obs_times', help='Times of previous observations YYYY-MM-DD format',
nargs='+', default=None)
parser.add_argument('-l', '--obs_list', help='File with list of obs times.', type=str, default=None)
# parser.add_argument('-f', '--files', help='Params and obs-times are file'
# ' names to open', action='store_true')
parser.add_argument('-m', '--mode', help='Display mode '
' e.g. phase or time plot. Default="phase"',
choices=['phase', 'time'], default='phase', type=str)
parser.add_argument("-c", "--cycle_fraction", default=1.0, help="Cylcle fraction to display", type=float)
parser.add_argument("-p", "--phase_center", help="Center of phase curve.", default=0)
parser.add_argument("--save_only", help="Only save the figure, do not show it.", action="store_true")
parser.add_argument("--debug", help="Turning on debug output", action='store_true', default=False)
return parser.parse_args(args)
def main(params, mode="phase", obs_times=None, obs_list=None, date=None,
cycle_fraction=1, phase_center=0, save_only=False): # obs_times=None, mode='phase', rv_diff=None
    # type: (str, str, List[str], str, str, float, float, bool) -> plt.Figure
r"""Radial velocity displays.
Parameters
----------
params: str
Filename for text file containing the rv parameters. Format of 'param = value\n'.
mode: str
Mode for script to use. Phase, time, future.
obs_times: list of str
Dates of observations added manually at command line of format YYYY-MM-DD.
obs_list: str
Filename for list which contains obs_times (YYY-MM-DD HH:MM:SS).
date: str
Reference date for some modes. Defaults=None)
"""
# Load in params and store as a dictionary
parameters = parse_paramfile(params)
parameters = prepare_mass_params(parameters, only_msini=True)
parameters = check_core_parameters(parameters)
# combine obs_times and obs_list and turn into jd.
if obs_times:
if (".txt" in obs_times) or (".dat" in obs_times):
raise ValueError("Filename given instead of dates for obs_times.")
obs_times = join_times(obs_times, obs_list)
obs_jd = strtimes2jd(obs_times)
test_jd = obs_jd[0] if isinstance(obs_jd, (list, tuple)) else obs_jd
if ((str(parameters["tau"]).startswith("24") and not str(test_jd).startswith("24")) or
(not(str(parameters["tau"]).startswith("24")) and str(test_jd).startswith("24"))):
raise ValueError("Mismatch between Tau parameter '{0}' and times used '{1}'.".format(parameters["tau"], obs_jd))
date_split = JulianDate.now().jd if date is None else JulianDate.from_str(date).jd
    # Split past and future obs
future_obs = [obs for obs in obs_jd if obs > date_split]
past_obs = [obs for obs in obs_jd if obs <= date_split]
host_orbit = RV.from_dict(parameters)
companion_orbit = host_orbit.create_companion()
if mode == "phase":
fig = binary_phase_curve(host_orbit, companion_orbit, t_past=past_obs,
t_future=future_obs,
cycle_fraction=cycle_fraction,
phase_center=phase_center)
elif mode == "time":
fig = binary_time_curve(host_orbit, companion_orbit, t_past=past_obs,
start_day=date_split, t_future=future_obs,
cycle_fraction=cycle_fraction)
else:
raise NotImplementedError("Other modes are not Implemented yet.")
if not save_only:
fig.show()
return fig
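# A hypothetical parameter file for `main` might look like the following
# (the keyword names shown are assumptions based on the keys used above,
# e.g. "tau", "period", "name" and "companion"):
#
#     name      = example_star
#     companion = b
#     period    = 100.0       # days
#     tau       = 2450000.0   # JD of periastron passage
#
# e.g. fig = main("example_params.txt", mode="time", obs_list="observations.txt")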
def binary_phase_curve(host, companion, cycle_fraction=1, phase_center=0, ignore_mean=False, t_past=False,
t_future=False):
    # type: (RV, RV, float, float, bool, Any, Any) -> plt.Figure
    """Plot RV phase curve centered on zero.
    Parameters
    ----------
    host: RV
        RV class for the system's host.
    companion: RV
        RV class for the companion; may be None if no companion is present.
cycle_fraction: float
Fraction of phase space to plot. (Default=1)
ignore_mean: bool
Remove the contribution from the systems mean velocity. (Default=False)
t_past: float, array-like
Times of past observations.
t_future: float, array-like
Times of future observations.
phase_center: float
Center of phase curve to show.
Returns
-------
fig: matplotlib.Figure
Returns figure object.
"""
companion_present = companion is not None
phase = np.linspace(-0.5, 0.5, 100) * cycle_fraction + phase_center
fig = plt.figure(figsize=(10, 7))
fig.subplots_adjust()
ax1 = host_subplot(111)
host.ignore_mean = ignore_mean
host_rvs = host.rv_at_phase(phase)
ax1.plot(phase, host_rvs, label="Host", lw=2, color="k")
host_delta_y = host.max_amp() * 1.1
ax1.set_ylim(host.gamma - host_delta_y, host.gamma + host_delta_y)
ax1.axhline(host.gamma, color="black", linestyle="-.", alpha=0.5)
ax1.set_xlabel("Orbital Phase")
ax1.set_ylabel("Host RV (km/s)")
if companion_present:
companion.ignore_mean = ignore_mean
companion_rvs = companion.rv_at_phase(phase)
ax2 = ax1.twinx()
companion_label = generate_companion_label(companion)
ax2.plot(phase, companion_rvs, '--', label=companion_label, lw=2)
# Determine rv max amplitudes.
comp_delta_y = companion.max_amp() * 1.1
ax2.set_ylim(companion.gamma - comp_delta_y, companion.gamma + comp_delta_y)
ax2.axhline(companion.gamma, color="black", linestyle="-.", alpha=0.5)
ax2.set_ylabel("Companion RV (km/s)")
if t_past:
t_past = np.asarray(t_past)
phi = ((t_past - host.tau) / host.period + 0.5) % 1 - 0.5
rv_star = host.rv_at_phase(phi)
ax1.plot(phi, rv_star, ".", markersize=10, markeredgewidth=2)
if companion_present:
rv_planet = companion.rv_at_phase(phi)
ax2.plot(phi, rv_planet, "+", markersize=10, markeredgewidth=2)
if t_future:
t_future = np.asarray(t_future)
phi = ((t_future - host.tau) / host.period + 0.5) % 1 - 0.5
rv_star = host.rv_at_phase(phi)
ax1.plot(phi, rv_star, "ko", markersize=10, markeredgewidth=2, label="Host Obs")
if companion_present:
rv_planet = companion.rv_at_phase(phi)
ax2.plot(phi, rv_planet, "g*", markersize=10, markeredgewidth=2, label="Comp Obs")
if 'name' in host._params.keys():
if ("companion" in host._params.keys()) and (companion_present):
plt.title("RV Phase Curve for {} {}".format(host._params['name'].upper(), host._params['companion']))
else:
plt.title("RV Phase Curve for {}".format(host._params['name'].upper()))
else:
plt.title("RV Phase Curve")
plt.legend(loc=0)
return fig
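# Both plotting helpers fold observation times onto orbital phase with
#     phi = ((t - tau) / period + 0.5) % 1 - 0.5
# which maps any julian date onto [-0.5, 0.5) with phase 0 at the epoch tau.
# Worked example (illustrative numbers): tau = 2450000.0, period = 10 days,
# t = 2450012.5 gives phi = (1.25 + 0.5) % 1 - 0.5 = 0.25.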
def binary_time_curve(host, companion, cycle_fraction=1, ignore_mean=False, t_past=False, t_future=False,
start_day=None):
    # type: (RV, RV, float, bool, Any, Any, Any) -> plt.Figure
    """Plot RV curve against time, starting from a given day.
    Parameters
    ----------
    host: RV
        RV class for the system's host.
    companion: RV
        RV class for the companion; may be None if no companion is present.
cycle_fraction: float
Minimum Fraction of orbit to plot. (Default=1)
ignore_mean: bool
Remove the contribution from the systems mean velocity. (Default=False)
t_past: float, array-like
Times of past observations in julian days.
t_future: float, array-like
Times of future observations in julian days.
start_day: str
        Day to begin the RV curve from, in julian days. The default (None) sets the start time to JulianDate.now().
Returns
-------
    fig: matplotlib.Figure
        Returns the figure object.
"""
companion_present = companion is not None
t_start = start_day if start_day is not None else JulianDate.now().jd
# Make curve from start of t_past
print("t_past", t_past, "t_future", t_future)
if isinstance(t_past, float):
obs_start = t_past
elif t_past != [] and t_past is not None:
obs_start = np.min(t_past)
else:
obs_start = t_start
if isinstance(t_future, float):
obs_end = t_future
elif t_future != [] and t_future is not None:
obs_end = np.max(t_future)
else:
obs_end = t_start
    # Specify 500 points per period
    num_cycles = ((t_start + host.period * cycle_fraction) - np.min([t_start, obs_start])) / host.period
    num_points = int(np.ceil(500 * num_cycles))
if num_points > 10000:
logging.debug("num points = {}".format(num_points))
raise ValueError("num_points is to large")
t_space = np.linspace(min([t_start, obs_start]), np.max([t_start + host.period * cycle_fraction, obs_end]),
num_points)
start_dt = JulianDate(t_start).to_datetime()
if (start_dt.hour == 0) and (start_dt.minute == 0) and (start_dt.second == 0):
start_string = datetime.strftime(start_dt, "%Y-%m-%d")
else:
start_string = datetime.strftime(start_dt, "%Y-%m-%d %H:%M:%S") # Issue with 00:00:01 not appearing
fig = plt.figure(figsize=(10, 7))
fig.subplots_adjust()
ax1 = host_subplot(111)
host.ignore_mean = ignore_mean
host_rvs = host.rv_at_times(t_space)
ax1.plot(t_space - t_start, host_rvs, label="Host", lw=2, color="k")
# Determine rv max amplitudes.
amp1 = host.max_amp()
# Adjust axis limits
ax1.set_ylim(host.gamma - (amp1 * 1.1), host.gamma + (amp1 * 1.1))
ax1.axhline(host.gamma, color="black", linestyle="-.", alpha=0.5)
ax1.set_xlabel("Days from {!s}".format(start_string))
ax1.set_ylabel("Host RV (km/s)")
if companion_present:
companion.ignore_mean = ignore_mean
companion_rvs = companion.rv_at_times(t_space)
companion_label = generate_companion_label(companion)
ax2 = ax1.twinx()
ax2.plot(t_space - t_start, companion_rvs, '--', label=companion_label, lw=2)
# Adjust axis limits
amp2 = companion.max_amp()
# Determine rv max amplitudes.
ax2.set_ylim(companion.gamma - (amp2 * 1.1), companion.gamma + (amp2 * 1.1))
ax2.axhline(companion.gamma, color="black", linestyle="-.", alpha=0.5)
ax2.set_ylabel("Companion RV (km/s)")
if t_past:
t_past = np.asarray(t_past)
rv_star = host.rv_at_times(t_past)
ax1.plot(t_past - t_start, rv_star, "b.", markersize=10, markeredgewidth=2, label="Host past")
if companion_present:
rv_planet = companion.rv_at_times(t_past)
ax2.plot(t_past - t_start, rv_planet, "r+", markersize=10, markeredgewidth=2, label="Comp past")
if t_future:
t_future = np.asarray(t_future)
rv_star = host.rv_at_times(t_future)
ax1.plot(t_future - t_start, rv_star, "ko", markersize=10, markeredgewidth=2, label="Host future")
if companion_present:
rv_planet = companion.rv_at_times(t_future)
ax2.plot(t_future - t_start, rv_planet, "g*", markersize=10, markeredgewidth=2, label="Comp future")
if 'name' in host._params.keys():
if ("companion" in host._params.keys()) and (companion_present):
plt.title("Radial Velocity Curve for {} {}".format(host._params['name'].upper(), host._params['companion']))
else:
plt.title("Radial Velocity Curve for {}".format(host._params['name'].upper()))
else:
plt.title("Radial Velocity Curve")
plt.legend(loc=0)
return fig
if __name__ == '__main__':
args = vars(parse_args(sys.argv[1:]))
debug = args.pop('debug')
opts = {k: args[k] for k in args}
if debug:
logging.basicConfig(level=logging.DEBUG,
format='%(asctime)s %(levelname)s %(message)s')
else:
logging.basicConfig(level=logging.WARNING,
format='%(asctime)s %(levelname)s %(message)s')
fig = main(**opts)
plt.show(fig)
| mit |
prisae/blog-notebooks | travelmaps2.py | 1 | 11650 |
# coding: utf-8
# # `travelmaps2`: An updated version of `travelmaps`
#
# I did not want to change `travelmaps`, as it is a blog entry.
#
# These functions are very basic, and include almost no checking or similar at all. Feel free to fork and improve them!
# In[1]:
import shapefile
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.basemap import Basemap
from matplotlib import rcParams, patheffects
from matplotlib.collections import LineCollection
# Disable DecompressionBombWarning
from PIL import Image
Image.MAX_IMAGE_PIXELS = None
# In[2]:
def setup(dpi=300, sketch=(1, 100, 2), theme='light'):
"""Setup travelmaps."""
# Customized plt.xkcd()-settings
# http://jakevdp.github.io/blog/2013/07/10/XKCD-plots-in-matplotlib
rcParams['font.family'] = ['Humor Sans', 'Comic Sans MS']
rcParams['font.size'] = 8.0
rcParams['path.sketch'] = sketch
rcParams['axes.linewidth'] = 1.0
rcParams['lines.linewidth'] = 1.0
rcParams['grid.linewidth'] = 0.0
rcParams['axes.unicode_minus'] = False
if theme=='dark':
rcParams['path.effects'] = [patheffects.withStroke(linewidth=2, foreground="k")]
rcParams['figure.facecolor'] = 'black'
rcParams['figure.edgecolor'] = 'black'
rcParams['lines.color'] = 'white'
rcParams['patch.edgecolor'] = 'white'
rcParams['text.color'] = 'white'
rcParams['axes.facecolor'] = 'black'
rcParams['axes.edgecolor'] = 'white'
rcParams['axes.labelcolor'] = 'white'
rcParams['xtick.color'] = 'white'
rcParams['ytick.color'] = 'white'
rcParams['grid.color'] = 'white'
rcParams['savefig.facecolor'] = 'black'
rcParams['savefig.edgecolor'] = 'black'
else:
rcParams['path.effects'] = [patheffects.withStroke(linewidth=2, foreground="w")]
rcParams['figure.facecolor'] = 'white'
rcParams['figure.edgecolor'] = 'white'
rcParams['lines.color'] = 'black'
rcParams['patch.edgecolor'] = 'black'
rcParams['text.color'] = 'black'
rcParams['axes.facecolor'] = 'white'
rcParams['axes.edgecolor'] = 'black'
rcParams['axes.labelcolor'] = 'black'
rcParams['xtick.color'] = 'black'
rcParams['ytick.color'] = 'black'
rcParams['grid.color'] = 'black'
rcParams['savefig.facecolor'] = 'white'
rcParams['savefig.edgecolor'] = 'white'
# *Bayesian Methods for Hackers*-colour-cylce
# (https://github.com/pkgpl/PythonProcessing/blob/master/results/matplotlibrc.bmh.txt)
rcParams['axes.prop_cycle'] = plt.cycler('color', ['#348ABD', '#A60628', '#7A68A6', '#467821', '#D55E00',
'#CC79A7', '#56B4E9', '#009E73', '#F0E442', '#0072B2'])
# Adjust dpi, so figure on screen and savefig looks the same
rcParams['figure.dpi'] = dpi
rcParams['savefig.dpi'] = dpi
# In[ ]:
def setup_noxkcd(dpi=300, theme='light'):
"""Setup Maps."""
if theme=='dark':
rcParams['figure.facecolor'] = 'black'
rcParams['figure.edgecolor'] = 'black'
rcParams['lines.color'] = 'white'
rcParams['patch.edgecolor'] = 'white'
rcParams['text.color'] = 'white'
rcParams['axes.facecolor'] = 'black'
rcParams['axes.edgecolor'] = 'white'
rcParams['axes.labelcolor'] = 'white'
rcParams['xtick.color'] = 'white'
rcParams['ytick.color'] = 'white'
rcParams['grid.color'] = 'white'
rcParams['savefig.facecolor'] = 'black'
rcParams['savefig.edgecolor'] = 'black'
else:
rcParams['figure.facecolor'] = 'white'
rcParams['figure.edgecolor'] = 'white'
rcParams['lines.color'] = 'black'
rcParams['patch.edgecolor'] = 'black'
rcParams['text.color'] = 'black'
rcParams['axes.facecolor'] = 'white'
rcParams['axes.edgecolor'] = 'black'
rcParams['axes.labelcolor'] = 'black'
rcParams['xtick.color'] = 'black'
rcParams['ytick.color'] = 'black'
rcParams['grid.color'] = 'black'
rcParams['savefig.facecolor'] = 'white'
rcParams['savefig.edgecolor'] = 'white'
# *Bayesian Methods for Hackers*-colour-cylce
# (https://github.com/pkgpl/PythonProcessing/blob/master/results/matplotlibrc.bmh.txt)
rcParams['axes.prop_cycle'] = plt.cycler('color', ['#348ABD', '#A60628', '#7A68A6', '#467821', '#D55E00',
'#CC79A7', '#56B4E9', '#009E73', '#F0E442', '#0072B2'])
# Adjust dpi, so figure on screen and savefig looks the same
rcParams['figure.dpi'] = dpi
rcParams['savefig.dpi'] = dpi
# In[3]:
def cm2in(length, decimals=2):
"""Convert cm to inch.
Parameters
----------
length : scalar or vector
Numbers to be converted.
decimals : int, optional; <2>
As in np.round, used to round the result.
Returns
-------
cm2in : scalar or vector
Converted numbers.
Examples
--------
>>> from adashof import cm2in
>>> cm2in(5)
1.97
"""
# Test input
try:
length = np.array(length, dtype='float')
decimals = int(decimals)
except ValueError:
print("{length} must be a number, {decimals} an integer")
return np.round(length/2.54, decimals)
# In[4]:
def country(countries, bmap, fc=None, ec='none', lw=1, alpha=1, adm=0, gadmpath='/home/dtr/Documents/Webpages/blog-notebooks/data/TravelMap/'):
"""Colour <countries> with a <bmap> projection.
This script is adapted from:
http://www.geophysique.be/2013/02/12/
matplotlib-basemap-tutorial-10-shapefiles-unleached-continued
I downloaded the countries shapefile from the *Global Administrative Areas*
website, [gadm.org](http://gadm.org).
=> You have to use the same abbreviations for the countries as GADM does, or adjust the script.
=> You have to download the shapefiles from GADM, and extract them into the <gadmpath> directory.
    Of course, you can use any other shapefiles you have, and adjust the script accordingly.
Parameters
----------
countries : string or list of strings
Countries to be plotted.
bmap : handle
As you get from bmap = Basemap().
fc : None or colour, or list of colours; <None>
Face-colour for country; if <None>, it will cycle through colour-cycle.
ec : 'none' or colour (scalar or list); <'none'>
Edge-colour for country.
lw : scalar or list; <1>
Linewidth for country.
alpha: scalar or list; <1>
Transparency.
adm : {0, 1, 2, 3}; <0>
Administrative area to choose.
gadmpath : 'string'
Absolut or relative path to shapefiles.
"""
# Ensure countries is a list
if not isinstance(countries, list):
countries = [countries,]
# Get current axis
cax = plt.gca()
# Loop through the countries
for country in countries:
# Get shapefile for the country; extract shapes and records
r = shapefile.Reader(gadmpath+country+'_adm/'+country+'_adm'+str(adm),
encoding='windows-1252')
shapes = r.shapes()
records = r.records()
# Loop through the records; for adm0 this is only 1 run
n = 0
for record, shape in zip(records,shapes):
lons,lats = zip(*shape.points)
data = np.array(bmap(lons, lats)).T
if len(shape.parts) == 1:
segs = [data,]
else:
segs = []
for i in range(1,len(shape.parts)):
index = shape.parts[i-1]
index2 = shape.parts[i]
segs.append(data[index:index2])
segs.append(data[index2:])
lines = LineCollection(segs,antialiaseds=(1,))
# If facecolor is provided, use; else cycle through colours
if fc:
if not isinstance(fc, list):
lines.set_facecolors(fc)
else:
lines.set_facecolors(fc[n])
else:
cycle = cax._get_lines.prop_cycler
lines.set_facecolors(next(cycle)['color'])
# Edge colour
if not isinstance(ec, list):
lines.set_edgecolors(ec)
else:
lines.set_edgecolors(ec[n])
# Alpha
if not isinstance(alpha, list):
lines.set_alpha(alpha)
else:
lines.set_alpha(alpha[n])
# Line width
if not isinstance(lw, list):
lines.set_linewidth(lw)
else:
lines.set_linewidth(lw[n])
# Add to current plot
cax.add_collection(lines)
n += 1
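# A minimal usage sketch for `country` (not part of the original notebook; it
# assumes the GADM shapefiles, e.g. CHE_adm/CHE_adm0.shp, have been extracted
# into <gadmpath> as described in the docstring above):
#
#     setup(dpi=150)
#     fig = plt.figure(figsize=(cm2in(11), cm2in(8)))
#     bmap = Basemap(projection='merc', llcrnrlon=5.5, llcrnrlat=45.5,
#                    urcrnrlon=11., urcrnrlat=48., resolution='i')
#     country(['CHE', 'AUT'], bmap, fc=['#336699', '#993333'], alpha=.7)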
# In[5]:
def city(city, name, bmap, mfc=None, mec=None, color='b', offs=[.1, .1], halign='left'):
"""Plot a circle at <city> and annotate with <name>, with a <bmap> projection.
Parameters
----------
city : List of two scalars
[Northing, Easting].
name : string
name to be plotted with city.
bmap : handle
As you get from bmap = Basemap().
mfc : None or colour; <None>
Marker face-colour for city; if <None>, it will cycle through colour-cycle.
    mec : None or colour; <None>
        Marker edge-colour for city; if <None>, taken from rcParams['axes.edgecolor'].
    color : colour; <'b'>
        Colour for <name>.
offs : List of two scalars; <[.1, .1]>
Offset for <name> from <city>.
halign : {'left', 'right', 'center'}; <'left'>
Alignment of <name> relative to <city>.
"""
# mec from rcParams, to respect theme (dark/light)
if not mec:
mec = rcParams['axes.edgecolor']
# Get current axis
cax = plt.gca()
# Plot dot
# If mfc is provided, use; else cycle through colours
if not mfc:
cycle = cax._get_patches_for_fill.prop_cycler
mfc = next(cycle)['color']
bmap.plot(city[1], city[0], 'o', mfc=mfc, mec=mec, ms=4, mew=1, latlon=True)
# Annotate name
cax.annotate(name, bmap(city[1]+offs[0], city[0]+offs[1]),
horizontalalignment=halign, color=color, fontsize=7, zorder=10)
# In[6]:
def arrow(start, end, bmap, ec=None, fc=None, rad=-.3):
"""Plot an arrow from <start> to <end>, with a <bmap> projection.
Parameters
----------
start : List of two scalars
Start of arrow [Northing, Easting].
end : List of two scalars
End of arrow [Northing, Easting].
bmap : handle
As you get from bmap = Basemap().
    ec : None or colour; <None>
        Edge-colour for arrow; if <None>, taken from rcParams['axes.edgecolor'].
    fc : None or colour; <None>
        Face-colour for arrow; if <None>, taken from rcParams['axes.facecolor'].
    rad : Scalar; <-.3>
        Curvature of the arrow.
"""
# ec & fc from rcParams, to respect theme (dark/light)
if not ec:
ec = rcParams['axes.edgecolor']
if not fc:
fc = rcParams['axes.facecolor']
# Get current axis
cax = plt.gca()
# Plot arrow
arrowstyle='Fancy, head_length=.6, head_width=.6, tail_width=.4'
cax.annotate('', bmap(end[1], end[0]), bmap(start[1], start[0]),
arrowprops=dict(arrowstyle=arrowstyle,
alpha=.6,
patchA=None,
patchB=None,
shrinkA=3,
shrinkB=3,
fc=fc, ec=ec,
connectionstyle="arc3, rad="+str(rad),
))
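# In[7]:
# A minimal end-to-end sketch combining the helpers above (illustrative only;
# the coordinates, colours and map extent are assumptions, not part of the
# original notebook):
def _example_map():
    setup(dpi=150)
    fig = plt.figure(figsize=(cm2in(11), cm2in(8)))
    bmap = Basemap(projection='merc', llcrnrlon=5., llcrnrlat=45.5,
                   urcrnrlon=11., urcrnrlat=48.5, resolution='i')
    bmap.drawcoastlines()
    city([47.37, 8.54], 'Zurich', bmap, offs=[.05, .05])
    city([46.95, 7.45], 'Bern', bmap, offs=[.05, .05], halign='right')
    arrow([46.95, 7.45], [47.37, 8.54], bmap, rad=-.3)
    return fig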
| cc0-1.0 |
Fireblend/scikit-learn | examples/applications/plot_outlier_detection_housing.py | 243 | 5577 | """
====================================
Outlier detection on a real data set
====================================
This example illustrates the need for robust covariance estimation
on a real data set. It is useful both for outlier detection and for
a better understanding of the data structure.
We selected two sets of two variables from the Boston housing data set
as an illustration of what kind of analysis can be done with several
outlier detection tools. For the purpose of visualization, we are working
with two-dimensional examples, but one should be aware that things are
not so trivial in high-dimension, as it will be pointed out.
In both examples below, the main result is that the empirical covariance
estimate, as a non-robust one, is highly influenced by the heterogeneous
structure of the observations. Although the robust covariance estimate is
able to focus on the main mode of the data distribution, it sticks to the
assumption that the data should be Gaussian distributed, yielding some biased
estimation of the data structure, but yet accurate to some extent.
The One-Class SVM algorithm
First example
-------------
The first example illustrates how robust covariance estimation can help
concentrating on a relevant cluster when another one exists. Here, many
observations are confounded into one and break down the empirical covariance
estimation.
Of course, some screening tools would have pointed out the presence of two
clusters (Support Vector Machines, Gaussian Mixture Models, univariate
outlier detection, ...). But had it been a high-dimensional example, none
of these could be applied that easily.
Second example
--------------
The second example shows the ability of the Minimum Covariance Determinant
robust estimator of covariance to concentrate on the main mode of the data
distribution: the location seems to be well estimated, although the covariance
is hard to estimate due to the banana-shaped distribution. Anyway, we can
get rid of some outlying observations.
The One-Class SVM is able to capture the real data structure, but the
difficulty is to adjust its kernel bandwidth parameter so as to obtain
a good compromise between the shape of the data scatter matrix and the
risk of over-fitting the data.
"""
print(__doc__)
# Author: Virgile Fritsch <[email protected]>
# License: BSD 3 clause
import numpy as np
from sklearn.covariance import EllipticEnvelope
from sklearn.svm import OneClassSVM
import matplotlib.pyplot as plt
import matplotlib.font_manager
from sklearn.datasets import load_boston
# Get data
X1 = load_boston()['data'][:, [8, 10]] # two clusters
X2 = load_boston()['data'][:, [5, 12]] # "banana"-shaped
# Define "classifiers" to be used
classifiers = {
"Empirical Covariance": EllipticEnvelope(support_fraction=1.,
contamination=0.261),
"Robust Covariance (Minimum Covariance Determinant)":
EllipticEnvelope(contamination=0.261),
"OCSVM": OneClassSVM(nu=0.261, gamma=0.05)}
colors = ['m', 'g', 'b']
legend1 = {}
legend2 = {}
# Learn a frontier for outlier detection with several classifiers
xx1, yy1 = np.meshgrid(np.linspace(-8, 28, 500), np.linspace(3, 40, 500))
xx2, yy2 = np.meshgrid(np.linspace(3, 10, 500), np.linspace(-5, 45, 500))
for i, (clf_name, clf) in enumerate(classifiers.items()):
plt.figure(1)
clf.fit(X1)
Z1 = clf.decision_function(np.c_[xx1.ravel(), yy1.ravel()])
Z1 = Z1.reshape(xx1.shape)
legend1[clf_name] = plt.contour(
xx1, yy1, Z1, levels=[0], linewidths=2, colors=colors[i])
plt.figure(2)
clf.fit(X2)
Z2 = clf.decision_function(np.c_[xx2.ravel(), yy2.ravel()])
Z2 = Z2.reshape(xx2.shape)
legend2[clf_name] = plt.contour(
xx2, yy2, Z2, levels=[0], linewidths=2, colors=colors[i])
legend1_values_list = list( legend1.values() )
legend1_keys_list = list( legend1.keys() )
# Plot the results (= shape of the data points cloud)
plt.figure(1) # two clusters
plt.title("Outlier detection on a real data set (boston housing)")
plt.scatter(X1[:, 0], X1[:, 1], color='black')
bbox_args = dict(boxstyle="round", fc="0.8")
arrow_args = dict(arrowstyle="->")
plt.annotate("several confounded points", xy=(24, 19),
xycoords="data", textcoords="data",
xytext=(13, 10), bbox=bbox_args, arrowprops=arrow_args)
plt.xlim((xx1.min(), xx1.max()))
plt.ylim((yy1.min(), yy1.max()))
plt.legend((legend1_values_list[0].collections[0],
legend1_values_list[1].collections[0],
legend1_values_list[2].collections[0]),
(legend1_keys_list[0], legend1_keys_list[1], legend1_keys_list[2]),
loc="upper center",
prop=matplotlib.font_manager.FontProperties(size=12))
plt.ylabel("accessibility to radial highways")
plt.xlabel("pupil-teacher ratio by town")
legend2_values_list = list( legend2.values() )
legend2_keys_list = list( legend2.keys() )
plt.figure(2) # "banana" shape
plt.title("Outlier detection on a real data set (boston housing)")
plt.scatter(X2[:, 0], X2[:, 1], color='black')
plt.xlim((xx2.min(), xx2.max()))
plt.ylim((yy2.min(), yy2.max()))
plt.legend((legend2_values_list[0].collections[0],
legend2_values_list[1].collections[0],
legend2_values_list[2].collections[0]),
           (legend2_keys_list[0], legend2_keys_list[1], legend2_keys_list[2]),
loc="upper center",
prop=matplotlib.font_manager.FontProperties(size=12))
plt.ylabel("% lower status of the population")
plt.xlabel("average number of rooms per dwelling")
plt.show()
| bsd-3-clause |
MATH2605-JES/python-LinearAlgebraAlgo | Util.py | 1 | 6152 | # General Utility Methods for Algorithms
import random as rand
import numpy as np
import numpy.matlib
import matplotlib.pyplot as pyplot
import matplotlib.colors as plotcolors
# James
def multiply_matrix(matrix_1, matrix_2):
if matrix_1.shape[1] != matrix_2.shape[0]:
return None
result = np.empty((matrix_1.shape[0], matrix_2.shape[1]), dtype=float)
# We can use transpose & dot product library function.
# Dot product of first rows of matrix_1 and matrix_2^t gives us first resulting number.of first row.
# Dot product of first row of matrix_1 and second row of matrix_2^t gives us second resulting number of first row.
matrix_2_t = matrix_2.transpose()
for i in range(matrix_1.shape[0]):
for j in range(matrix_2_t.shape[0]):
result[i, j] = matrix_1[i].dot(matrix_2_t[j])
return result
# Emeke
# works n x m matrices
def multiply_matrix2(matrix_1, matrix_2):
product = np.matlib.empty((matrix_1.shape[0], matrix_2.shape[1]))
for i in range(product.shape[0]):
for j in range(product.shape[1]):
product[i, j] = matrix_1[i, :].dot(matrix_2[:, j])
return product
# Seth
def lu_fact(matrix):
size = matrix.shape[0]
L = np.identity(size, float)
U = np.ndarray.astype(matrix, float)
for row in xrange(1, size):
for col in xrange(0, row):
L[row][col] = U[row][col] / U[col][col]
U[row] -= L[row][col] * U[col]
error = matrix_error(multiply_matrix(L, U), matrix)
return L, U, error
# Seth
def find_determinant(matrix):
size = matrix.shape[0]
if size == 1:
return matrix[0][0]
answer = 0
modifier = 1
for i in xrange(size):
element = matrix[0][i]
newMatrix = np.zeros((size - 1, size - 1))
for row in xrange(1, size):
newCol = 0
for col in xrange(size):
if col != i:
newMatrix[row - 1][newCol] = matrix[row][col]
newCol += 1
answer += element * modifier * find_determinant(newMatrix)
modifier *= -1
return answer
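# Worked example (illustrative): for [[1, 2], [3, 4]] the expansion along the
# first row gives 1 * det([[4]]) - 2 * det([[3]]) = 4 - 6 = -2, i.e. ad - bc.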
# Seth
def vector_error(array):
if len(array) == 0:
return
answer = np.absolute(array[0])
for i in range(len(array)):
if np.absolute(array[i]) > answer:
answer = np.absolute(array[i])
return answer
# Seth
def getDiag(matrix):
diag = np.copy(matrix)
for i in range(diag.shape[0]):
for j in range(diag.shape[1]):
if i != j:
diag[i][j] = 0
return diag
# Seth
def getLowerDiag(matrix):
lower = np.copy(matrix)
for i in range(lower.shape[0]):
for j in range(lower.shape[1]):
if i < j:
lower[i][j] = 0
return lower
# James
def matrix_trace(matrix):
loop = min(matrix.shape[1], matrix.shape[0])
sum = 0
for i in range(loop):
sum += matrix[i, i]
return sum
# James
def vector_norm(vector):
squared_sum = 0
for i in range(len(vector)):
squared_sum += vector[i] ** 2
return np.sqrt(squared_sum)
# James
# if [ a b c
# d e f
# g h i ] , cut_size = 1
# return [ e f
# h i ] , will return same matrix of cut_size = 0
#
def get_sub_matrix(matrix, cut_size=1):
m, n = matrix.shape
if cut_size <= 0:
return matrix
arr = np.empty((m - cut_size, n - cut_size))
for x in range(cut_size, n):
for y in range(cut_size, m):
arr[y - cut_size, x - cut_size] = matrix[y, x]
return arr
# James
def matrix_error(matrix, original_matrix):
if matrix.shape != original_matrix.shape:
return None
y, x = matrix.shape
error_matrix = matrix - original_matrix
# Allowed built-ins were iffy on this one, so didn't use np.sum(matrix-original_matrix, axis=1)
max = abs(error_matrix[0, 0])
for i in range(y):
for j in range(x):
compared = abs(error_matrix[i, j])
if max < compared:
max = compared
return max
# James
# This beautiful code took 3.5 hours T_T
def matrix_cofactor(matrix):
y, x = matrix.shape
cofactor = np.empty([y, x], dtype=float)
for i in range(y):
flip = 1.0 if (i % 2 == 0) else -1.0
for j in range(x):
sub_matrix = np.delete(np.delete(matrix, j, 1), i, 0)
cofactor[i, j] = flip * find_determinant(sub_matrix)
flip *= -1
return cofactor
# James
def matrix_inverse(matrix):
return 1.0 / find_determinant(matrix) * matrix_cofactor(matrix).T
# Emeke
def matrix_inverse_22(matrix):
det = matrix[0, 0] * matrix[1, 1] - matrix[0, 1] * matrix[1, 0]
    # Adjugate of a 2x2 matrix: swap the diagonal entries, negate the off-diagonal ones.
    matrixB = np.matrix([[matrix[1, 1], -matrix[0, 1]], [-matrix[1, 0], matrix[0, 0]]])
if det == 0:
return None
return (1.0 / det) * matrixB
""" Emeke
Generates 1000 random 2x2 matrices
Create a series of randomly generated matrices with uniformly distributed entries within a given range
shape (tuple(int, int)): Desired shape of matrices.
number (int): Requested number of matrices.
lower (Real): Lower bound for random range.
upper (Real): Upper bound for random range.
"""
def random_matrices(shape, number, lower, upper):
series = tuple()
while len(series) < number:
mat = np.matlib.empty(shape)
for i in range(mat.shape[0]):
for j in range(mat.shape[1]):
mat[i, j] = rand.uniform(lower, upper)
series += (mat,)
return series
# Emeke
def plot_colored(data, colors, color_label, xlabel, ylabel, title, xscale, yscale, cmap, fname):
pyplot.clf()
# Create colormap object if needed
colormap = None if cmap is None else plotcolors.LinearSegmentedColormap.from_list('cplot', cmap)
# Plot data
pyplot.scatter(data[0], data[1], s=40, c=colors, cmap=colormap)
# Create titles and legend, then render
pyplot.colorbar().set_label(color_label)
pyplot.title(title).set_size('xx-large')
pyplot.xlabel(xlabel)
pyplot.ylabel(ylabel)
pyplot.xlim(xscale)
pyplot.ylim(yscale)
pyplot.savefig(fname)
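# A small self-check sketch (not part of the original assignment code); it
# verifies the helpers above on a 2x2 example against known results.
def _sanity_check():
    a = np.array([[4.0, 3.0], [6.0, 3.0]])
    L, U, err = lu_fact(a)
    assert err < 1e-12  # L*U should reproduce a
    inv = matrix_inverse(a)
    assert matrix_error(multiply_matrix(a, inv), np.identity(2)) < 1e-12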
| cc0-1.0 |
rahulgayatri23/moose-core | python/moose/neuroml2/converter.py | 2 | 7358 | # converter.py ---
#
# Filename: mtoneuroml.py
# Description:
# Author:
# Maintainer:
# Created: Mon Apr 22 12:15:23 2013 (+0530)
# Version:
# Last-Updated: Wed Jul 10 16:36:14 2013 (+0530)
# By: subha
# Update #: 819
# URL:
# Keywords:
# Compatibility:
#
#
# Commentary:
#
# Utility for converting a MOOSE model into NeuroML2. This uses Python
# libNeuroML.
#
#
# Change log:
#
# Tue May 21 16:58:03 IST 2013 - Subha moved the code for function
# fitting to hhfit.py.
#
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street, Fifth
# Floor, Boston, MA 02110-1301, USA.
#
#
# Code:
#!!!!! TODO: unit conversion !!!!
try:
from future_builtins import zip
except ImportError:
pass
import traceback
import warnings
from collections import deque
import numpy as np
from scipy.optimize import curve_fit
from matplotlib import pyplot as plt
import moose
from moose.utils import autoposition
import neuroml
import hhfit
def convert_morphology(root, positions='auto'):
"""Convert moose neuron morphology contained under `root` into a
NeuroML object. The id of the return object is
{root.name}_morphology. Each segment object gets the numeric value
of the moose id of the object. The name of the segments are same
as the corresponding moose compartment.
Parameters
----------
root : a moose element containing a single cell model.
positions : string
flag to indicate if the positions of the end points of the
compartments are explicitly available in the compartments or
should be automatically generated. Possible values:
`auto` - automatically generate z coordinates using length of the
compartments.
`explicit` - model has explicit coordinates for all compartments.
Return
------
a neuroml.Morphology instance.
"""
if positions == 'auto':
queue = deque([autoposition(root)])
elif positions == 'explicit':
compartments = moose.wildcardFind('%s/##[TYPE=Compartment]' % (root.path))
queue = deque([compartment for compartment in map(moose.element, compartments)
if len(compartment.neighbours['axial']) == 0])
if len(queue) != 1:
            raise Exception('There must be one and only one top level compartment. Found %d' % (len(queue)))
else:
raise Exception('allowed values for keyword argument positions=`auto` or `explicit`')
comp_seg = {}
parent = None
while len(queue) > 0:
compartment = queue.popleft()
proximal = neuroml.Point3DWithDiam(x=compartment.x0,
y=compartment.y0,
z=compartment.z0,
diameter=compartment.diameter)
distal = neuroml.Point3DWithDiam(x=compartment.x,
y=compartment.y,
z=compartment.z,
diameter=compartment.diameter)
plist = list(map(moose.element, compartment.neighbours['axial']))
try:
parent = neuroml.SegmentParent(segments=comp_seg[moose.element(plist[0])].id)
except (KeyError, IndexError) as e:
parent = None
segment = neuroml.Segment(id=compartment.id_.value,
proximal=proximal,
distal=distal,
parent=parent)
# TODO: For the time being using numerical value of the moose
# id for neuroml id.This needs to be updated for handling
# array elements
segment.name = compartment.name
comp_seg[compartment] = segment
queue.extend([comp for comp in map(moose.element, compartment.neighbours['raxial'])])
morph = neuroml.Morphology(id='%s_morphology' % (root.name))
morph.segments.extend(comp_seg.values())
return morph
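# A minimal usage sketch for convert_morphology (illustrative only; the moose
# path '/model/cell' is an assumption):
#
#     cell = moose.element('/model/cell')
#     morph = convert_morphology(cell, positions='auto')
#     # morph can then be attached to a neuroml Cell/NeuroMLDocument and
#     # serialized with libNeuroML's writers.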
def define_vdep_rate(fn, name):
"""Define new component type with generic expressions for voltage
dependent rate.
"""
ctype = neuroml.ComponentType(name)
# This is going to be ugly ...
def convert_hhgate(gate):
"""Convert a MOOSE gate into GateHHRates in NeuroML"""
hh_rates = neuroml.GateHHRates(id=gate.id_.value, name=gate.name)
alpha = gate.tableA.copy()
beta = gate.tableB - alpha
vrange = np.linspace(gate.min, gate.max, len(alpha))
afn, ap = hhfit.find_ratefn(vrange, alpha)
bfn, bp = hhfit.find_ratefn(vrange, beta)
if afn is None:
raise Exception('could not find a fitting function for `alpha`')
if bfn is None:
        raise Exception('could not find a fitting function for `beta`')
afn_type = fn_rate_map[afn]
afn_component_type = None
if afn_type is None:
afn_type, afn_component_type = define_component_type(afn)
hh_rates.forward_rate = neuroml.HHRate(type=afn_type,
midpoint='%gmV' % (ap[2]),
scale='%gmV' % (ap[1]),
rate='%gper_ms' % (ap[0]))
bfn_type = fn_rate_map[bfn]
bfn_component_type = None
if bfn_type is None:
bfn_type, bfn_component_type = define_component_type(bfn)
hh_rates.reverse_rate = neuroml.HHRate(type=bfn_type,
midpoint='%gmV' % (bp[2]),
scale='%gmV' % (bp[1]),
rate='%gper_ms' % (bp[0]))
return hh_rates, afn_component_type, bfn_component_type
def convert_hhchannel(channel):
"""Convert a moose HHChannel object into a neuroml element.
TODO: need to check useConcentration option for Ca2+ and V
dependent gates. How to handle generic expressions???
"""
nml_channel = neuroml.IonChannel(id=channel.id_.value,
name=channel.name,
type='ionChannelHH',
conductance=channel.Gbar)
    if channel.Xpower > 0:
        # convert_hhgate returns (gate, alpha_component_type, beta_component_type)
        hh_rate_x, _, _ = convert_hhgate(channel.gateX[0])
        hh_rate_x.instances = channel.Xpower
        nml_channel.gate.append(hh_rate_x)
    if channel.Ypower > 0:
        hh_rate_y, _, _ = convert_hhgate(channel.gateY[0])
        hh_rate_y.instances = channel.Ypower
        nml_channel.gate.append(hh_rate_y)
    if channel.Zpower > 0:
        hh_rate_z, _, _ = convert_hhgate(channel.gateZ[0])
        hh_rate_z.instances = channel.Zpower
        nml_channel.gate.append(hh_rate_z)
return nml_channel
#
# converter.py ends here
| gpl-3.0 |
bikong2/scikit-learn | doc/tutorial/text_analytics/skeletons/exercise_01_language_train_model.py | 254 | 2005 | """Build a language detector model
The goal of this exercise is to train a linear classifier on text features
that represent sequences of up to 3 consecutive characters so as to
recognize natural languages by using the frequencies of short character
sequences as 'fingerprints'.
"""
# Author: Olivier Grisel <[email protected]>
# License: Simplified BSD
import sys
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import Perceptron
from sklearn.pipeline import Pipeline
from sklearn.datasets import load_files
from sklearn.cross_validation import train_test_split
from sklearn import metrics
# The training data folder must be passed as first argument
languages_data_folder = sys.argv[1]
dataset = load_files(languages_data_folder)
# Split the dataset in training and test set:
docs_train, docs_test, y_train, y_test = train_test_split(
dataset.data, dataset.target, test_size=0.5)
# TASK: Build a vectorizer that splits strings into sequences of 1 to 3
# characters instead of word tokens
# TASK: Build a vectorizer / classifier pipeline using the previous analyzer
# the pipeline instance should be stored in a variable named clf
# TASK: Fit the pipeline on the training set
# TASK: Predict the outcome on the testing set in a variable named y_predicted
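# One possible completion of the TASKs above (a hedged sketch, not the
# official exercise solution): a character 1-3-gram TF-IDF vectorizer
# chained with a Perceptron classifier.
vectorizer = TfidfVectorizer(ngram_range=(1, 3), analyzer='char',
                             use_idf=False)
clf = Pipeline([
    ('vec', vectorizer),
    ('clf', Perceptron()),
])
clf.fit(docs_train, y_train)
y_predicted = clf.predict(docs_test)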
# Print the classification report
print(metrics.classification_report(y_test, y_predicted,
target_names=dataset.target_names))
# Plot the confusion matrix
cm = metrics.confusion_matrix(y_test, y_predicted)
print(cm)
#import pylab as pl
#pl.matshow(cm, cmap=pl.cm.jet)
#pl.show()
# Predict the result on some short new sentences:
sentences = [
u'This is a language detection test.',
u'Ceci est un test de d\xe9tection de la langue.',
u'Dies ist ein Test, um die Sprache zu erkennen.',
]
predicted = clf.predict(sentences)
for s, p in zip(sentences, predicted):
print(u'The language of "%s" is "%s"' % (s, dataset.target_names[p]))
| bsd-3-clause |